code
stringlengths
4
1.01M
language
stringclasses
2 values
/* * Copyright (C) 2017-2019 Dremio Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dremio.exec.planner.sql.parser; import java.util.List; import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlOperator; import org.apache.calcite.sql.SqlSpecialOperator; import org.apache.calcite.sql.SqlWriter; import org.apache.calcite.sql.parser.SqlParserPos; import com.dremio.service.namespace.NamespaceKey; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; public class SqlTruncateTable extends SqlCall { public static final SqlSpecialOperator OPERATOR = new SqlSpecialOperator("TRUNCATE_TABLE", SqlKind.OTHER_DDL) { @Override public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... 
operands) { Preconditions.checkArgument(operands.length == 3, "SqlTruncateTable.createCall() " + "has to get 3 operands!"); return new SqlTruncateTable(pos, (SqlIdentifier) operands[0], (SqlLiteral) operands[1], (SqlLiteral) operands[2]); } }; private SqlIdentifier tableName; private boolean tableExistenceCheck; private boolean tableKeywordPresent; public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, SqlLiteral tableExistenceCheck, SqlLiteral tableKeywordPresent) { this(pos, tableName, tableExistenceCheck.booleanValue(), tableKeywordPresent.booleanValue()); } public SqlTruncateTable(SqlParserPos pos, SqlIdentifier tableName, boolean tableExistenceCheck, boolean tableKeywordPresent) { super(pos); this.tableName = tableName; this.tableExistenceCheck = tableExistenceCheck; this.tableKeywordPresent = tableKeywordPresent; } @Override public SqlOperator getOperator() { return OPERATOR; } @Override public List<SqlNode> getOperandList() { return ImmutableList.of( tableName, SqlLiteral.createBoolean(tableExistenceCheck, SqlParserPos.ZERO), SqlLiteral.createBoolean(tableKeywordPresent, SqlParserPos.ZERO) ); } @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) { writer.keyword("TRUNCATE"); if (tableKeywordPresent) { writer.keyword("TABLE"); } if (tableExistenceCheck) { writer.keyword("IF"); writer.keyword("EXISTS"); } tableName.unparse(writer, leftPrec, rightPrec); } public NamespaceKey getPath() { return new NamespaceKey(tableName.names); } public boolean checkTableExistence() { return tableExistenceCheck; } }
Java
# Inga lallensis Benth. SPECIES #### Status ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
<!DOCTYPE HTML> <html> <head> <meta http-equiv="Content-type" content="text/html; charset=utf-8"> <title>Hovercards6 userオプション</title> <script src="http://platform.twitter.com/anywhere.js?id=[APIキー]&amp;v=1" type="text/javascript"></script> </head> <body> @twitterapi をフォローしましょう<br> <img src="./twitter4j.png" id="image" alt="t4j_news"/> <script type="text/javascript"> twttr.anywhere(function (T) { T.hovercards(); T("#image").hovercards( {username: function(node){return node.alt}}); }); </script> </body> </html>
Java
/* * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 * (the "License"). You may not use this work except in compliance with the License, which is * available at www.apache.org/licenses/LICENSE-2.0 * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied, as more fully set forth in the License. * * See the NOTICE file distributed with this work for information regarding copyright ownership. */ package alluxio; import org.junit.Assert; import org.junit.Test; import java.util.ArrayList; import java.util.Arrays; import java.util.List; /** * Unit tests for {@link StorageTierAssoc}. */ public class StorageTierAssocTest { private void checkStorageTierAssoc(StorageTierAssoc assoc, PropertyKey levelsProperty, PropertyKeyFormat aliasFormat) { int size = Configuration.getInt(levelsProperty); Assert.assertEquals(size, assoc.size()); List<String> expectedOrderedAliases = new ArrayList<>(); for (int i = 0; i < size; i++) { String alias = Configuration.get(aliasFormat.format(i)); Assert.assertEquals(i, assoc.getOrdinal(alias)); Assert.assertEquals(alias, assoc.getAlias(i)); expectedOrderedAliases.add(alias); } Assert.assertEquals(expectedOrderedAliases, assoc.getOrderedStorageAliases()); } /** * Tests the constructors of the {@link MasterStorageTierAssoc} and {@link WorkerStorageTierAssoc} * classes with a {@link Configuration}. 
*/ @Test public void masterWorkerConfConstructor() { Configuration.set(PropertyKey.MASTER_TIERED_STORE_GLOBAL_LEVELS, "3"); Configuration.set( PropertyKeyFormat.MASTER_TIERED_STORE_GLOBAL_LEVEL_ALIAS_FORMAT.format(2), "BOTTOM"); Configuration.set(PropertyKey.WORKER_TIERED_STORE_LEVELS, "2"); Configuration.set( PropertyKeyFormat.WORKER_TIERED_STORE_LEVEL_ALIAS_FORMAT.format(1), "BOTTOM"); checkStorageTierAssoc(new MasterStorageTierAssoc(), PropertyKey.MASTER_TIERED_STORE_GLOBAL_LEVELS, PropertyKeyFormat.MASTER_TIERED_STORE_GLOBAL_LEVEL_ALIAS_FORMAT); checkStorageTierAssoc(new WorkerStorageTierAssoc(), PropertyKey.WORKER_TIERED_STORE_LEVELS, PropertyKeyFormat.WORKER_TIERED_STORE_LEVEL_ALIAS_FORMAT); ConfigurationTestUtils.resetConfiguration(); } /** * Tests the constructors of the {@link MasterStorageTierAssoc} and {@link WorkerStorageTierAssoc} * classes with different storage alias. */ @Test public void storageAliasListConstructor() { List<String> orderedAliases = Arrays.asList("MEM", "HDD", "SOMETHINGELSE", "SSD"); MasterStorageTierAssoc masterAssoc = new MasterStorageTierAssoc(orderedAliases); WorkerStorageTierAssoc workerAssoc = new WorkerStorageTierAssoc(orderedAliases); Assert.assertEquals(orderedAliases.size(), masterAssoc.size()); Assert.assertEquals(orderedAliases.size(), workerAssoc.size()); for (int i = 0; i < orderedAliases.size(); i++) { String alias = orderedAliases.get(i); Assert.assertEquals(alias, masterAssoc.getAlias(i)); Assert.assertEquals(i, masterAssoc.getOrdinal(alias)); Assert.assertEquals(alias, workerAssoc.getAlias(i)); Assert.assertEquals(i, workerAssoc.getOrdinal(alias)); } Assert.assertEquals(orderedAliases, masterAssoc.getOrderedStorageAliases()); Assert.assertEquals(orderedAliases, workerAssoc.getOrderedStorageAliases()); } }
Java
/* * Copyright 2009-2013 by The Regents of the University of California * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * you may obtain a copy of the License from * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uci.ics.asterix.lexergenerator.rules; public class RuleAnythingUntil implements Rule { private char expected; public RuleAnythingUntil clone() { return new RuleAnythingUntil(expected); } public RuleAnythingUntil(char expected) { this.expected = expected; } @Override public String toString() { return " .* " + String.valueOf(expected); } @Override public int hashCode() { return 10 * (int) expected; } @Override public boolean equals(Object o) { if (o == null) return false; if (o instanceof RuleAnythingUntil) { if (((RuleAnythingUntil) o).expected == this.expected) { return true; } } return false; } @Override public String javaAction() { return "currentChar = readNextChar();"; } @Override public String javaMatch(String action) { return "boolean escaped = false;\n" + "while (currentChar != '" + expected + "' || escaped) {\n" + "if(!escaped && currentChar == '\\\\\\\\') {\n" + "escaped = true;\n" + "containsEscapes = true;\n" + "} else {\n" + "escaped = false;\n" + "}\n" + "currentChar = readNextChar();\n" + "}\n" + "if (currentChar == '" + expected + "') {" + action + "}\n"; } }
Java
package com.netwebx.hackerrank.rpc.client; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.net.InetSocketAddress; import java.net.Socket; /** * Created by apple on 2017/2/26. */ public class RpcImporter<S> { public S importer(final Class<?> serviceClass, final InetSocketAddress addr) { return (S) Proxy.newProxyInstance( serviceClass.getClassLoader(), new Class<?>[]{serviceClass.getInterfaces()[0]}, new InvocationHandler() { @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { Socket socket = null; ObjectOutputStream output = null; ObjectInputStream input = null; try { socket = new Socket(); socket.connect(addr); output = new ObjectOutputStream(socket.getOutputStream()); output.writeUTF(serviceClass.getName()); output.writeUTF(method.getName()); output.writeObject(method.getParameterTypes()); output.writeObject(args); input = new ObjectInputStream(socket.getInputStream()); return input.readObject(); } finally { if (socket != null) { socket.close(); } if (output != null) { output.close(); } if (input != null) { input.close(); } } } } ); } }
Java
require 'adrian/queue' require 'fileutils' module Adrian class DirectoryQueue < Adrian::Queue include Filters def self.create(options = {}) queue = new(options) FileUtils.mkdir_p(queue.available_path) FileUtils.mkdir_p(queue.reserved_path) queue end attr_reader :available_path, :reserved_path, :logger # Note: # There is the possibility of an item being consumed by multiple processes when its still in the queue after its lock expires. # The reason for allowing this is: # 1. It's much simpler than introducing a seperate monitoring process to handle lock expiry. # 2. This is an acceptable and rare event. e.g. it only happens when the process working on the item crashes without being able to release the lock def initialize(options = {}) super @available_path = options.fetch(:path) @reserved_path = options.fetch(:reserved_path, default_reserved_path) @logger = options[:logger] filters << Filters::FileLock.new(:duration => options[:lock_duration], :reserved_path => reserved_path) filters << Filters::Delay.new(:duration => options[:delay]) if options[:delay] end def pop_item items.each do |item| return item if reserve(item) end nil end def push_item(value) item = wrap_item(value) item.move(available_path) item.touch self end def length available_files.count { |file| File.file?(file) } end def include?(value) item = wrap_item(value) items.include?(item) end protected def wrap_item(value) item = value.is_a?(FileItem) ? value : FileItem.new(value) item.logger ||= logger item end def reserve(item) item.move(reserved_path) item.touch true rescue Errno::ENOENT => e false end def items items = files.map { |file| wrap_item(file) } items.reject! { |item| !item.exist? || filter?(item) } items.sort_by(&:updated_at) end def files (available_files + reserved_files).select { |file| File.file?(file) } end def available_files Dir.glob("#{available_path}/*") end def reserved_files Dir.glob("#{reserved_path}/*") end def default_reserved_path File.join(@available_path, 'cur') end end end
Java
// SERVER-4516 and SERVER-6913: test that update and findAndModify tolerate // an _id in the update document, as long as the _id will not be modified var t = db.jstests_server4516; var startingDoc = {_id: 1, a: 1}; function prepare() { t.drop(); t.save(startingDoc); } function update_succeeds(updateDoc, qid, resultDoc) { prepare(); t.update({_id: qid}, updateDoc, true); assert.eq(t.findOne({_id: qid}), resultDoc); prepare(); t.findAndModify({query: {_id: qid}, update: updateDoc, upsert: true}); assert.eq(t.findOne({_id: qid}), resultDoc); } update_succeeds({_id: 1, a: 2}, 1, {_id: 1, a: 2}); update_succeeds({$set: {_id: 1}}, 1, {_id: 1, a: 1}); update_succeeds({_id: 1, b: "a"}, 1, {_id: 1, b: "a"}); update_succeeds({_id: 2, a: 3}, 2, {_id: 2, a: 3}); function update_fails(updateDoc, qid) { prepare(); var res = t.update({_id: qid}, updateDoc, true); assert.writeError(res); assert.eq(t.count(), 1); assert.eq(t.findOne(), startingDoc); prepare(); assert.throws(function() { t.findAndModify({query: {_id: qid}, update: updateDoc, upsert: true}); }); assert.eq(t.count(), 1); assert.eq(t.findOne(), startingDoc); } update_fails({$set: {_id: 2}}, 1); update_fails({_id: 2, a: 3}, 1); update_fails({_id: 2, a: 3}, 3);
Java
package com.oath.cyclops.internal.stream.spliterators.push; import com.oath.cyclops.types.persistent.PersistentCollection; import java.util.Collection; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; /** * Created by johnmcclean on 12/01/2017. */ public class GroupedByTimeOperator<T,C extends PersistentCollection<? super T>,R> extends BaseOperator<T,R> { private final Supplier<? extends C> factory; private final Function<? super C, ? extends R> finalizer; private final long time; private final TimeUnit t; public GroupedByTimeOperator(Operator<T> source, Supplier<? extends C> factory, Function<? super C, ? extends R> finalizer,long time, TimeUnit t){ super(source); this.factory = factory; this.finalizer = finalizer; this.time = time; this.t = t; } @Override public StreamSubscription subscribe(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onComplete) { long toRun = t.toNanos(time); PersistentCollection[] next = {factory.get()}; long[] start ={System.nanoTime()}; StreamSubscription[] upstream = {null}; StreamSubscription sub = new StreamSubscription(){ @Override public void request(long n) { if(n<=0) { onError.accept(new IllegalArgumentException("3.9 While the Subscription is not cancelled, Subscription.request(long n) MUST throw a java.lang.IllegalArgumentException if the argument is <= 0.")); return; } if(!isOpen) return; super.request(n); upstream[0].request(n); } @Override public void cancel() { upstream[0].cancel(); super.cancel(); } }; upstream[0] = source.subscribe(e-> { try { next[0] = next[0].plus(e); if(System.nanoTime()-start[0] > toRun){ onNext.accept(finalizer.apply((C)next[0])); sub.requested.decrementAndGet(); next[0] = factory.get(); start[0] = System.nanoTime(); } else{ request( upstream,1l); } } catch (Throwable t) { onError.accept(t); } } ,t->{onError.accept(t); sub.requested.decrementAndGet(); if(sub.isActive()) request( 
upstream,1); },()->{ if(next[0].size()>0) { try { onNext.accept(finalizer.apply((C) next[0])); } catch(Throwable t){ onError.accept(t); } sub.requested.decrementAndGet(); } sub.cancel(); onComplete.run(); }); return sub; } @Override public void subscribeAll(Consumer<? super R> onNext, Consumer<? super Throwable> onError, Runnable onCompleteDs) { long toRun = t.toNanos(time); PersistentCollection[] next = {factory.get()}; long[] start ={System.nanoTime()}; source.subscribeAll(e-> { try { next[0] = next[0].plus(e); if(System.nanoTime()-start[0] > toRun){ onNext.accept(finalizer.apply((C)next[0])); next[0] = factory.get(); start[0] = System.nanoTime(); } } catch (Throwable t) { onError.accept(t); } } ,onError,()->{ if(next[0].size()>0) { try { onNext.accept(finalizer.apply((C) next[0])); } catch(Throwable t){ onError.accept(t); } } onCompleteDs.run(); }); } }
Java
<?php /*************************************************************************** * * * (c) 2004 Vladimir V. Kalynyak, Alexey V. Vinokurov, Ilya M. Shalnev * * * * This is commercial software, only users who have purchased a valid * * license and accept to the terms of the License Agreement can install * * and use this program. * * * **************************************************************************** * PLEASE READ THE FULL TEXT OF THE SOFTWARE LICENSE AGREEMENT IN THE * * "copyright.txt" FILE PROVIDED WITH THIS DISTRIBUTION PACKAGE. * ****************************************************************************/ namespace Tygh\Exceptions; class ClassNotFoundException extends AException { }
Java
// +build linux /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package linux import ( "context" "fmt" "io/ioutil" "os" "path/filepath" "time" "github.com/boltdb/bolt" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events/exchange" "github.com/containerd/containerd/identifiers" "github.com/containerd/containerd/linux/proc" "github.com/containerd/containerd/linux/runctypes" shim "github.com/containerd/containerd/linux/shim/v1" "github.com/containerd/containerd/log" "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/runtime" runc "github.com/containerd/go-runc" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux") empty = &ptypes.Empty{} ) const ( configFilename = "config.json" defaultRuntime = "runc" defaultShim = "containerd-shim" ) func init() { plugin.Register(&plugin.Registration{ Type: plugin.RuntimePlugin, ID: "linux", InitFn: New, Requires: []plugin.Type{ 
plugin.TaskMonitorPlugin, plugin.MetadataPlugin, }, Config: &Config{ Shim: defaultShim, Runtime: defaultRuntime, }, }) } var _ = (runtime.Runtime)(&Runtime{}) // Config options for the runtime type Config struct { // Shim is a path or name of binary implementing the Shim GRPC API Shim string `toml:"shim"` // Runtime is a path or name of an OCI runtime used by the shim Runtime string `toml:"runtime"` // RuntimeRoot is the path that shall be used by the OCI runtime for its data RuntimeRoot string `toml:"runtime_root"` // NoShim calls runc directly from within the pkg NoShim bool `toml:"no_shim"` // Debug enable debug on the shim ShimDebug bool `toml:"shim_debug"` } // New returns a configured runtime func New(ic *plugin.InitContext) (interface{}, error) { ic.Meta.Platforms = []ocispec.Platform{platforms.DefaultSpec()} if err := os.MkdirAll(ic.Root, 0711); err != nil { return nil, err } if err := os.MkdirAll(ic.State, 0711); err != nil { return nil, err } monitor, err := ic.Get(plugin.TaskMonitorPlugin) if err != nil { return nil, err } m, err := ic.Get(plugin.MetadataPlugin) if err != nil { return nil, err } cfg := ic.Config.(*Config) r := &Runtime{ root: ic.Root, state: ic.State, monitor: monitor.(runtime.TaskMonitor), tasks: runtime.NewTaskList(), db: m.(*metadata.DB), address: ic.Address, events: ic.Events, config: cfg, } tasks, err := r.restoreTasks(ic.Context) if err != nil { return nil, err } // TODO: need to add the tasks to the monitor for _, t := range tasks { if err := r.tasks.AddWithNamespace(t.namespace, t); err != nil { return nil, err } } return r, nil } // Runtime for a linux based system type Runtime struct { root string state string address string monitor runtime.TaskMonitor tasks *runtime.TaskList db *metadata.DB events *exchange.Exchange config *Config } // ID of the runtime func (r *Runtime) ID() string { return pluginID } // Create a new task func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (_ runtime.Task, err 
error) { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err } if err := identifiers.Validate(id); err != nil { return nil, errors.Wrapf(err, "invalid task id") } ropts, err := r.getRuncOptions(ctx, id) if err != nil { return nil, err } bundle, err := newBundle(id, filepath.Join(r.state, namespace), filepath.Join(r.root, namespace), opts.Spec.Value) if err != nil { return nil, err } defer func() { if err != nil { bundle.Delete() } }() shimopt := ShimLocal(r.config, r.events) if !r.config.NoShim { var cgroup string if opts.Options != nil { v, err := typeurl.UnmarshalAny(opts.Options) if err != nil { return nil, err } cgroup = v.(*runctypes.CreateOptions).ShimCgroup } exitHandler := func() { log.G(ctx).WithField("id", id).Info("shim reaped") t, err := r.tasks.Get(ctx, id) if err != nil { // Task was never started or was already sucessfully deleted return } lc := t.(*Task) // Stop the monitor if err := r.monitor.Stop(lc); err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ "id": id, "namespace": namespace, }).Warn("failed to stop monitor") } log.G(ctx).WithFields(logrus.Fields{ "id": id, "namespace": namespace, }).Warn("cleaning up after killed shim") if err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id, lc.pid); err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ "id": id, "namespace": namespace, }).Warn("failed to clen up after killed shim") } } shimopt = ShimRemote(r.config, r.address, cgroup, exitHandler) } s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts) if err != nil { return nil, err } defer func() { if err != nil { if kerr := s.KillShim(ctx); kerr != nil { log.G(ctx).WithError(err).Error("failed to kill shim") } } }() rt := r.config.Runtime if ropts != nil && ropts.Runtime != "" { rt = ropts.Runtime } sopts := &shim.CreateTaskRequest{ ID: id, Bundle: bundle.path, Runtime: rt, Stdin: opts.IO.Stdin, Stdout: opts.IO.Stdout, Stderr: opts.IO.Stderr, Terminal: 
opts.IO.Terminal, Checkpoint: opts.Checkpoint, Options: opts.Options, } for _, m := range opts.Rootfs { sopts.Rootfs = append(sopts.Rootfs, &types.Mount{ Type: m.Type, Source: m.Source, Options: m.Options, }) } cr, err := s.Create(ctx, sopts) if err != nil { return nil, errdefs.FromGRPC(err) } t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor, r.events, proc.NewRunc(ropts.RuntimeRoot, sopts.Bundle, namespace, rt, ropts.CriuPath, ropts.SystemdCgroup)) if err != nil { return nil, err } if err := r.tasks.Add(ctx, t); err != nil { return nil, err } // after the task is created, add it to the monitor if it has a cgroup // this can be different on a checkpoint/restore if t.cg != nil { if err = r.monitor.Monitor(t); err != nil { if _, err := r.Delete(ctx, t); err != nil { log.G(ctx).WithError(err).Error("deleting task after failed monitor") } return nil, err } } r.events.Publish(ctx, runtime.TaskCreateEventTopic, &eventstypes.TaskCreate{ ContainerID: sopts.ID, Bundle: sopts.Bundle, Rootfs: sopts.Rootfs, IO: &eventstypes.TaskIO{ Stdin: sopts.Stdin, Stdout: sopts.Stdout, Stderr: sopts.Stderr, Terminal: sopts.Terminal, }, Checkpoint: sopts.Checkpoint, Pid: uint32(t.pid), }) return t, nil } // Delete a task removing all on disk state func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) { namespace, err := namespaces.NamespaceRequired(ctx) if err != nil { return nil, err } lc, ok := c.(*Task) if !ok { return nil, fmt.Errorf("task cannot be cast as *linux.Task") } if err := r.monitor.Stop(lc); err != nil { return nil, err } bundle := loadBundle( lc.id, filepath.Join(r.state, namespace, lc.id), filepath.Join(r.root, namespace, lc.id), ) rsp, err := lc.shim.Delete(ctx, empty) if err != nil { if cerr := r.cleanupAfterDeadShim(ctx, bundle, namespace, c.ID(), lc.pid); cerr != nil { log.G(ctx).WithError(err).Error("unable to cleanup task") } return nil, errdefs.FromGRPC(err) } r.tasks.Delete(ctx, lc.id) if err := lc.shim.KillShim(ctx); err != 
nil { log.G(ctx).WithError(err).Error("failed to kill shim") } if err := bundle.Delete(); err != nil { log.G(ctx).WithError(err).Error("failed to delete bundle") } r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ ContainerID: lc.id, ExitStatus: rsp.ExitStatus, ExitedAt: rsp.ExitedAt, Pid: rsp.Pid, }) return &runtime.Exit{ Status: rsp.ExitStatus, Timestamp: rsp.ExitedAt, Pid: rsp.Pid, }, nil } // Tasks returns all tasks known to the runtime func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) { return r.tasks.GetAll(ctx) } func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) { dir, err := ioutil.ReadDir(r.state) if err != nil { return nil, err } var o []*Task for _, namespace := range dir { if !namespace.IsDir() { continue } name := namespace.Name() log.G(ctx).WithField("namespace", name).Debug("loading tasks in namespace") tasks, err := r.loadTasks(ctx, name) if err != nil { return nil, err } o = append(o, tasks...) } return o, nil } // Get a specific task by task id func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) { return r.tasks.Get(ctx, id) } func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) { dir, err := ioutil.ReadDir(filepath.Join(r.state, ns)) if err != nil { return nil, err } var o []*Task for _, path := range dir { if !path.IsDir() { continue } id := path.Name() bundle := loadBundle( id, filepath.Join(r.state, ns, id), filepath.Join(r.root, ns, id), ) ctx = namespaces.WithNamespace(ctx, ns) pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile)) s, err := bundle.NewShimClient(ctx, ns, ShimConnect(r.config, func() { err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) if err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). 
Error("cleaning up after dead shim") } }), nil) if err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ "id": id, "namespace": ns, }).Error("connecting to shim") err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid) if err != nil { log.G(ctx).WithError(err).WithField("bundle", bundle.path). Error("cleaning up after dead shim") } continue } ropts, err := r.getRuncOptions(ctx, id) if err != nil { log.G(ctx).WithError(err).WithField("id", id). Error("get runtime options") continue } t, err := newTask(id, ns, pid, s, r.monitor, r.events, proc.NewRunc(ropts.RuntimeRoot, bundle.path, ns, ropts.Runtime, ropts.CriuPath, ropts.SystemdCgroup)) if err != nil { log.G(ctx).WithError(err).Error("loading task type") continue } o = append(o, t) } return o, nil } func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string, pid int) error { ctx = namespaces.WithNamespace(ctx, ns) if err := r.terminate(ctx, bundle, ns, id); err != nil { if r.config.ShimDebug { return errors.Wrap(err, "failed to terminate task, leaving bundle for debugging") } log.G(ctx).WithError(err).Warn("failed to terminate task") } // Notify Client exitedAt := time.Now().UTC() r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventstypes.TaskExit{ ContainerID: id, ID: id, Pid: uint32(pid), ExitStatus: 128 + uint32(unix.SIGKILL), ExitedAt: exitedAt, }) r.tasks.Delete(ctx, id) if err := bundle.Delete(); err != nil { log.G(ctx).WithError(err).Error("delete bundle") } r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{ ContainerID: id, Pid: uint32(pid), ExitStatus: 128 + uint32(unix.SIGKILL), ExitedAt: exitedAt, }) return nil } func (r *Runtime) terminate(ctx context.Context, bundle *bundle, ns, id string) error { rt, err := r.getRuntime(ctx, ns, id) if err != nil { return err } if err := rt.Delete(ctx, id, &runc.DeleteOpts{ Force: true, }); err != nil { log.G(ctx).WithError(err).Warnf("delete runtime state %s", id) } if err := 
mount.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil { log.G(ctx).WithError(err).WithFields(logrus.Fields{ "path": bundle.path, "id": id, }).Warnf("unmount task rootfs") } return nil } func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, error) { ropts, err := r.getRuncOptions(ctx, id) if err != nil { return nil, err } var ( cmd = r.config.Runtime root = proc.RuncRoot ) if ropts != nil { if ropts.Runtime != "" { cmd = ropts.Runtime } if ropts.RuntimeRoot != "" { root = ropts.RuntimeRoot } } return &runc.Runc{ Command: cmd, LogFormat: runc.JSON, PdeathSignal: unix.SIGKILL, Root: filepath.Join(root, ns), }, nil } func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runctypes.RuncOptions, error) { var container containers.Container if err := r.db.View(func(tx *bolt.Tx) error { store := metadata.NewContainerStore(tx) var err error container, err = store.Get(ctx, id) return err }); err != nil { return nil, err } if container.Runtime.Options != nil { v, err := typeurl.UnmarshalAny(container.Runtime.Options) if err != nil { return nil, err } ropts, ok := v.(*runctypes.RuncOptions) if !ok { return nil, errors.New("invalid runtime options format") } return ropts, nil } return &runctypes.RuncOptions{}, nil }
Java
package net.tcp.socket;

import java.io.DataOutputStream;
import java.io.IOException;
import java.net.ServerSocket;
import java.net.Socket;

/**
 * Minimal TCP greeting server. The server must be started before any client
 * connects.
 *
 * <ol>
 *   <li>Create the server on a fixed port: {@code ServerSocket(int port)}</li>
 *   <li>Accept client connections (blocking)</li>
 *   <li>Send the greeting message to each client</li>
 * </ol>
 */
public class Server {

	/**
	 * Accepts clients forever on port 8888 and sends each one a UTF greeting.
	 *
	 * @param args unused
	 * @throws IOException if the server socket cannot be opened or a client
	 *         stream fails
	 */
	public static void main(String[] args) throws IOException {
		// 1. Create the server on port 8888; try-with-resources guarantees the
		// listening socket is released even if accept() throws.
		try (ServerSocket server = new ServerSocket(8888)) {
			// 2. Accept client connections (blocking call).
			while (true) {
				// Close the per-client socket and stream after the greeting is
				// sent; the original leaked one socket per connection.
				try (Socket socket = server.accept();
						DataOutputStream dos = new DataOutputStream(socket.getOutputStream())) {
					System.out.println("一个客户端建立连接");
					// 3. Send the greeting ("welcome") in modified-UTF-8 framing.
					String msg = "欢迎使用";
					dos.writeUTF(msg);
					dos.flush();
				}
			}
		}
	}
}
Java
/* * Copyright (c) 2002-2018 "Neo Technology," * Network Engine for Objects in Lund AB [http://neotechnology.com] * * This file is part of Neo4j. * * Neo4j is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.neo4j.management.impl; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Hashtable; import java.util.List; import java.util.NoSuchElementException; import javax.management.MBeanServerConnection; import javax.management.MalformedObjectNameException; import javax.management.ObjectInstance; import javax.management.ObjectName; import org.neo4j.jmx.ManagementInterface; /** * Does not have any public methods - since the public interface of * {@link org.neo4j.management.Neo4jManager} should be defined completely in * that class. * * Does not have any (direct or transitive) dependencies on any part of the jmx * component - since this class is used in * {@link org.neo4j.management.impl.jconsole.Neo4jPlugin the JConsole plugin}, * and the jmx component is not on the class path in JConsole. 
* * @author Tobias Ivarsson <tobias.ivarsson@neotechnology.com> */ public abstract class KernelProxy { static final String KERNEL_BEAN_TYPE = "org.neo4j.jmx.Kernel"; protected static final String KERNEL_BEAN_NAME = "Kernel"; static final String MBEAN_QUERY = "MBeanQuery"; protected final MBeanServerConnection server; protected final ObjectName kernel; protected KernelProxy( MBeanServerConnection server, ObjectName kernel ) { String className = null; try { className = server.getMBeanInfo( kernel ).getClassName(); } catch ( Exception e ) { // fall through } if ( !KERNEL_BEAN_TYPE.equals( className ) ) { throw new IllegalArgumentException( "The specified ObjectName does not represent a Neo4j Kernel bean in the specified MBean server." ); } this.server = server; this.kernel = kernel; } protected List<Object> allBeans() { List<Object> beans = new ArrayList<Object>(); Iterable<ObjectInstance> mbeans; try { mbeans = server.queryMBeans( mbeanQuery(), null ); } catch ( IOException handled ) { return beans; } for ( ObjectInstance instance : mbeans ) { String className = instance.getClassName(); Class<?> beanType = null; try { if ( className != null ) beanType = Class.forName( className ); } catch ( Exception ignored ) { // fall through } catch ( LinkageError ignored ) { // fall through } if ( beanType != null ) { try { beans.add( BeanProxy.load( server, beanType, instance.getObjectName() ) ); } catch ( Exception ignored ) { // fall through } } } return beans; } private ObjectName assertExists( ObjectName name ) { try { if ( !server.queryNames( name, null ).isEmpty() ) { return name; } } catch ( IOException handled ) { // fall through } throw new NoSuchElementException( "No MBeans matching " + name ); } protected <T> T getBean( Class<T> beanInterface ) { return BeanProxy.load( server, beanInterface, createObjectName( beanInterface ) ); } protected <T> Collection<T> getBeans( Class<T> beanInterface ) { return BeanProxy.loadAll( server, beanInterface, createObjectNameQuery( 
beanInterface ) ); } private ObjectName createObjectNameQuery( Class<?> beanInterface ) { return createObjectNameQuery( mbeanQuery(), beanInterface ); } private ObjectName createObjectName( Class<?> beanInterface ) { return assertExists( createObjectName( mbeanQuery(), beanInterface ) ); } protected ObjectName createObjectName( String beanName ) { return assertExists( createObjectName( mbeanQuery(), beanName, false ) ); } protected ObjectName mbeanQuery() { try { return (ObjectName) server.getAttribute( kernel, MBEAN_QUERY ); } catch ( Exception cause ) { throw new IllegalStateException( "Could not get MBean query.", cause ); } } protected static ObjectName createObjectName( String kernelIdentifier, Class<?> beanInterface ) { return createObjectName( kernelIdentifier, beanName( beanInterface ) ); } protected static ObjectName createObjectName( String kernelIdentifier, String beanName, String... extraNaming ) { Hashtable<String, String> properties = new Hashtable<String, String>(); properties.put( "instance", "kernel#" + kernelIdentifier ); return createObjectName( "org.neo4j", properties, beanName, false, extraNaming ); } static ObjectName createObjectNameQuery( String kernelIdentifier, String beanName, String... 
extraNaming ) { Hashtable<String, String> properties = new Hashtable<String, String>(); properties.put( "instance", "kernel#" + kernelIdentifier ); return createObjectName( "org.neo4j", properties, beanName, true, extraNaming ); } static ObjectName createObjectName( ObjectName query, Class<?> beanInterface ) { return createObjectName( query, beanName( beanInterface ), false ); } static ObjectName createObjectNameQuery( ObjectName query, Class<?> beanInterface ) { return createObjectName( query, beanName( beanInterface ), true ); } private static ObjectName createObjectName( ObjectName query, String beanName, boolean isQuery ) { Hashtable<String, String> properties = new Hashtable<String, String>(query.getKeyPropertyList()); return createObjectName( query.getDomain(), properties, beanName, isQuery ); } static String beanName( Class<?> beanInterface ) { if ( beanInterface.isInterface() ) { ManagementInterface management = beanInterface.getAnnotation( ManagementInterface.class ); if ( management != null ) { return management.name(); } } throw new IllegalArgumentException( beanInterface + " is not a Neo4j Management Been interface" ); } private static ObjectName createObjectName( String domain, Hashtable<String, String> properties, String beanName, boolean query, String... extraNaming ) { properties.put( "name", beanName ); for ( int i = 0; i < extraNaming.length; i++ ) { properties.put( "name" + i, extraNaming[i] ); } ObjectName result; try { result = new ObjectName( domain, properties ); if ( query ) result = ObjectName.getInstance( result.toString() + ",*" ); } catch ( MalformedObjectNameException e ) { return null; } return result; } }
Java
<!--//页头--> <div class="contanier"> <div class="link right"> <span>第一次使用微信?</span> <a href="#">立即注册</a> <a href="#">腾讯客服</a> </div> <div class="logo"> <img src="../img/talk_bg.png" alt=""> <span>微信,是一种生活方式</span> </div> </div>
Java
# AUTOGENERATED FILE
FROM balenalib/bananapi-m1-plus-ubuntu:disco-build

# Pinned Node.js and Yarn versions installed below.
ENV NODE_VERSION 10.23.1
ENV YARN_VERSION 1.22.4

# Import the Node.js release signing key (with keyserver fallbacks), then
# download, checksum-verify and unpack the ARMv7 Node.js tarball into
# /usr/local, and install a GPG-verified Yarn under /opt/yarn.
RUN for key in \
	6A010C5166006599AA17F08146C2130DFD2497F5 \
	; do \
		gpg --keyserver pgp.mit.edu --recv-keys "$key" || \
		gpg --keyserver keyserver.pgp.com --recv-keys "$key" || \
		gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" ; \
	done \
	&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-armv7l.tar.gz" \
	&& echo "8f965f2757efcf3077d655bfcea36f7a29c58958355e0eb23cfb725740c3ccbe node-v$NODE_VERSION-linux-armv7l.tar.gz" | sha256sum -c - \
	&& tar -xzf "node-v$NODE_VERSION-linux-armv7l.tar.gz" -C /usr/local --strip-components=1 \
	&& rm "node-v$NODE_VERSION-linux-armv7l.tar.gz" \
	&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
	&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
	&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
	&& mkdir -p /opt/yarn \
	&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
	&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
	&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
	&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
	&& npm config set unsafe-perm true -g --unsafe-perm \
	&& rm -rf /tmp/*

# Placeholder CMD: the generated base image intentionally ships without a
# real default command and just points users at the docs.
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]

# Smoke-test the installed Node.js/Yarn stack with balena's test script,
# then remove the script again.
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@node.sh" \
	&& echo "Running test-stack@node" \
	&& chmod +x test-stack@node.sh \
	&& bash test-stack@node.sh \
	&& rm -rf test-stack@node.sh

# Record image metadata displayed by the `balena-info` command.
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Ubuntu disco \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v10.23.1, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info

# Shim /bin/sh so the first shell invocation prints the image-info banner,
# then restores the real shell and re-executes the requested command.
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
	&& chmod +x /bin/sh-shim \
	&& cp /bin/sh /bin/sh.real \
	&& mv /bin/sh-shim /bin/sh
Java
package de.mhus.cha.cao.action;

import java.io.File;

import de.mhus.lib.cao.CaoElement;
import de.mhus.lib.cao.CaoException;
import de.mhus.lib.cao.CaoList;
import de.mhus.lib.cao.CaoMonitor;
import de.mhus.lib.cao.CaoOperation;
import de.mhus.cap.core.Access;
import de.mhus.cha.cao.ChaConnection;
import de.mhus.cha.cao.ChaElement;
import de.mhus.lib.MFile;
import de.mhus.lib.form.MForm;
import de.mhus.lib.form.annotations.FormElement;
import de.mhus.lib.form.annotations.FormSortId;

/**
 * Operation that recursively copies the source folders into a target folder.
 * Each copied directory gets a fresh UID-based name from the connection, its
 * id/path mapping is registered, and create/link events are fired.
 */
@FormElement("name='cha_copy_to_folder' title='Copy'")
public class CopyToOperation extends CaoOperation implements MForm {

	private CaoList<Access> sources;
	private ChaElement target;
	private ChaConnection connection;

	public CopyToOperation(ChaElement element) {
		target = element;
	}

	@Override
	public void dispose() throws CaoException {
	}

	@Override
	public void execute() throws CaoException {
		connection = (ChaConnection) target.getConnection();

		// First pass: count all affected directories so the monitor can show progress.
		monitor.beginTask("count", CaoMonitor.UNKNOWN);
		int cnt = 0;
		for (CaoElement<Access> element : sources.getElements()) {
			cnt = count(((ChaElement) element).getFile(), cnt);
		}

		// Second pass: perform the actual copy.
		monitor.beginTask("copy", cnt);
		cnt = 0;
		for (CaoElement<Access> element : sources.getElements()) {
			cnt = copy(target.getFile(), ((ChaElement) element).getFile(), cnt);
		}
	}

	/**
	 * Recursively copies the directory {@code file} into {@code target}.
	 *
	 * @param target parent directory to copy into
	 * @param file   source directory (non-directories are ignored)
	 * @param cnt    running progress counter
	 * @return the updated progress counter
	 */
	private int copy(File target, File file, int cnt) {
		// validate action
		if (monitor.isCanceled()) return cnt;
		if (!file.isDirectory()) return cnt; // for secure

		// new path: the copied directory gets a fresh UID as its name
		File newTarget = null;
		cnt++;
		monitor.worked(cnt);
		newTarget = new File(target, connection.createUID());
		monitor.log().debug("Create Dir: " + newTarget.getAbsolutePath());
		monitor.subTask(file.getAbsolutePath());

		// validate path
		if (newTarget.exists()) {
			monitor.log().warn("Folder already exists: " + newTarget.getAbsolutePath());
			return cnt;
		}

		// create
		if (!newTarget.mkdir()) {
			newTarget = null;
			monitor.log().warn("Can't create folder: " + target.getAbsolutePath() + "/" + file.getName());
			return cnt;
		}

		// set id
		connection.addIdPath(newTarget.getName(), newTarget.getAbsolutePath());

		// events
		connection.fireElementCreated(newTarget.getName());
		connection.fireElementLink(target.getName(), newTarget.getName());

		// copy files
		// BUGFIX: previously this loop copied "file" (the source directory
		// itself) into "target" (the old parent). It must copy each child
		// file "sub" into the newly created directory "newTarget".
		for (File sub : file.listFiles()) {
			if (sub.isFile()) {
				monitor.log().debug("Copy File: " + sub.getAbsolutePath());
				File targetFile = new File(newTarget, sub.getName());
				if (targetFile.exists()) {
					monitor.log().warn("Can't overwrite file: " + sub.getAbsolutePath());
				} else if (!MFile.copyFile(sub, targetFile)) {
					monitor.log().warn("Can't copy file: " + sub.getAbsolutePath());
				}
			}
		}

		// copy sub folders
		for (File sub : file.listFiles(connection.getDefaultFileFilter())) {
			cnt = copy(newTarget, sub, cnt);
		}
		return cnt;
	}

	/**
	 * Recursively counts the directories below {@code file} (including itself).
	 */
	private int count(File file, int cnt) {
		if (monitor.isCanceled()) return cnt;
		if (file.isDirectory()) cnt++;
		if (!file.isDirectory()) return cnt; // for secure
		for (File sub : file.listFiles(connection.getDefaultFileFilter())) {
			cnt = count(sub, cnt);
		}
		return cnt;
	}

	@Override
	public void initialize() throws CaoException {
	}

	public void setSources(CaoList<Access> list) {
		sources = list;
	}
}
Java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.phoenix.util.csv; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Timestamp; import java.sql.Types; import java.util.Base64; import java.util.List; import java.util.Properties; import javax.annotation.Nullable; import org.apache.commons.csv.CSVRecord; import org.apache.hadoop.hbase.util.Bytes; import org.apache.phoenix.expression.function.EncodeFormat; import org.apache.phoenix.jdbc.PhoenixConnection; import org.apache.phoenix.query.QueryServices; import org.apache.phoenix.query.QueryServicesOptions; import org.apache.phoenix.schema.IllegalDataException; import org.apache.phoenix.schema.types.PBinary; import org.apache.phoenix.schema.types.PBoolean; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDataType.PDataCodec; import org.apache.phoenix.schema.types.PTimestamp; import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.util.ColumnInfo; import org.apache.phoenix.util.DateUtil; import org.apache.phoenix.util.ReadOnlyProps; import org.apache.phoenix.util.UpsertExecutor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;

/** {@link UpsertExecutor} over {@link CSVRecord}s. */
public class CsvUpsertExecutor extends UpsertExecutor<CSVRecord, String> {

    private static final Logger LOG = LoggerFactory.getLogger(CsvUpsertExecutor.class);

    // Separator used to split array-typed CSV fields into individual elements.
    protected final String arrayElementSeparator;

    /** Testing constructor. Do not use in prod. */
    @VisibleForTesting
    protected CsvUpsertExecutor(Connection conn, List<ColumnInfo> columnInfoList,
            PreparedStatement stmt, UpsertListener<CSVRecord> upsertListener,
            String arrayElementSeparator) {
        super(conn, columnInfoList, stmt, upsertListener);
        this.arrayElementSeparator = arrayElementSeparator;
        finishInit();
    }

    /**
     * Creates an executor that upserts {@link CSVRecord}s into {@code tableName}.
     *
     * @param conn connection to upsert through
     * @param tableName target table
     * @param columnInfoList columns in CSV-field order
     * @param upsertListener notified of successes and per-record failures
     * @param arrayElementSeparator separator for array-typed fields
     */
    public CsvUpsertExecutor(Connection conn, String tableName,
            List<ColumnInfo> columnInfoList, UpsertListener<CSVRecord> upsertListener,
            String arrayElementSeparator) {
        super(conn, tableName, columnInfoList, upsertListener);
        this.arrayElementSeparator = arrayElementSeparator;
        finishInit();
    }

    /**
     * Converts one CSV record field-by-field, binds the values to the prepared
     * upsert statement and executes it. Failures are reported to the listener
     * rather than thrown.
     */
    @Override
    protected void execute(CSVRecord csvRecord) {
        try {
            // Reject short records up front; extra trailing fields are ignored.
            if (csvRecord.size() < conversionFunctions.size()) {
                String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
                        csvRecord.size(), conversionFunctions.size());
                throw new IllegalArgumentException(message);
            }
            for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
                Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
                // JDBC parameter indexes are 1-based.
                if (sqlValue != null) {
                    preparedStatement.setObject(fieldIndex + 1, sqlValue);
                } else {
                    preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType());
                }
            }
            preparedStatement.execute();
            upsertListener.upsertDone(++upsertCount);
        } catch (Exception e) {
            if (LOG.isDebugEnabled()) {
                // Even though this is an error we only log it with debug logging because we're notifying the
                // listener, and it can do its own logging if needed
                LOG.debug("Error on CSVRecord " + csvRecord, e);
            }
            upsertListener.errorOnRecord(csvRecord, e);
        }
    }

    /**
     * Builds the String-to-SQL-value converter for a column: array columns get
     * an element-wise converter, everything else a simple scalar converter.
     */
    @Override
    protected Function<String, Object> createConversionFunction(PDataType dataType) {
        if (dataType.isArrayType()) {
            return new ArrayDatatypeConversionFunction(
                    new StringToArrayConverter(
                            conn,
                            arrayElementSeparator,
                            // Element type id = array type id minus the array base offset.
                            PDataType.fromTypeId(dataType.getSqlType()
                                    - PDataType.ARRAY_TYPE_BASE)));
        } else {
            return new SimpleDatatypeConversionFunction(dataType, this.conn);
        }
    }

    /**
     * Performs typed conversion from String values to a given column value type.
     */
    static class SimpleDatatypeConversionFunction implements Function<String, Object> {

        private final PDataType dataType;
        // Codec used to encode parsed date/time epoch values; null-safe only
        // when dateTimeParser is null as well.
        private final PDataCodec codec;
        // Non-null only for date/time-coercible column types.
        private final DateUtil.DateTimeParser dateTimeParser;
        // Configured encoding ("BASE64"/"ASCII") for binary columns.
        private final String binaryEncoding;

        SimpleDatatypeConversionFunction(PDataType dataType, Connection conn) {
            ReadOnlyProps props;
            try {
                props = conn.unwrap(PhoenixConnection.class).getQueryServices().getProps();
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
            this.dataType = dataType;
            PDataCodec codec = dataType.getCodec();
            if(dataType.isCoercibleTo(PTimestamp.INSTANCE)) {
                codec = DateUtil.getCodecFor(dataType);
                // TODO: move to DateUtil
                // Pick the configured format matching the column's SQL type.
                String dateFormat;
                int dateSqlType = dataType.getResultSetSqlType();
                if (dateSqlType == Types.DATE) {
                    dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_DATE_FORMAT);
                } else if (dateSqlType == Types.TIME) {
                    dateFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_TIME_FORMAT);
                } else {
                    dateFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB,
                            DateUtil.DEFAULT_TIMESTAMP_FORMAT);
                }
                String timeZoneId = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB,
                        QueryServicesOptions.DEFAULT_DATE_FORMAT_TIMEZONE);
                this.dateTimeParser = DateUtil.getDateTimeParser(dateFormat, dataType, timeZoneId);
            } else {
                this.dateTimeParser = null;
            }
            this.codec = codec;
            this.binaryEncoding = props.get(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING,
                    QueryServicesOptions.DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING);
        }

        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            // Empty/absent CSV fields map to SQL NULL.
            if (input == null || input.isEmpty()) {
                return null;
            }
            if (dataType == PTimestamp.INSTANCE) {
                return DateUtil.parseTimestamp(input);
            }
            if (dateTimeParser != null) {
                // Parse to epoch millis, then encode into the column's binary form.
                long epochTime = dateTimeParser.parseDateTime(input);
                byte[] byteValue = new byte[dataType.getByteSize()];
                codec.encodeLong(epochTime, byteValue, 0);
                return dataType.toObject(byteValue);
            } else if (dataType == PBoolean.INSTANCE) {
                switch (input.toLowerCase()) {
                case "true":
                case "t":
                case "1":
                    return Boolean.TRUE;
                case "false":
                case "f":
                case "0":
                    return Boolean.FALSE;
                default:
                    throw new RuntimeException("Invalid boolean value: '" + input
                            + "', must be one of ['true','t','1','false','f','0']");
                }
            } else if (dataType == PVarbinary.INSTANCE || dataType == PBinary.INSTANCE) {
                EncodeFormat format = EncodeFormat.valueOf(binaryEncoding.toUpperCase());
                Object object = null;
                switch (format) {
                case BASE64:
                    object = Base64.getDecoder().decode(input);
                    if (object == null) {
                        throw new IllegalDataException(
                                "Input: [" + input + "]  is not base64 encoded");
                    }
                    break;
                case ASCII:
                    object = Bytes.toBytes(input);
                    break;
                default:
                    throw new IllegalDataException("Unsupported encoding \"" + binaryEncoding + "\"");
                }
                return object;
            }
            // Fallback: let the Phoenix type parse the raw string itself.
            return dataType.toObject(input);
        }
    }

    /**
     * Converts string representations of arrays into Phoenix arrays of the correct type.
     */
    private static class ArrayDatatypeConversionFunction implements Function<String, Object> {

        private final StringToArrayConverter arrayConverter;

        private ArrayDatatypeConversionFunction(StringToArrayConverter arrayConverter) {
            this.arrayConverter = arrayConverter;
        }

        @Nullable
        @Override
        public Object apply(@Nullable String input) {
            try {
                return arrayConverter.toArray(input);
            } catch (SQLException e) {
                // Converter works through JDBC; surface failures unchecked.
                throw new RuntimeException(e);
            }
        }
    }
}
Java
/*
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.drools.reteoo.common;

import org.drools.core.SessionConfiguration;
import org.drools.core.WorkingMemoryEntryPoint;
import org.drools.core.base.DroolsQuery;
import org.drools.core.common.BaseNode;
import org.drools.core.common.InternalAgenda;
import org.drools.core.common.InternalFactHandle;
import org.drools.core.common.InternalWorkingMemory;
import org.drools.core.common.WorkingMemoryAction;
import org.drools.core.event.AgendaEventSupport;
import org.drools.core.event.RuleEventListenerSupport;
import org.drools.core.event.RuleRuntimeEventSupport;
import org.drools.core.impl.InternalKnowledgeBase;
import org.drools.core.impl.StatefulKnowledgeSessionImpl;
import org.drools.core.phreak.PropagationEntry;
import org.drools.core.reteoo.LIANodePropagation;
import org.drools.core.spi.FactHandleFactory;
import org.drools.core.spi.PropagationContext;
import org.kie.api.runtime.Environment;
import org.kie.api.runtime.rule.AgendaFilter;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Stateful session implementation for the classic Rete network. Adds a
 * queue of {@link WorkingMemoryAction}s drained by
 * {@link #executeQueuedActionsForRete()} and lazy initialization of the
 * initial fact on top of {@link StatefulKnowledgeSessionImpl}.
 */
public class ReteWorkingMemory extends StatefulKnowledgeSessionImpl {

    // Propagations for left-input-adapter nodes; replayed before firing when
    // the session runs in sequential mode. Lazily created - may be null.
    private List<LIANodePropagation> liaPropagations;

    // Pending working-memory actions, drained by executeQueuedActionsForRete().
    private Queue<WorkingMemoryAction> actionQueue;

    // Guards against re-entrant draining of the action queue.
    private AtomicBoolean evaluatingActionQueue = new AtomicBoolean(false);

    /** Flag to determine if a rule is currently being fired. */
    private volatile AtomicBoolean firing = new AtomicBoolean(false);

    public ReteWorkingMemory() {

    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase) {
        super(id, kBase);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, boolean initInitFactHandle, SessionConfiguration config, Environment environment) {
        super(id, kBase, initInitFactHandle, config, environment);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase, FactHandleFactory handleFactory, long propagationContext, SessionConfiguration config, InternalAgenda agenda, Environment environment) {
        super(id, kBase, handleFactory, propagationContext, config, agenda, environment);
    }

    public ReteWorkingMemory(long id, InternalKnowledgeBase kBase,
                             FactHandleFactory handleFactory,
                             InternalFactHandle initialFactHandle,
                             long propagationContext,
                             SessionConfiguration config,
                             Environment environment,
                             RuleRuntimeEventSupport workingMemoryEventSupport,
                             AgendaEventSupport agendaEventSupport,
                             RuleEventListenerSupport ruleEventListenerSupport,
                             InternalAgenda agenda) {
        // NOTE(review): the initialFactHandle parameter is not forwarded; the
        // super call passes "false" for initInitFactHandle instead.
        super(id, kBase, handleFactory, false, propagationContext, config, environment, workingMemoryEventSupport, agendaEventSupport, ruleEventListenerSupport, agenda);
    }

    /** Sets up the Rete-specific action queue and propagation list. */
    @Override
    protected void init() {
        this.actionQueue = new ConcurrentLinkedQueue<WorkingMemoryAction>();
        this.propagationList = new RetePropagationList(this);
    }

    @Override
    public void reset() {
        super.reset();
        actionQueue.clear();
    }

    @Override
    public void reset(int handleId,
                      long handleCounter,
                      long propagationCounter) {
        super.reset(handleId, handleCounter, propagationCounter );
        if (liaPropagations != null) liaPropagations.clear();
        actionQueue.clear();
    }

    /**
     * Wraps the named entry point in a Rete-specific adapter; returns null if
     * no entry point with that name exists.
     */
    @Override
    public WorkingMemoryEntryPoint getWorkingMemoryEntryPoint(String name) {
        WorkingMemoryEntryPoint ep = this.entryPoints.get(name);
        return ep != null ? new ReteWorkingMemoryEntryPoint( this, ep ) : null;
    }

    public void addLIANodePropagation(LIANodePropagation liaNodePropagation) {
        if (liaPropagations == null) liaPropagations = new ArrayList<LIANodePropagation>();
        liaPropagations.add( liaNodePropagation );
    }

    // Lock used only for the lazy initial-fact creation below.
    private final Object syncLock = new Object();

    /** Lazily creates the InitialFact handle exactly once (double-checked). */
    public void initInitialFact() {
        if ( initialFactHandle == null ) {
            synchronized ( syncLock ) {
                if ( initialFactHandle == null ) {
                    // double check, inside of sync point incase some other thread beat us to it.
                    initInitialFact(kBase, null);
                }
            }
        }
    }

    @Override
    public void fireUntilHalt(final AgendaFilter agendaFilter) {
        initInitialFact();
        super.fireUntilHalt( agendaFilter );
    }

    /**
     * Fires rules unless a firing is already in progress; the "firing" flag
     * makes concurrent calls no-ops that return 0.
     */
    @Override
    public int fireAllRules(final AgendaFilter agendaFilter,
                            int fireLimit) {
        checkAlive();
        if ( this.firing.compareAndSet( false,
                                        true ) ) {
            initInitialFact();
            try {
                startOperation();
                return internalFireAllRules(agendaFilter, fireLimit);
            } finally {
                endOperation();
                this.firing.set( false );
            }
        }
        return 0;
    }

    private int internalFireAllRules(AgendaFilter agendaFilter, int fireLimit) {
        int fireCount = 0;
        try {
            kBase.readLock();

            // If we're already firing a rule, then it'll pick up the firing for any other assertObject(..) that get
            // nested inside, avoiding concurrent-modification exceptions, depending on code paths of the actions.
            if ( liaPropagations != null && isSequential() ) {
                for ( LIANodePropagation liaPropagation : liaPropagations ) {
                    ( liaPropagation ).doPropagation( this );
                }
            }

            // do we need to call this in advance?
            executeQueuedActionsForRete();

            fireCount = this.agenda.fireAllRules( agendaFilter,
                                                  fireLimit );
        } finally {
            kBase.readUnlock();
            // Re-fire if flushing knowledge-base modifications produced new work.
            if (kBase.flushModifications()) {
                fireCount += internalFireAllRules(agendaFilter, fireLimit);
            }
        }
        return fireCount;
    }

    /**
     * Retracts a live query's fact handle and destroys it, under the session
     * lock and the knowledge base read lock.
     */
    @Override
    public void closeLiveQuery(final InternalFactHandle factHandle) {

        try {
            startOperation();
            this.kBase.readLock();
            this.lock.lock();

            final PropagationContext pCtx = pctxFactory.createPropagationContext(getNextPropagationIdCounter(), PropagationContext.INSERTION,
                                                                                 null, null, factHandle, getEntryPoint());

            getEntryPointNode().retractQuery( factHandle,
                                              pCtx,
                                              this );

            pCtx.evaluateActionQueue(this);

            getFactHandleFactory().destroyFactHandle( factHandle );

        } finally {
            this.lock.unlock();
            this.kBase.readUnlock();
            endOperation();
        }
    }

    /**
     * Asserts the query fact into the network and returns the terminal nodes
     * registered for the named query.
     */
    @Override
    protected BaseNode[] evalQuery(String queryName, DroolsQuery queryObject, InternalFactHandle handle, PropagationContext pCtx) {
        initInitialFact();

        BaseNode[] tnodes = kBase.getReteooBuilder().getTerminalNodesForQuery( queryName );
        // no need to call retract, as no leftmemory used.
        getEntryPointNode().assertQuery( handle,
                                         pCtx,
                                         this );

        pCtx.evaluateActionQueue( this );
        return tnodes;
    }

    public Collection<WorkingMemoryAction> getActionQueue() {
        return actionQueue;
    }

    @Override
    public void queueWorkingMemoryAction(final WorkingMemoryAction action) {
        try {
            startOperation();
            actionQueue.add(action);
            // Wake up a thread blocked waiting for rest (e.g. fireUntilHalt).
            notifyWaitOnRest();
        } finally {
            endOperation();
        }
    }

    /** Routes WorkingMemoryActions into the Rete action queue; everything else to super. */
    public void addPropagation(PropagationEntry propagationEntry) {
        if (propagationEntry instanceof WorkingMemoryAction) {
            actionQueue.add((WorkingMemoryAction) propagationEntry);
        } else {
            super.addPropagation(propagationEntry);
        }
    }

    /**
     * Drains and executes all queued working-memory actions. Re-entrant calls
     * (detected via evaluatingActionQueue) return without doing anything.
     */
    @Override
    public void executeQueuedActionsForRete() {
        try {
            startOperation();
            if ( evaluatingActionQueue.compareAndSet( false,
                                                      true ) ) {
                try {
                    if ( actionQueue!= null && !actionQueue.isEmpty() ) {
                        WorkingMemoryAction action;

                        while ( (action = actionQueue.poll()) != null ) {
                            try {
                                action.execute( (InternalWorkingMemory) this );
                            } catch ( Exception e ) {
                                throw new RuntimeException( "Unexpected exception executing action " + action.toString(),
                                                            e );
                            }
                        }
                    }
                } finally {
                    evaluatingActionQueue.compareAndSet( true,
                                                         false );
                }
            }
        } finally {
            endOperation();
        }
    }

    @Override
    public Iterator<? extends PropagationEntry> getActionsIterator() {
        return actionQueue.iterator();
    }
}
Java
# Senecillis veitchiana (Hemsl.) Kitam. SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
/*
  Copyright (c) DataStax, Inc.

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
*/

#include "integration.hpp"

/**
 * Prepared metadata related tests
 */
class PreparedMetadataTests : public Integration {
public:
  void SetUp() {
    Integration::SetUp();
    // Create a simple int key/value table and seed one row (key=1, value=99)
    // for the SELECT used by the checks below.
    session_.execute(
        format_string(CASSANDRA_KEY_VALUE_TABLE_FORMAT, table_name_.c_str(), "int", "int"));
    session_.execute(
        format_string(CASSANDRA_KEY_VALUE_INSERT_FORMAT, table_name_.c_str(), "1", "99"));
  }

  /**
   * Check the column count of a bound statement before and after adding a
   * column to a table.
   *
   * @param session session used to prepare/execute the statement
   * @param expected_column_count_after_update column count the result metadata
   *        should report after the ALTER (2 if metadata is not refreshed,
   *        3 if it is)
   */
  void prepared_check_column_count_after_alter(Session session,
                                               size_t expected_column_count_after_update) {
    // Prepare once, before the schema change, and reuse the bound statement.
    Statement bound_statement =
        session.prepare(format_string("SELECT * FROM %s WHERE key = 1", table_name_.c_str()))
            .bind();

    // Verify that the table has two columns in the metadata
    {
      Result result = session.execute(bound_statement);
      EXPECT_EQ(2u, result.column_count());
    }

    // Add a column to the table
    session.execute(format_string("ALTER TABLE %s ADD value2 int", table_name_.c_str()));

    // The column count should match the expected after the alter
    {
      Result result = session.execute(bound_statement);
      EXPECT_EQ(expected_column_count_after_update, result.column_count());
    }
  }
};

/**
 * Verify that the column count of a bound statement's result metadata doesn't
 * change for older protocol versions (v4 and less) when a table's schema is altered.
 *
 * @since 2.8
 */
CASSANDRA_INTEGRATION_TEST_F(PreparedMetadataTests, AlterDoesntUpdateColumnCount) {
  CHECK_FAILURE;

  // Ensure beta protocol is not set
  Session session = default_cluster()
                        .with_beta_protocol(false)
                        .with_protocol_version(CASS_PROTOCOL_VERSION_V4)
                        .connect(keyspace_name_);

  // The column count will stay the same even after the alter
  prepared_check_column_count_after_alter(session, 2u);
}

/**
 * Verify that the column count of a bound statement's result metadata is
 * properly updated for newer protocol versions (v5 and greater) when a table's
 * schema is altered.
 *
 * @since 2.8
 */
CASSANDRA_INTEGRATION_TEST_F(PreparedMetadataTests, AlterProperlyUpdatesColumnCount) {
  CHECK_FAILURE;
  // Protocol v5 requires a server version that supports it (4.0.0+).
  CHECK_VERSION(4.0.0);

  // Ensure protocol v5 or greater
  Session session = default_cluster().with_beta_protocol(true).connect(keyspace_name_);

  // The column count will properly update after the alter
  prepared_check_column_count_after_alter(session, 3u);
}
Java
<!DOCTYPE html> <html> <!-- Copyright 2008 The Closure Library Authors. All Rights Reserved. Use of this source code is governed by the Apache License, Version 2.0. See the COPYING file for details. --> <!-- --> <head> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <title>Closure Unit Tests - goog.ui.SliderBase</title> <script src="../base.js"></script> <script type="text/javascript"> goog.require('goog.dom'); goog.require('goog.a11y.aria'); goog.require('goog.a11y.aria.State'); goog.require('goog.dom.classes'); goog.require('goog.events'); goog.require('goog.events.EventType'); goog.require('goog.events.KeyCodes'); goog.require('goog.fx.Animation'); goog.require('goog.math.Coordinate'); goog.require('goog.style'); goog.require('goog.style.bidi'); goog.require('goog.testing.MockClock'); goog.require('goog.testing.MockControl'); goog.require('goog.testing.events'); goog.require('goog.testing.jsunit'); goog.require('goog.testing.mockmatchers'); goog.require('goog.testing.recordFunction'); goog.require('goog.ui.Component'); goog.require('goog.ui.SliderBase'); goog.require('goog.userAgent'); </script> <style type="text/css"> #oneThumbSlider { position: relative; width: 1000px; background: grey; height: 20px; } #oneThumbSlider.test-slider-vertical { height: 1000px; width: 20px; } #twoThumbSlider { position: relative; /* Extra 20px is so distance between thumb centers is 1000px */ width: 1020px; } #valueThumb, #extentThumb { position: absolute; width: 20px; } #thumb { position: absolute; width: 20px; height: 20px; background: black; top: 5px; } .test-slider-vertical > #thumb { left: 5px; top: auto; } #rangeHighlight { position: absolute; } </style> </head> <body> <div id="sandbox"></div> <script type="text/javascript"> var oneThumbSlider; var oneThumbSliderRtl; var oneChangeEventCount; var twoThumbSlider; var twoThumbSliderRtl; var twoChangeEventCount; var mockClock; var mockAnimation; /** * A basic class to implement the abstract goog.ui.SliderBase for testing. 
* @constructor * @extends {goog.ui.SliderBase} */ function OneThumbSlider() { goog.ui.SliderBase.call(this); } goog.inherits(OneThumbSlider, goog.ui.SliderBase); /** {@override} */ OneThumbSlider.prototype.createThumbs = function() { this.valueThumb = this.extentThumb = goog.dom.getElement('thumb'); }; /** {@override} */ OneThumbSlider.prototype.getCssClass = function(orientation) { return goog.getCssName('test-slider', orientation); }; /** * A basic class to implement the abstract goog.ui.SliderBase for testing. * @constructor * @extends {goog.ui.SliderBase} */ function TwoThumbSlider() { goog.ui.SliderBase.call(this); } goog.inherits(TwoThumbSlider, goog.ui.SliderBase); /** {@override} */ TwoThumbSlider.prototype.createThumbs = function() { this.valueThumb = goog.dom.getElement('valueThumb'); this.extentThumb = goog.dom.getElement('extentThumb'); this.rangeHighlight = goog.dom.getElement('rangeHighlight'); }; /** {@override} */ TwoThumbSlider.prototype.getCssClass = function(orientation) { return goog.getCssName('test-slider', orientation); }; /** * Basic class that implements the AnimationFactory interface for testing. * @param {!goog.fx.Animation|!Array.<!goog.fx.Animation>} testAnimations The * test animations to use. 
* @constructor * @implements {goog.ui.SliderBase.AnimationFactory} */ function AnimationFactory(testAnimations) { this.testAnimations = testAnimations; } /** @override */ AnimationFactory.prototype.createAnimations = function() { return this.testAnimations; }; function setUp() { var sandBox = goog.dom.getElement('sandbox'); mockClock = new goog.testing.MockClock(true); var oneThumbElem = goog.dom.createDom( 'div', {'id': 'oneThumbSlider'}, goog.dom.createDom('span', {'id': 'thumb'})); sandBox.appendChild(oneThumbElem); oneThumbSlider = new OneThumbSlider(); oneThumbSlider.decorate(oneThumbElem); oneChangeEventCount = 0; goog.events.listen(oneThumbSlider, goog.ui.Component.EventType.CHANGE, function() { oneChangeEventCount++; }); var twoThumbElem = goog.dom.createDom( 'div', {'id': 'twoThumbSlider'}, goog.dom.createDom('div', {'id': 'rangeHighlight'}), goog.dom.createDom('span', {'id': 'valueThumb'}), goog.dom.createDom('span', {'id': 'extentThumb'})); sandBox.appendChild(twoThumbElem); twoThumbSlider = new TwoThumbSlider(); twoThumbSlider.decorate(twoThumbElem); twoChangeEventCount = 0; goog.events.listen(twoThumbSlider, goog.ui.Component.EventType.CHANGE, function() { twoChangeEventCount++; }); var sandBoxRtl = goog.dom.createDom('div', {'dir': 'rtl', 'style': 'position:absolute;'}); sandBox.appendChild(sandBoxRtl); var oneThumbElemRtl = goog.dom.createDom( 'div', {'id': 'oneThumbSliderRtl'}, goog.dom.createDom('span', {'id': 'thumbRtl'})); sandBoxRtl.appendChild(oneThumbElemRtl); oneThumbSliderRtl = new OneThumbSlider(); oneThumbSliderRtl.enableFlipForRtl(true); oneThumbSliderRtl.decorate(oneThumbElemRtl); goog.events.listen(oneThumbSliderRtl, goog.ui.Component.EventType.CHANGE, function() { oneChangeEventCount++; }); var twoThumbElemRtl = goog.dom.createDom( 'div', {'id': 'twoThumbSliderRtl'}, goog.dom.createDom('div', {'id': 'rangeHighlightRtl'}), goog.dom.createDom('span', {'id': 'valueThumbRtl'}), goog.dom.createDom('span', {'id': 'extentThumbRtl'})); 
sandBoxRtl.appendChild(twoThumbElemRtl); twoThumbSliderRtl = new TwoThumbSlider(); twoThumbSliderRtl.enableFlipForRtl(true); twoThumbSliderRtl.decorate(twoThumbElemRtl); twoChangeEventCount = 0; goog.events.listen(twoThumbSliderRtl, goog.ui.Component.EventType.CHANGE, function() { twoChangeEventCount++; }); } function tearDown() { <<<<<<< HEAD goog.events.removeAll(); ======= goog.events.removeAllNativeListeners(); >>>>>>> newgitrepo oneThumbSlider.dispose(); twoThumbSlider.dispose(); oneThumbSliderRtl.dispose(); twoThumbSliderRtl.dispose(); mockClock.dispose(); goog.dom.getElement('sandbox').innerHTML = ''; } function testGetAndSetValue() { oneThumbSlider.setValue(30); assertEquals(30, oneThumbSlider.getValue()); assertEquals('Setting valid value must dispatch only a single change event.', 1, oneChangeEventCount); oneThumbSlider.setValue(30); assertEquals(30, oneThumbSlider.getValue()); assertEquals('Setting to same value must not dispatch change event.', 1, oneChangeEventCount); oneThumbSlider.setValue(-30); assertEquals('Setting invalid value must not change value.', 30, oneThumbSlider.getValue()); assertEquals('Setting invalid value must not dispatch change event.', 1, oneChangeEventCount); // Value thumb can't go past extent thumb, so we must move that first to // allow setting value. 
twoThumbSlider.setExtent(70); twoChangeEventCount = 0; twoThumbSlider.setValue(60); assertEquals(60, twoThumbSlider.getValue()); assertEquals('Setting valid value must dispatch only a single change event.', 1, twoChangeEventCount); twoThumbSlider.setValue(60); assertEquals(60, twoThumbSlider.getValue()); assertEquals('Setting to same value must not dispatch change event.', 1, twoChangeEventCount); twoThumbSlider.setValue(-60); assertEquals('Setting invalid value must not change value.', 60, twoThumbSlider.getValue()); assertEquals('Setting invalid value must not dispatch change event.', 1, twoChangeEventCount); } function testGetAndSetValueRtl() { var thumbElement = goog.dom.getElement('thumbRtl'); assertEquals(0, goog.style.bidi.getOffsetStart(thumbElement)); assertEquals('', thumbElement.style.left); assertTrue(thumbElement.style.right >= 0); oneThumbSliderRtl.setValue(30); assertEquals(30, oneThumbSliderRtl.getValue()); assertEquals('Setting valid value must dispatch only a single change event.', 1, oneChangeEventCount); assertEquals('', thumbElement.style.left); assertTrue(thumbElement.style.right >= 0); oneThumbSliderRtl.setValue(30); assertEquals(30, oneThumbSliderRtl.getValue()); assertEquals('Setting to same value must not dispatch change event.', 1, oneChangeEventCount); oneThumbSliderRtl.setValue(-30); assertEquals('Setting invalid value must not change value.', 30, oneThumbSliderRtl.getValue()); assertEquals('Setting invalid value must not dispatch change event.', 1, oneChangeEventCount); // Value thumb can't go past extent thumb, so we must move that first to // allow setting value. 
var valueThumbElement = goog.dom.getElement('valueThumbRtl'); var extentThumbElement = goog.dom.getElement('extentThumbRtl'); assertEquals(0, goog.style.bidi.getOffsetStart(valueThumbElement)); assertEquals(0, goog.style.bidi.getOffsetStart(extentThumbElement)); assertEquals('', valueThumbElement.style.left); assertTrue(valueThumbElement.style.right >= 0); assertEquals('', extentThumbElement.style.left); assertTrue(extentThumbElement.style.right >= 0); twoThumbSliderRtl.setExtent(70); twoChangeEventCount = 0; twoThumbSliderRtl.setValue(60); assertEquals(60, twoThumbSliderRtl.getValue()); assertEquals('Setting valid value must dispatch only a single change event.', 1, twoChangeEventCount); twoThumbSliderRtl.setValue(60); assertEquals(60, twoThumbSliderRtl.getValue()); assertEquals('Setting to same value must not dispatch change event.', 1, twoChangeEventCount); assertEquals('', valueThumbElement.style.left); assertTrue(valueThumbElement.style.right >= 0); assertEquals('', extentThumbElement.style.left); assertTrue(extentThumbElement.style.right >= 0); twoThumbSliderRtl.setValue(-60); assertEquals('Setting invalid value must not change value.', 60, twoThumbSliderRtl.getValue()); assertEquals('Setting invalid value must not dispatch change event.', 1, twoChangeEventCount); } function testGetAndSetExtent() { // Note(user): With a one thumb slider the API only really makes sense if you // always use setValue since there is no extent. 
twoThumbSlider.setExtent(7); assertEquals(7, twoThumbSlider.getExtent()); assertEquals('Setting valid value must dispatch only a single change event.', 1, twoChangeEventCount); twoThumbSlider.setExtent(7); assertEquals(7, twoThumbSlider.getExtent()); assertEquals('Setting to same value must not dispatch change event.', 1, twoChangeEventCount); twoThumbSlider.setExtent(-7); assertEquals('Setting invalid value must not change value.', 7, twoThumbSlider.getExtent()); assertEquals('Setting invalid value must not dispatch change event.', 1, twoChangeEventCount); } function testUpdateValueExtent() { twoThumbSlider.setValueAndExtent(30, 50); assertNotNull(twoThumbSlider.getElement()); assertEquals('Setting value results in updating aria-valuenow', '30', goog.a11y.aria.getState(twoThumbSlider.getElement(), goog.a11y.aria.State.VALUENOW)); assertEquals(30, twoThumbSlider.getValue()); assertEquals(50, twoThumbSlider.getExtent()); } function testRangeListener() { var slider = new goog.ui.SliderBase; slider.updateUi_ = slider.updateAriaStates = function() {}; slider.rangeModel.setValue(0); var f = goog.testing.recordFunction(); goog.events.listen(slider, goog.ui.Component.EventType.CHANGE, f); slider.rangeModel.setValue(50); assertEquals(1, f.getCallCount()); slider.exitDocument(); slider.rangeModel.setValue(0); assertEquals('The range model listener should not have been removed so we ' + 'should have gotten a second event dispatch', 2, f.getCallCount()); } /** * Verifies that rangeHighlight position and size are correct for the given * startValue and endValue. Assumes slider has default min/max values [0, 100], * width of 1020px, and thumb widths of 20px, with rangeHighlight drawn from * the centers of the thumbs. * @param {number} rangeHighlight The range highlight. * @param {number} startValue The start value. * @param {number} endValue The end value. 
*/ function assertHighlightedRange(rangeHighlight, startValue, endValue) { var rangeStr = '[' + startValue + ', ' + endValue + ']'; var rangeStart = 10 + 10 * startValue; assertEquals('Range highlight for ' + rangeStr + ' should start at ' + rangeStart + 'px.', rangeStart, rangeHighlight.offsetLeft); var rangeSize = 10 * (endValue - startValue); assertEquals('Range highlight for ' + rangeStr + ' should have size ' + rangeSize + 'px.', rangeSize, rangeHighlight.offsetWidth); } function testKeyHandlingTests() { twoThumbSlider.setValue(0); twoThumbSlider.setExtent(100); assertEquals(0, twoThumbSlider.getValue()); assertEquals(100, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.RIGHT); assertEquals(1, twoThumbSlider.getValue()); assertEquals(99, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.RIGHT); assertEquals(2, twoThumbSlider.getValue()); assertEquals(98, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.LEFT); assertEquals(1, twoThumbSlider.getValue()); assertEquals(98, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.LEFT); assertEquals(0, twoThumbSlider.getValue()); assertEquals(98, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.RIGHT, { shiftKey: true }); assertEquals(10, twoThumbSlider.getValue()); assertEquals(90, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.RIGHT, { shiftKey: true }); assertEquals(20, twoThumbSlider.getValue()); assertEquals(80, twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.LEFT, { shiftKey: true }); assertEquals(10, twoThumbSlider.getValue()); assertEquals(80, 
twoThumbSlider.getExtent()); goog.testing.events.fireKeySequence( twoThumbSlider.getElement(), goog.events.KeyCodes.LEFT, { shiftKey: true }); assertEquals(0, twoThumbSlider.getValue()); assertEquals(80, twoThumbSlider.getExtent()); } function testKeyHandlingRtl() { twoThumbSliderRtl.setValue(0); twoThumbSliderRtl.setExtent(100); assertEquals(0, twoThumbSliderRtl.getValue()); assertEquals(100, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.RIGHT); assertEquals(0, twoThumbSliderRtl.getValue()); assertEquals(99, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.RIGHT); assertEquals(0, twoThumbSliderRtl.getValue()); assertEquals(98, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.LEFT); assertEquals(1, twoThumbSliderRtl.getValue()); assertEquals(98, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.LEFT); assertEquals(2, twoThumbSliderRtl.getValue()); assertEquals(98, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.RIGHT, { shiftKey: true }); assertEquals(0, twoThumbSliderRtl.getValue()); assertEquals(90, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.RIGHT, { shiftKey: true }); assertEquals(0, twoThumbSliderRtl.getValue()); assertEquals(80, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.LEFT, { shiftKey: true }); assertEquals(10, twoThumbSliderRtl.getValue()); assertEquals(80, twoThumbSliderRtl.getExtent()); goog.testing.events.fireKeySequence( twoThumbSliderRtl.getElement(), goog.events.KeyCodes.LEFT, { shiftKey: true }); assertEquals(20, 
twoThumbSliderRtl.getValue()); assertEquals(80, twoThumbSliderRtl.getExtent()); } function testRangeHighlight() { var rangeHighlight = goog.dom.getElement('rangeHighlight'); // Test [0, 100] twoThumbSlider.setValue(0); twoThumbSlider.setExtent(100); assertHighlightedRange(rangeHighlight, 0, 100); // Test [25, 75] twoThumbSlider.setValue(25); twoThumbSlider.setExtent(50); assertHighlightedRange(rangeHighlight, 25, 75); // Test [50, 50] twoThumbSlider.setValue(50); twoThumbSlider.setExtent(0); assertHighlightedRange(rangeHighlight, 50, 50); } function testRangeHighlightAnimation() { var animationDelay = 160; // Delay in ms, is a bit higher than actual delay. if (goog.userAgent.IE) { // For some reason, (probably due to how timing works), IE7 and IE8 will not // stop if we don't wait for it. animationDelay = 250; } var rangeHighlight = goog.dom.getElement('rangeHighlight'); twoThumbSlider.setValue(0); twoThumbSlider.setExtent(100); // Animate right thumb, final range is [0, 75] twoThumbSlider.animatedSetValue(75); assertHighlightedRange(rangeHighlight, 0, 100); mockClock.tick(animationDelay); assertHighlightedRange(rangeHighlight, 0, 75); // Animate left thumb, final range is [25, 75] twoThumbSlider.animatedSetValue(25); assertHighlightedRange(rangeHighlight, 0, 75); mockClock.tick(animationDelay); assertHighlightedRange(rangeHighlight, 25, 75); } /** * Verifies that no error occurs and that the range highlight is sized correctly * for a zero-size slider (i.e. doesn't attempt to set a negative size). The * test tries to resize the slider from its original size to 0, then checks * that the range highlight's size is correctly set to 0. * * The size verification is needed because Webkit/Gecko outright ignore calls * to set negative sizes on an element, leaving it at its former size. IE * throws an error in the same situation. */ function testRangeHighlightForZeroSizeSlider() { // Make sure range highlight spans whole slider before zeroing width. 
twoThumbSlider.setExtent(100); twoThumbSlider.getElement().style.width = 0; // The setVisible call is used to force a UI update. twoThumbSlider.setVisible(true); assertEquals('Range highlight size should be 0 when slider size is 0', 0, goog.dom.getElement('rangeHighlight').offsetWidth); } function testAnimatedSetValueAnimatesFactoryCreatedAnimations() { // Create and set the factory. var ignore = goog.testing.mockmatchers.ignoreArgument; var mockControl = new goog.testing.MockControl(); var mockAnimation1 = mockControl.createLooseMock(goog.fx.Animation); var mockAnimation2 = mockControl.createLooseMock(goog.fx.Animation); var testAnimations = [mockAnimation1, mockAnimation2]; oneThumbSlider.setAdditionalAnimations(new AnimationFactory(testAnimations)); // Expect the animations to be played. mockAnimation1.play(false); mockAnimation2.play(false); mockAnimation1.addEventListener(ignore, ignore, ignore); mockAnimation2.addEventListener(ignore, ignore, ignore); // Animate and verify. mockControl.$replayAll(); oneThumbSlider.animatedSetValue(50); mockControl.$verifyAll(); mockControl.$resetAll(); mockControl.$tearDown(); } function testMouseWheelEventHandlerEnable() { // Mouse wheel handling should be enabled by default. assertTrue(oneThumbSlider.isHandleMouseWheel()); // Test disabling the mouse wheel handler oneThumbSlider.setHandleMouseWheel(false); assertFalse(oneThumbSlider.isHandleMouseWheel()); // Test that enabling again works fine. oneThumbSlider.setHandleMouseWheel(true); assertTrue(oneThumbSlider.isHandleMouseWheel()); // Test that mouse wheel handling can be disabled before rendering a slider. 
var wheelDisabledElem = goog.dom.createDom( 'div', {}, goog.dom.createDom('span')); var wheelDisabledSlider = new OneThumbSlider(); wheelDisabledSlider.setHandleMouseWheel(false); wheelDisabledSlider.decorate(wheelDisabledElem); assertFalse(wheelDisabledSlider.isHandleMouseWheel()); } function testDisabledAndEnabledSlider() { // Check that a slider is enabled by default assertTrue(oneThumbSlider.isEnabled()); var listenerCount = oneThumbSlider.getHandler().getListenerCount(); // Disable the slider and check its state oneThumbSlider.setEnabled(false); assertFalse(oneThumbSlider.isEnabled()); assertTrue(goog.dom.classes.has( oneThumbSlider.getElement(), 'goog-slider-disabled')); assertEquals(0, oneThumbSlider.getHandler().getListenerCount()); // setValue should work unaffected even when the slider is disabled. oneThumbSlider.setValue(30); assertEquals(30, oneThumbSlider.getValue()); assertEquals('Setting valid value must dispatch a change event ' + 'even when slider is disabled.', 1, oneChangeEventCount); // Test the transition from disabled to enabled oneThumbSlider.setEnabled(true); assertTrue(oneThumbSlider.isEnabled()); assertFalse(goog.dom.classes.has( oneThumbSlider.getElement(), 'goog-slider-disabled')); assertTrue(listenerCount == oneThumbSlider.getHandler().getListenerCount()); } function testBlockIncrementingWithEnableAndDisabled() { var doc = goog.dom.getOwnerDocument(oneThumbSlider.getElement()); // Case when slider is not disabled between the mouse down and up events. 
goog.testing.events.fireMouseDownEvent(oneThumbSlider.getElement()); assertEquals(1, goog.events.getListeners( oneThumbSlider.getElement(), goog.events.EventType.MOUSEMOVE, false).length); assertEquals(1, goog.events.getListeners( doc, goog.events.EventType.MOUSEUP, true).length); goog.testing.events.fireMouseUpEvent(oneThumbSlider.getElement()); assertEquals(0, goog.events.getListeners( oneThumbSlider.getElement(), goog.events.EventType.MOUSEMOVE, false).length); assertEquals(0, goog.events.getListeners( doc, goog.events.EventType.MOUSEUP, true).length); // Case when the slider is disabled between the mouse down and up events. goog.testing.events.fireMouseDownEvent(oneThumbSlider.getElement()); assertEquals(1, goog.events.getListeners( oneThumbSlider.getElement(), goog.events.EventType.MOUSEMOVE, false).length); assertEquals(1, goog.events.getListeners(doc, goog.events.EventType.MOUSEUP, true).length); oneThumbSlider.setEnabled(false); assertEquals(0, goog.events.getListeners( oneThumbSlider.getElement(), goog.events.EventType.MOUSEMOVE, false).length); assertEquals(0, goog.events.getListeners( doc, goog.events.EventType.MOUSEUP, true).length); assertEquals(1, oneThumbSlider.getHandler().getListenerCount()); goog.testing.events.fireMouseUpEvent(oneThumbSlider.getElement()); assertEquals(0, goog.events.getListeners( oneThumbSlider.getElement(), goog.events.EventType.MOUSEMOVE, false).length); assertEquals(0, goog.events.getListeners( doc, goog.events.EventType.MOUSEUP, true).length); } function testMouseClickWithMoveToPointEnabled() { var stepSize = 20; oneThumbSlider.setStep(stepSize); oneThumbSlider.setMoveToPointEnabled(true); var initialValue = oneThumbSlider.getValue(); // Figure out the number of pixels per step. 
var numSteps = Math.round( (oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum()) / stepSize); var size = goog.style.getSize(oneThumbSlider.getElement()); var pixelsPerStep = Math.round(size.width / numSteps); var coords = goog.style.getClientPosition(oneThumbSlider.getElement()); coords.x += pixelsPerStep / 2; // Case when value is increased goog.testing.events.fireClickSequence(oneThumbSlider.getElement(), /* opt_button */ undefined, coords); assertEquals(oneThumbSlider.getValue(), initialValue + stepSize); // Case when value is decreased goog.testing.events.fireClickSequence(oneThumbSlider.getElement(), /* opt_button */ undefined, coords); assertEquals(oneThumbSlider.getValue(), initialValue); // Case when thumb is clicked goog.testing.events.fireClickSequence(oneThumbSlider.getElement()); assertEquals(oneThumbSlider.getValue(), initialValue); } <<<<<<< HEAD ======= function testNonIntegerStepSize() { var stepSize = 0.02; oneThumbSlider.setStep(stepSize); oneThumbSlider.setMinimum(-1); oneThumbSlider.setMaximum(1); oneThumbSlider.setValue(0.7); assertRoughlyEquals(0.7, oneThumbSlider.getValue(), 0.000001); oneThumbSlider.setValue(0.3); assertRoughlyEquals(0.3, oneThumbSlider.getValue(), 0.000001); } >>>>>>> newgitrepo /** * Tests getThumbCoordinateForValue method. */ function testThumbCoordinateForValueWithHorizontalSlider() { // Make sure the y-coordinate stays the same for the horizontal slider. var originalY = goog.style.getPosition(oneThumbSlider.valueThumb).y; var width = oneThumbSlider.getElement().clientWidth - oneThumbSlider.valueThumb.offsetWidth; var range = oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum(); // Verify coordinate for a particular value. var value = 20; var expectedX = Math.round(value / range * width); var expectedCoord = new goog.math.Coordinate(expectedX, originalY); var coord = oneThumbSlider.getThumbCoordinateForValue(value); assertObjectEquals(expectedCoord, coord); // Verify this works regardless of current position. 
oneThumbSlider.setValue(value / 2); coord = oneThumbSlider.getThumbCoordinateForValue(value); assertObjectEquals(expectedCoord, coord); } function testThumbCoordinateForValueWithVerticalSlider() { // Make sure the x-coordinate stays the same for the vertical slider. oneThumbSlider.setOrientation(goog.ui.SliderBase.Orientation.VERTICAL); var originalX = goog.style.getPosition(oneThumbSlider.valueThumb).x; var height = oneThumbSlider.getElement().clientHeight - oneThumbSlider.valueThumb.offsetHeight; var range = oneThumbSlider.getMaximum() - oneThumbSlider.getMinimum(); // Verify coordinate for a particular value. var value = 20; var expectedY = height - Math.round(value / range * height); var expectedCoord = new goog.math.Coordinate(originalX, expectedY); var coord = oneThumbSlider.getThumbCoordinateForValue(value); assertObjectEquals(expectedCoord, coord); // Verify this works regardless of current position. oneThumbSlider.setValue(value / 2); coord = oneThumbSlider.getThumbCoordinateForValue(value); assertObjectEquals(expectedCoord, coord); } /** * Tests getValueFromMousePosition method. */ function testValueFromMousePosition() { var value = 30; oneThumbSlider.setValue(value); var offset = goog.style.getPageOffset(oneThumbSlider.valueThumb); var size = goog.style.getSize(oneThumbSlider.valueThumb); offset.x += size.width / 2; offset.y += size.height / 2; var e = null; goog.events.listen(oneThumbSlider, goog.events.EventType.MOUSEMOVE, function(evt) { e = evt; }); goog.testing.events.fireMouseMoveEvent(oneThumbSlider, offset); assertNotEquals(e, null); assertEquals( value, Math.round(oneThumbSlider.getValueFromMousePosition(e))); // Verify this works regardless of current position. oneThumbSlider.setValue(value / 2); assertEquals( value, Math.round(oneThumbSlider.getValueFromMousePosition(e))); } /** * Tests dragging events. 
*/ function testDragEvents() { var offset = goog.style.getPageOffset(oneThumbSlider.valueThumb); var size = goog.style.getSize(oneThumbSlider.valueThumb); offset.x += size.width / 2; offset.y += size.height / 2; var event_types = []; var handler = function(evt) { event_types.push(evt.type); }; goog.events.listen(oneThumbSlider, [goog.ui.SliderBase.EventType.DRAG_START, goog.ui.SliderBase.EventType.DRAG_END, goog.ui.SliderBase.EventType.DRAG_VALUE_START, goog.ui.SliderBase.EventType.DRAG_VALUE_END, goog.ui.SliderBase.EventType.DRAG_EXTENT_START, goog.ui.SliderBase.EventType.DRAG_EXTENT_END, goog.ui.Component.EventType.CHANGE], handler); // Since the order of the events between value and extent is not guaranteed // accross browsers, we need to allow for both here and once we have // them all, make sure that they were different. function isValueOrExtentDragStart(type) { return type == goog.ui.SliderBase.EventType.DRAG_VALUE_START || type == goog.ui.SliderBase.EventType.DRAG_EXTENT_START; }; function isValueOrExtentDragEnd(type) { return type == goog.ui.SliderBase.EventType.DRAG_VALUE_END || type == goog.ui.SliderBase.EventType.DRAG_EXTENT_END; }; // Test that dragging the thumb calls all the correct events. 
goog.testing.events.fireMouseDownEvent(oneThumbSlider.valueThumb); offset.x += 100; goog.testing.events.fireMouseMoveEvent(oneThumbSlider.valueThumb, offset); goog.testing.events.fireMouseUpEvent(oneThumbSlider.valueThumb); assertEquals(9, event_types.length); assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[0]); assertTrue(isValueOrExtentDragStart(event_types[1])); assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[2]); assertTrue(isValueOrExtentDragStart(event_types[3])); assertEquals(goog.ui.Component.EventType.CHANGE, event_types[4]); assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[5]); assertTrue(isValueOrExtentDragEnd(event_types[6])); assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[7]); assertTrue(isValueOrExtentDragEnd(event_types[8])); assertFalse(event_types[1] == event_types[3]); assertFalse(event_types[6] == event_types[8]); // Test that clicking the thumb without moving the mouse does not cause a // CHANGE event between DRAG_START/DRAG_END. event_types = []; goog.testing.events.fireMouseDownEvent(oneThumbSlider.valueThumb); goog.testing.events.fireMouseUpEvent(oneThumbSlider.valueThumb); assertEquals(8, event_types.length); assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[0]); assertTrue(isValueOrExtentDragStart(event_types[1])); assertEquals(goog.ui.SliderBase.EventType.DRAG_START, event_types[2]); assertTrue(isValueOrExtentDragStart(event_types[3])); assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[4]); assertTrue(isValueOrExtentDragEnd(event_types[5])); assertEquals(goog.ui.SliderBase.EventType.DRAG_END, event_types[6]); assertTrue(isValueOrExtentDragEnd(event_types[7])); assertFalse(event_types[1] == event_types[3]); assertFalse(event_types[5] == event_types[7]); // Early listener removal, do not wait for tearDown, to avoid building up // arrays of events unnecessarilly in further tests. goog.events.removeAll(oneThumbSlider); } </script> </body> </html>
Java
# Verrucaria floerkeana f. congregata (Hepp) Zahlbr. FORM #### Status ACCEPTED #### According to Index Fungorum #### Published in Cat. Lich. Univers. 1: 40 (1921) #### Original name Verrucaria papillosa f. congregata Hepp ### Remarks None recorded.
Java
package jp.hashiwa.elasticsearch.authplugin; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.rest.*; import java.util.*; import java.util.regex.Pattern; import java.util.stream.Stream; public class AuthRestHandler implements RestHandler { private final Logger logger = Loggers.getLogger(AuthRestHandler.class); private final RestHandler originalHandler; private final RestResponse unauthorizedResponse = new RestResponse() { @Override public String contentType() { return "application/json"; } @Override public BytesReference content() { return new BytesArray(""); } @Override public RestStatus status() { return RestStatus.UNAUTHORIZED; } }; private final Map<RestRequest.Method, Stream<Pattern>> authPatterns = new HashMap<RestRequest.Method, Stream<Pattern>>() { { this.put(RestRequest.Method.POST, Stream.of( Pattern.compile("^/testindex(/.*)?$") )); this.put(RestRequest.Method.PUT, Stream.of( Pattern.compile("^/testindex(/.*)?$") )); // all methods this.put(null, Stream.of( Pattern.compile("^/adminindex(/.*)?$") )); } }; AuthRestHandler(RestHandler restHandler) { this.originalHandler = restHandler; } @Override public void handleRequest(RestRequest restRequest, RestChannel restChannel, NodeClient nodeClient) throws Exception { this.logger.debug(restRequest.path()); this.logger.debug(restRequest.rawPath()); if (isOk(restRequest)) { this.originalHandler.handleRequest(restRequest, restChannel, nodeClient); } else { restChannel.sendResponse(unauthorizedResponse); } } private boolean needAuth(RestRequest.Method method, String path) { if (authPatterns.containsKey(method)) { Stream<Pattern> patterns = authPatterns.get(method); boolean match = patterns.anyMatch( p -> p.matcher(path).matches() ); return match; } return false; } private boolean isOk(RestRequest 
restRequest) { RestRequest.Method method = restRequest.method(); String path = restRequest.path(); // use rawpath() ? boolean needAuth = needAuth(method, path) || needAuth(null, path); if (! needAuth) { return true; } for (java.util.Map.Entry<String, String> entry: restRequest.headers()) { String key = entry.getKey(); String value = entry.getValue(); if (key.equals("user") && value.equals("admin")) { return true; } } return false; // ES 5.4 // return restRequest.getHeaders().get("user").equals("admin"); } }
Java
#pragma once #include "il2cpp-config.h" #ifndef _MSC_VER # include <alloca.h> #else # include <malloc.h> #endif #include <stdint.h> // System.Object struct Il2CppObject; // System.IAsyncResult struct IAsyncResult_t537683269; // System.AsyncCallback struct AsyncCallback_t1363551830; #include "mscorlib_System_MulticastDelegate2585444626.h" #include "mscorlib_System_Int322847414787.h" #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Winvalid-offsetof" #pragma clang diagnostic ignored "-Wunused-variable" #endif // System.Collections.Generic.Dictionary`2/Transform`1<System.Int32,System.Object,System.Object> struct Transform_1_t4035712581 : public MulticastDelegate_t2585444626 { public: public: }; #ifdef __clang__ #pragma clang diagnostic pop #endif
Java
/* * Copyright (c) 2010 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. See accompanying LICENSE file. */ package io.s4.persist; import io.s4.util.clock.Clock; import java.util.Enumeration; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import org.apache.log4j.Logger; public class ConMapPersister implements Persister { private AtomicInteger persistCount = new AtomicInteger(0); private boolean selfClean = false; private int cleanWaitTime = 40; // 20 seconds by default private String loggerName = "s4"; ConcurrentHashMap<String, CacheEntry> cache; Clock s4Clock; private int startCapacity = 5000; public void setStartCapacity(int startCapacity) { this.startCapacity = startCapacity; } public int getStartCapacity() { return startCapacity; } public void setSelfClean(boolean selfClean) { this.selfClean = selfClean; } public void setCleanWaitTime(int cleanWaitTime) { this.cleanWaitTime = cleanWaitTime; } public void setLoggerName(String loggerName) { this.loggerName = loggerName; } public ConMapPersister(Clock s4Clock) { this.s4Clock = s4Clock; } public void setS4Clock(Clock s4Clock) { this.s4Clock = s4Clock; } public ConMapPersister() { } public void init() { cache = new ConcurrentHashMap<String, CacheEntry>(this.getStartCapacity()); if (selfClean) { Runnable r = new Runnable() { public void run() { while (!Thread.interrupted()) { int 
cleanCount = ConMapPersister.this.cleanOutGarbage(); Logger.getLogger(loggerName).info("Cleaned out " + cleanCount + " entries; Persister has " + cache.size() + " entries"); try { Thread.sleep(cleanWaitTime * 1000); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } } }; Thread t = new Thread(r); t.start(); t.setPriority(Thread.MIN_PRIORITY); } } public int getQueueSize() { return 0; } public int getPersistCount() { return persistCount.get(); } public int getCacheEntryCount() { return cache.size(); } public void setAsynch(String key, Object value, int period) { // there really is no asynch for the local cache set(key, value, period); } public void set(String key, Object value, int period) { persistCount.getAndIncrement(); CacheEntry ce = new CacheEntry(); ce.value = value; ce.period = period; ce.addTime = s4Clock.getCurrentTime(); cache.put(key, ce); } public Object get(String key) { CacheEntry ce = cache.get(key); if (ce == null) { return null; } if (ce.isExpired()) { return null; } return ce.value; } public Map<String, Object> getBulk(String[] keys) { HashMap map = new HashMap<String, Object>(); for (String key : keys) { Object value = get(key); if (value != null) { map.put(key, value); } } return map; } public Object getObject(String key) { return get(key); } public Map<String, Object> getBulkObjects(String[] keys) { return getBulk(keys); } public void remove(String key) { cache.remove(key); } public int cleanOutGarbage() { int count = 0; for (Enumeration en = cache.keys(); en.hasMoreElements();) { String key = (String) en.nextElement(); CacheEntry ce = cache.get(key); if (ce != null && ce.isExpired()) { count++; cache.remove(key); } } return count; } public Set<String> keySet() { return cache.keySet(); } public class CacheEntry { Object value; long addTime; int period; public boolean isExpired() { if (period > 0) { if ((addTime + (1000 * (long) period)) <= s4Clock.getCurrentTime()) { return true; } } return false; } } }
Java
---
layout: post
title: Hibernate基础
subtitle:
date: 2017-12-19
author: Felix
header-img: img/home-bg-art.jpg
catalog: true
tags:
    - hibernate
---

# 说明

以下内容为初学hibernate的体会,该框架的深入使用还未了解。

## 什么是hibernate

1.hibernate是一个对象关系映射框架,是对JDBC的轻量级封装。基于这两点,在以下的jar包使用中,至少要用到hibernate相关包,以及数据库处理相关包。

2.关系和对象的映射,就是把数据库中的关系映射到类的对象中。数据库中存储的数据实际上是一种实体与实体之间的关系。而当数据存储到数据库中时,就是持久化的过程。

## jar包(-.-指版本号)

以下是学习中用到的jar包

```text
1.antlr-.-jar
2.classmate-.-jar
3.dom4j-.-jar
4.hibernate-commons-annotations-.-jar
5.hibernate-core-.-jar
6.hibernate-jpa-.-api-.-jar
7.jandex-.-jar
8.javassist-.-jar
9.jboss-logging-.-jar
10.jboss-transaction-api-.-jar
//操作mysql所需jar包
11.mysql-connector-java-.-jar
```

## .hbm.xml文件

该文件是用来映射关系与对象(一个pojo类和一个表的映射关系),与需要映射的对象放在同一文件夹下即可,命名方式为:*.hbm.xml,其中*为要映射的类名。

文件头:

```xml
<!DOCTYPE hibernate-mapping PUBLIC
    "-//Hibernate/Hibernate Mapping DTD 3.0//EN"
    "http://www.hibernate.org/dtd/hibernate-mapping-3.0.dtd">
```

在`<hibernate-mapping>`标签中定义关系的`class,table,id,type,property`等属性

## config文件

即hibernate.cfg.xml,该文件放置在class目录/src目录下即可,

```xml
<!-- 当调用Configuration cfg = new Configuration().configure()时,自动搜索本文件,并将其读取到内存中,作为后续操作的基础配置 -->
<!DOCTYPE hibernate-configuration PUBLIC
    "-//Hibernate/Hibernate Configuration DTD 3.0//EN"
    "http://www.hibernate.org/dtd/hibernate-configuration-3.0.dtd">
<hibernate-configuration>
  <!-- 产生操作数据库的session工厂 -->
  <session-factory>
    <!-- 连接数据库的URL -->
    <property name="connection.url">jdbc:mysql://localhost:3306/hiberstudy</property>
    <!-- 连接数据库的用户名 -->
    <property name="connection.username">root</property>
    <!-- 连接数据库的密码 -->
    <property name="connection.password">123456</property>
    <!-- 连接数据库的驱动类 -->
    <property name="connection.driver_class">com.mysql.jdbc.Driver</property>
    <!-- 数据库方言 -->
    <property name="hibernate.dialect">org.hibernate.dialect.MySQLDialect</property>
    <!-- 显示hibernate对数据库操作语句 -->
    <property name="hibernate.show_sql">true</property>
    <!-- 自动创建/更新/验证数据库表结构 -->
    <property name="hibernate.hbm2ddl.auto">create</property>
    <!-- 批量操作 -->
    <property name="hibernate.jdbc.batch_size">50</property><!-- 设置批量尺寸 -->
    <property name="hibernate.cache.use_second_level_cache">false</property><!-- 关闭二级缓存 -->
    <property name="hibernate.query.factory_class">
        org.hibernate.hql.ast.ASTQueryTranslatorFactory</property><!-- 设置HQL/SQL查询翻译器属性 更新/删除操作都需要设置 -->
    <!-- 类与表的注册文件,下面是自己在学习过程中的注册文件 -->
    <mapping resource="com/lzf/vo/User.hbm.xml"/>
  </session-factory>
</hibernate-configuration>
```

## 使用过程

```java
//整个hibernate程序的启动类,如果config配置文件放置在默认路径下,会自动加载不需要带参数
Configuration cfg = new Configuration();
//获得session对象的工厂,保存了对应当前数据库配置的所有映射
SessionFactory sessionFactory = cfg.configure().buildSessionFactory();
//准备应用session对象来操作数据库,该接口提供了众多持久化方法,如增删改查(非线程安全)
Session session = sessionFactory.openSession();
//事务操作
Transaction t = session.beginTransaction();
//只有commit之后,才会在数据库中得到更新
t.commit();
```
Java
# # Copyright 2015, SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # shared_examples "a request class" do |with_body| before(:each) do Crowbar::Client::Config.configure( Crowbar::Client::Config.defaults.merge( server: "http://crowbar:80" ) ) end it "provides a method value" do expect(subject.method).to( eq(method) ) end it "provides a specific url" do expect(subject.url).to( eq(url) ) end it "provides a valid payload" do expect(subject.content).to( eq(params) ) end it "submits payload to an API" do content = if with_body params else "" end allow(Crowbar::Client::Request::Rest).to receive(:new).and_return( Crowbar::Client::Request::Rest.new( url: url, auth_type: nil ) ) stub_request( method, "http://crowbar:80/#{url}" ).to_return( status: 200, body: "", headers: {} ) subject.process expect( Crowbar::Client::Request::Rest.new(url: url).send( method, content ).code ).to eq(200) end end
Java
--TEST--
swoole_server/ssl: dtls
--SKIPIF--
<?php require __DIR__ . '/../../include/skipif.inc'; ?>
--FILE--
<?php
require __DIR__ . '/../../include/bootstrap.php';

$pm = new SwooleTest\ProcessManager;

// Parent process: act as a DTLS (UDP + SSL) client, send one message and
// expect it echoed back with the "Swoole " prefix.
$pm->parentFunc = function ($pid) use ($pm) {
    $client = new Swoole\Client(SWOOLE_SOCK_UDP | SWOOLE_SSL, SWOOLE_SOCK_SYNC); // synchronous blocking client
    if (!$client->connect('127.0.0.1', $pm->getFreePort())) {
        exit("connect failed\n");
    }
    $client->send("hello world");
    Assert::same($client->recv(), "Swoole hello world");
    $pm->kill();
};

// Child process: DTLS echo server on a free port.
$pm->childFunc = function () use ($pm) {
    $serv = new Swoole\Server('127.0.0.1', $pm->getFreePort(), SWOOLE_BASE, SWOOLE_SOCK_UDP | SWOOLE_SSL);
    $serv->set([
        'log_file' => '/dev/null',
        'ssl_cert_file' => SSL_FILE_DIR . '/server.crt',
        'ssl_key_file' => SSL_FILE_DIR . '/server.key',
    ]);
    // Unblock the parent once a worker is ready to accept traffic.
    $serv->on("workerStart", function ($serv) use ($pm) {
        $pm->wakeup();
    });
    // Both callbacks are registered; which one fires for DTLS traffic is
    // runtime-dependent — NOTE(review): presumably 'receive' for the SSL
    // stream-like path, 'packet' for plain UDP; confirm against swoole docs.
    $serv->on('receive', function ($serv, $fd, $tid, $data) {
        $serv->send($fd, "Swoole $data");
    });
    $serv->on('packet', function ($serv, $fd, $tid, $data) {
        $serv->send($fd, "Swoole $data");
    });
    $serv->start();
};

$pm->childFirst();
$pm->run();
?>
--EXPECT--
Java
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/orchestration/airflow/service/v1/environments.proto
// NOTE(review): hand edits here will be lost — change environments.proto and
// regenerate instead. The message carries a single optional string field,
// machine_type (field number 1).

package com.google.cloud.orchestration.airflow.service.v1;

/**
 *
 *
 * <pre>
 * The configuration of Cloud SQL instance that is used by the Apache Airflow
 * software.
 * </pre>
 *
 * Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
 */
public final class DatabaseConfig extends com.google.protobuf.GeneratedMessageV3
    implements
    // @@protoc_insertion_point(message_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
    DatabaseConfigOrBuilder {
  private static final long serialVersionUID = 0L;
  // Use DatabaseConfig.newBuilder() to construct.
  private DatabaseConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
    super(builder);
  }

  private DatabaseConfig() {
    machineType_ = "";
  }

  @java.lang.Override
  @SuppressWarnings({"unused"})
  protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
    return new DatabaseConfig();
  }

  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
    return this.unknownFields;
  }

  // Wire-format parsing constructor: tag 10 = (field 1 << 3) | WIRETYPE_LENGTH_DELIMITED.
  private DatabaseConfig(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    this();
    if (extensionRegistry == null) {
      throw new java.lang.NullPointerException();
    }
    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          case 10:
            {
              java.lang.String s = input.readStringRequireUtf8();

              machineType_ = s;
              break;
            }
          default:
            {
              if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
    } finally {
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }

  public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
        .internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
  }

  @java.lang.Override
  protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
        .internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
            com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
  }

  public static final int MACHINE_TYPE_FIELD_NUMBER = 1;
  // Holds either a String or a ByteString; lazily converted on access.
  private volatile java.lang.Object machineType_;
  /**
   *
   *
   * <pre>
   * Optional. Cloud SQL machine type used by Airflow database.
   * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
   * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
   * </pre>
   *
   * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The machineType.
   */
  @java.lang.Override
  public java.lang.String getMachineType() {
    java.lang.Object ref = machineType_;
    if (ref instanceof java.lang.String) {
      return (java.lang.String) ref;
    } else {
      com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
      java.lang.String s = bs.toStringUtf8();
      machineType_ = s;
      return s;
    }
  }
  /**
   *
   *
   * <pre>
   * Optional. Cloud SQL machine type used by Airflow database.
   * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
   * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
   * </pre>
   *
   * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
   *
   * @return The bytes for machineType.
   */
  @java.lang.Override
  public com.google.protobuf.ByteString getMachineTypeBytes() {
    java.lang.Object ref = machineType_;
    if (ref instanceof java.lang.String) {
      com.google.protobuf.ByteString b =
          com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
      machineType_ = b;
      return b;
    } else {
      return (com.google.protobuf.ByteString) ref;
    }
  }

  private byte memoizedIsInitialized = -1;

  @java.lang.Override
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  @java.lang.Override
  public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
      com.google.protobuf.GeneratedMessageV3.writeString(output, 1, machineType_);
    }
    unknownFields.writeTo(output);
  }

  @java.lang.Override
  public int getSerializedSize() {
    int size = memoizedSize;
    if (size != -1) return size;

    size = 0;
    if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(machineType_)) {
      size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, machineType_);
    }
    size += unknownFields.getSerializedSize();
    memoizedSize = size;
    return size;
  }

  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)) {
      return super.equals(obj);
    }
    com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other =
        (com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) obj;

    if (!getMachineType().equals(other.getMachineType())) return false;
    if (!unknownFields.equals(other.unknownFields)) return false;
    return true;
  }

  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptor().hashCode();
    hash = (37 * hash) + MACHINE_TYPE_FIELD_NUMBER;
    hash = (53 * hash) + getMachineType().hashCode();
    hash = (29 * hash) + unknownFields.hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
      java.io.InputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseDelimitedFrom(
      java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
        PARSER, input, extensionRegistry);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      com.google.protobuf.CodedInputStream input) throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
        PARSER, input, extensionRegistry);
  }

  @java.lang.Override
  public Builder newBuilderForType() {
    return newBuilder();
  }

  public static Builder newBuilder() {
    return DEFAULT_INSTANCE.toBuilder();
  }

  public static Builder newBuilder(
      com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig prototype) {
    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
  }

  @java.lang.Override
  public Builder toBuilder() {
    return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
  }

  @java.lang.Override
  protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   *
   *
   * <pre>
   * The configuration of Cloud SQL instance that is used by the Apache Airflow
   * software.
   * </pre>
   *
   * Protobuf type {@code google.cloud.orchestration.airflow.service.v1.DatabaseConfig}
   */
  public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
      implements
      // @@protoc_insertion_point(builder_implements:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
      com.google.cloud.orchestration.airflow.service.v1.DatabaseConfigOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
      return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
          .internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
    }

    @java.lang.Override
    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
          .internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.class,
              com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.Builder.class);
    }

    // Construct using com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }

    private void maybeForceBuilderInitialization() {
      if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
    }

    @java.lang.Override
    public Builder clear() {
      super.clear();
      machineType_ = "";

      return this;
    }

    @java.lang.Override
    public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
      return com.google.cloud.orchestration.airflow.service.v1.EnvironmentsOuterClass
          .internal_static_google_cloud_orchestration_airflow_service_v1_DatabaseConfig_descriptor;
    }

    @java.lang.Override
    public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
        getDefaultInstanceForType() {
      return com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance();
    }

    @java.lang.Override
    public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig build() {
      com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    @java.lang.Override
    public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig buildPartial() {
      com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig result =
          new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig(this);
      result.machineType_ = machineType_;
      onBuilt();
      return result;
    }

    @java.lang.Override
    public Builder clone() {
      return super.clone();
    }

    @java.lang.Override
    public Builder setField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.setField(field, value);
    }

    @java.lang.Override
    public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
      return super.clearField(field);
    }

    @java.lang.Override
    public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
      return super.clearOneof(oneof);
    }

    @java.lang.Override
    public Builder setRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
      return super.setRepeatedField(field, index, value);
    }

    @java.lang.Override
    public Builder addRepeatedField(
        com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
      return super.addRepeatedField(field, value);
    }

    @java.lang.Override
    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) {
        return mergeFrom((com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig) other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(
        com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig other) {
      if (other
          == com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig.getDefaultInstance())
        return this;
      if (!other.getMachineType().isEmpty()) {
        machineType_ = other.machineType_;
        onChanged();
      }
      this.mergeUnknownFields(other.unknownFields);
      onChanged();
      return this;
    }

    @java.lang.Override
    public final boolean isInitialized() {
      return true;
    }

    @java.lang.Override
    public Builder mergeFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig parsedMessage = null;
      try {
        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        parsedMessage =
            (com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
                e.getUnfinishedMessage();
        throw e.unwrapIOException();
      } finally {
        if (parsedMessage != null) {
          mergeFrom(parsedMessage);
        }
      }
      return this;
    }

    private java.lang.Object machineType_ = "";
    /**
     *
     *
     * <pre>
     * Optional. Cloud SQL machine type used by Airflow database.
     * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
     * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
     * </pre>
     *
     * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return The machineType.
     */
    public java.lang.String getMachineType() {
      java.lang.Object ref = machineType_;
      if (!(ref instanceof java.lang.String)) {
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        machineType_ = s;
        return s;
      } else {
        return (java.lang.String) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Optional. Cloud SQL machine type used by Airflow database.
     * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
     * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
     * </pre>
     *
     * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return The bytes for machineType.
     */
    public com.google.protobuf.ByteString getMachineTypeBytes() {
      java.lang.Object ref = machineType_;
      if (ref instanceof String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
        machineType_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    /**
     *
     *
     * <pre>
     * Optional. Cloud SQL machine type used by Airflow database.
     * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
     * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
     * </pre>
     *
     * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @param value The machineType to set.
     * @return This builder for chaining.
     */
    public Builder setMachineType(java.lang.String value) {
      if (value == null) {
        throw new NullPointerException();
      }

      machineType_ = value;
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Cloud SQL machine type used by Airflow database.
     * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
     * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
     * </pre>
     *
     * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @return This builder for chaining.
     */
    public Builder clearMachineType() {

      machineType_ = getDefaultInstance().getMachineType();
      onChanged();
      return this;
    }
    /**
     *
     *
     * <pre>
     * Optional. Cloud SQL machine type used by Airflow database.
     * It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8
     * or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.
     * </pre>
     *
     * <code>string machine_type = 1 [(.google.api.field_behavior) = OPTIONAL];</code>
     *
     * @param value The bytes for machineType to set.
     * @return This builder for chaining.
     */
    public Builder setMachineTypeBytes(com.google.protobuf.ByteString value) {
      if (value == null) {
        throw new NullPointerException();
      }
      checkByteStringIsUtf8(value);

      machineType_ = value;
      onChanged();
      return this;
    }

    @java.lang.Override
    public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.setUnknownFields(unknownFields);
    }

    @java.lang.Override
    public final Builder mergeUnknownFields(
        final com.google.protobuf.UnknownFieldSet unknownFields) {
      return super.mergeUnknownFields(unknownFields);
    }

    // @@protoc_insertion_point(builder_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
  }

  // @@protoc_insertion_point(class_scope:google.cloud.orchestration.airflow.service.v1.DatabaseConfig)
  private static final com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
      DEFAULT_INSTANCE;

  static {
    DEFAULT_INSTANCE = new com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig();
  }

  public static com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
      getDefaultInstance() {
    return DEFAULT_INSTANCE;
  }

  private static final com.google.protobuf.Parser<DatabaseConfig> PARSER =
      new com.google.protobuf.AbstractParser<DatabaseConfig>() {
        @java.lang.Override
        public DatabaseConfig parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new DatabaseConfig(input, extensionRegistry);
        }
      };

  public static com.google.protobuf.Parser<DatabaseConfig> parser() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.protobuf.Parser<DatabaseConfig> getParserForType() {
    return PARSER;
  }

  @java.lang.Override
  public com.google.cloud.orchestration.airflow.service.v1.DatabaseConfig
      getDefaultInstanceForType() {
    return DEFAULT_INSTANCE;
  }
}
Java
/******************************************************************************* * Copyright 2016 Francesco Calimeri, Davide Fusca', Simona Perri and Jessica Zangari * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /* * GroundingPreferences.h * * Created on: Mar 10, 2016 * Author: jessica */ #ifndef SRC_GROUNDER_STATEMENT_GROUNDINGPREFERENCES_H_ #define SRC_GROUNDER_STATEMENT_GROUNDINGPREFERENCES_H_ #include <unordered_map> #include "Rule.h" #include <iostream> #include <vector> #include <list> using namespace std; namespace DLV2 { namespace grounder { struct HashAtomPointer{ inline size_t operator()(Atom* obj) const { return size_t(obj); } inline bool operator()(Atom* obj1, Atom* obj2) const { return obj1==obj2; } }; enum AnnotationsError {OK, ATOM_NOT_PRESENT, ARITY_ERROR, CONFLICT_FOUND}; typedef unordered_map<Atom*,vector<unsigned>,HashAtomPointer,HashAtomPointer> unordered_map_pointers_atom_arguments; typedef unordered_map<Atom*,vector<unsigned>,HashForTable<Atom>,HashForTable<Atom>> unordered_map_atom_arguments; class GroundingPreferences { public: bool addRuleOrderingType(Rule* rule, unsigned orderingType); bool addRuleProjectionType(Rule* rule, unsigned pType){ rulesProjectionTypes.insert({rule->getIndex(), pType}); return true; } void addRuleRewArith(Rule* rule){ rulesRewArith.insert(rule->getIndex()); } void addRuleLookAhead(Rule* rule){ rulesLookAhead.insert(rule->getIndex()); } void 
addRuleAlignSubstitutions(Rule* rule){ rulesAlignSubstitutions.insert(rule->getIndex()); } AnnotationsError addRuleAtomIndexingSetting(Rule* rule, Atom* atom, vector<unsigned>& arguments); void addRulePartialOrder(Rule* rule){rulesPartialOrders[rule->getIndex()].emplace_back();rulesPartialOrdersAtoms[rule->getIndex()].emplace_back();} AnnotationsError addRulePartialOrderAtom(Rule* rule, Atom* atom); AnnotationsError checkRulePartialOrderConflicts(Rule* rule); AnnotationsError applyRulePartialOrder(Rule* rule); bool addGlobalOrderingType(unsigned orderingType); void addGlobalAtomIndexingSetting(Atom* atom, vector<unsigned>& arguments); void addGlobalPartialOrder(){ globalPartialOrdersAtoms.emplace_back();} void addGlobalPartialOrderAtomStart(Atom* atom); void addGlobalPartialOrderAtomEnd(Atom* atom); int getOrderingType(Rule* r) ; pair<bool,int> getProjectionType(Rule* r){ auto i =r->getIndex(); if(rulesProjectionTypes.count(i)) return {true,rulesProjectionTypes[i]}; return {false,-1}; } bool getRewArith(Rule* r){ return rulesRewArith.count(r->getIndex()); } bool getLookAhead(Rule* r){ return rulesLookAhead.count(r->getIndex()); } bool getAlignSubstitutions(Rule* r){ return rulesAlignSubstitutions.count(r->getIndex()); } bool checkPartialOrder(Rule* rule,unsigned atomPosition,const list<unsigned>& atoms) ; bool checkAtomIndexed(Rule* rule,Atom* atom,const vector<unsigned>& possibileArgs, vector<unsigned>& idxTerms) ; static GroundingPreferences* getGroundingPreferences() { if(groundingPreferences==0) groundingPreferences=new GroundingPreferences(); return groundingPreferences; } ~GroundingPreferences(){}; static void freeInstance(){ delete groundingPreferences;} static void checkIfAtomIsPresentInRule(Rule* rule, Atom* atom, vector<unsigned>& positions); void print(Rule* rule) const; private: unordered_map<unsigned,unsigned> rulesOrderingTypes; unordered_map<unsigned,unsigned> rulesProjectionTypes; unordered_set<unsigned> rulesRewArith; unordered_set<unsigned> 
rulesLookAhead; unordered_set<unsigned> rulesAlignSubstitutions; unordered_map<unsigned,unordered_map_pointers_atom_arguments> rulesAtomsIndexed; unordered_map<unsigned,vector<vector<bool>>> rulesPartialOrders; unordered_map<unsigned,vector<vector<Atom*>>> rulesPartialOrdersAtoms; int globalOrderingType; unordered_map_atom_arguments globalAtomsIndexed; vector<list<Atom*>> globalPartialOrdersAtoms; bool applayedGlobalAnnotations; bool applyGlobalAtomIndexingSetting(); bool applyGlobalPartialOrder(); void setGlobalAnnotations(); GroundingPreferences():globalOrderingType(-1),applayedGlobalAnnotations(false){}; static GroundingPreferences* groundingPreferences; }; } /* namespace grounder */ } /* namespace DLV2 */ #endif /* SRC_GROUNDER_STATEMENT_GROUNDINGPREFERENCES_H_ */
Java
# Zymonema album C.W. Dodge, 1935 SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
package com.bagri.server.hazelcast.task.schema;

import static com.bagri.core.Constants.pn_schema_password;
import static com.bagri.server.hazelcast.serialize.TaskSerializationFactory.cli_UpdateSchemaTask;
import static com.bagri.support.security.Encryptor.encrypt;

import java.io.IOException;
import java.util.Properties;
import java.util.Map.Entry;

import com.bagri.core.system.Schema;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;

/**
 * Entry processor which updates the properties of an existing {@link Schema} entry.
 * The update is applied only when the entry's current version matches the expected
 * version supplied at construction time. Password values are always stored encrypted.
 */
public class SchemaUpdater extends SchemaProcessor implements IdentifiedDataSerializable {

	private boolean override;
	private Properties properties;

	/** No-arg constructor required for Hazelcast de-serialization. */
	public SchemaUpdater() {
		//
	}

	/**
	 * @param version    expected schema version; the update is applied only when it matches
	 * @param admin      name of the administrator performing the update
	 * @param override   true to replace the schema's property set wholesale,
	 *                   false to merge the given properties into the existing ones
	 * @param properties the new/updated schema properties
	 */
	public SchemaUpdater(int version, String admin, boolean override, Properties properties) {
		super(version, admin);
		this.override = override;
		this.properties = properties;
	}

	@Override
	public Object process(Entry<String, Schema> entry) {
		logger.debug("process.enter; entry: {}", entry);
		Schema schema = entry.getValue();
		if (schema == null || schema.getVersion() != getVersion()) {
			// nothing to update, or the entry was modified concurrently (version mismatch)
			return null;
		}

		if (override) {
			// replace the whole property set; encrypt the password before it is stored
			String pwd = properties.getProperty(pn_schema_password);
			if (pwd != null) {
				properties.setProperty(pn_schema_password, encrypt(pwd));
			}
			schema.setProperties(properties);
		} else {
			// merge property by property, encrypting the password value on the way
			for (String name: properties.stringPropertyNames()) {
				String value = properties.getProperty(name);
				if (pn_schema_password.equals(name)) {
					value = encrypt(value);
				}
				schema.setProperty(name, value);
			}
		}

		schema.updateVersion(getAdmin());
		entry.setValue(schema);
		auditEntity(AuditType.update, schema);
		return schema;
	}

	@Override
	public int getId() {
		return cli_UpdateSchemaTask;
	}

	@Override
	public void readData(ObjectDataInput in) throws IOException {
		super.readData(in);
		override = in.readBoolean();
		properties = in.readObject();
	}

	@Override
	public void writeData(ObjectDataOutput out) throws IOException {
		super.writeData(out);
		out.writeBoolean(override);
		out.writeObject(properties);
	}
}
Java
package uk.co.bluegecko.core.swing.table.rendering;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.awt.Color;
import java.awt.Font;

import org.junit.Before;
import org.junit.Test;

/**
 * Unit tests for the rendering-hint hierarchy: the relative ordering defined by
 * {@link HintWeight}, value derivation in {@link FontHint}, and colour derivation
 * in {@link ForegroundHint} / {@link BackgroundHint}.
 */
public class RenderingHintTest {

	private Font font;
	private Color color;

	// shared fixtures: a plain 12pt monospaced font and a mid-grey colour
	@Before
	public final void setUp() {
		font = Font.decode( "Monospaced-12" );
		color = new Color( 0x808080 );
	}

	// weights form a strict ascending chain; exceeds() is strict (not reflexive)
	@Test
	public final void testWeightExceeds() {
		final FontHint min = new FontHint( HintWeight.MIN_WEIGHT );
		final FontHint low = new FontHint( HintWeight.LOW_WEIGHT );
		final FontHint def = new FontHint( HintWeight.DEFAULT_WEIGHT );
		final FontHint selected = new FontHint( HintWeight.SELECTED_WEIGHT );
		final FontHint high = new FontHint( HintWeight.HIGH_WEIGHT );
		final FontHint focused = new FontHint( HintWeight.FOCUSED_WEIGHT );
		final FontHint max = new FontHint( HintWeight.MAX_WEIGHT );
		assertFalse( "min-min", min.exceeds( min ) );
		assertFalse( "min-low", min.exceeds( low ) );
		assertTrue( "low-min", low.exceeds( min ) );
		assertTrue( "default-low", def.exceeds( low ) );
		assertTrue( "selected-default", selected.exceeds( def ) );
		assertTrue( "high-selected", high.exceeds( selected ) );
		assertTrue( "focused-high", focused.exceeds( high ) );
		assertTrue( "max-focused", max.exceeds( focused ) );
	}

	// a hint with no value echoes the supplied default and has no standalone value
	@Test
	public final void testGetValueNone() {
		assertEquals( font, new FontHint( HintWeight.MAX_WEIGHT ).getValue( font ) );
		assertNull( new FontHint( HintWeight.MAX_WEIGHT ).getValue() );
	}

	// an explicit value wins over the supplied default
	@Test
	public final void testGetValueNonDerived() {
		final Font value = Font.decode( "Monospaced-BOLD-14" );
		assertEquals( value, new FontHint( HintWeight.MAX_WEIGHT, value ).getValue( font ) );
		assertEquals( value, new FontHint( HintWeight.MAX_WEIGHT, value ).getValue() );
	}

	// an overridden derive() is applied to the supplied font; the no-arg form has
	// nothing to derive from, hence null
	@Test
	public final void testGetValueDerived() {
		final Font value = Font.decode( "Monospaced-14" );
		final FontHint fontHint = new FontHint( HintWeight.MAX_WEIGHT ) {

			private static final long serialVersionUID = 1L;

			@Override
			protected Font derive( final Font original ) {
				return original.deriveFont( 14.0f );
			}
		};
		assertEquals( value, fontHint.getValue( font ) );
		assertNull( fontHint.getValue() );
	}

	// factory methods: absolute size...
	@Test
	public final void testFontHintSize() {
		final Font value = Font.decode( "Monospaced-14" );
		assertEquals( value, FontHint.size( HintWeight.MAX_WEIGHT, 14 )
				.getValue( font ) );
	}

	// ...relative size increase...
	@Test
	public final void testFontHintLarger() {
		final Font value = Font.decode( "Monospaced-14" );
		assertEquals( value, FontHint.larger( HintWeight.MAX_WEIGHT, 2 )
				.getValue( font ) );
	}

	// ...relative size decrease...
	@Test
	public final void testFontHintSmaller() {
		final Font value = Font.decode( "Monospaced-10" );
		assertEquals( value, FontHint.smaller( HintWeight.MAX_WEIGHT, 2 )
				.getValue( font ) );
	}

	// ...proportional scaling...
	@Test
	public final void testFontHintScaled() {
		final Font value = Font.decode( "Monospaced-6" );
		assertEquals( value, FontHint.scaled( HintWeight.MAX_WEIGHT, 0.5f )
				.getValue( font ) );
	}

	// ...style change, keeping the original size...
	@Test
	public final void testFontHintStyle() {
		final Font value = Font.decode( "Monospaced-BOLD-12" );
		assertEquals( value, FontHint.style( HintWeight.MAX_WEIGHT, Font.BOLD )
				.getValue( font ) );
	}

	// ...and style plus size combined
	@Test
	public final void testFontHintStyleAndSize() {
		final Font value = Font.decode( "Monospaced-BOLD-14" );
		assertEquals( value, FontHint.style( HintWeight.MAX_WEIGHT, Font.BOLD, 14 )
				.getValue( font ) );
	}

	// expected colours 0x595959/0xB6B6B6 presumably follow java.awt.Color's
	// darker()/brighter() 0.7 scale factor — confirm against the hint implementation
	@Test
	public final void testForegroundHintDarker() {
		final Color value = new Color( 0x595959 );
		assertEquals( value, ForegroundHint.darker( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}

	@Test
	public final void testForegroundHintBrighter() {
		final Color value = new Color( 0xB6B6B6 );
		assertEquals( value, ForegroundHint.brighter( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}

	@Test
	public final void testBackgroundHintDarker() {
		final Color value = new Color( 0x595959 );
		assertEquals( value, BackgroundHint.darker( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}

	@Test
	public final void testBackgroundHintBrighter() {
		final Color value = new Color( 0xB6B6B6 );
		assertEquals( value, BackgroundHint.brighter( HintWeight.MAX_WEIGHT )
				.getValue( color ) );
	}
}
Java
# AUTOGENERATED FILE FROM balenalib/beaglebone-green-gateway-ubuntu:cosmic-build

# remove several traces of debian python
RUN apt-get purge -y python.*

# http://bugs.python.org/issue19846
# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
ENV LANG C.UTF-8

# key 63C7CC90: public key "Simon McVittie <smcv@pseudorandom.co.uk>" imported
# key 3372DCFA: public key "Donald Stufft (dstufft) <donald@stufft.io>" imported
RUN gpg --keyserver keyring.debian.org --recv-keys 4DE8FF2A63C7CC90 \
	&& gpg --keyserver keyserver.ubuntu.com --recv-key 6E3CBCE93372DCFA \
	&& gpg --keyserver keyserver.ubuntu.com --recv-keys 0x52a43a1e4b77b059

ENV PYTHON_VERSION 3.5.10

# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'"
ENV PYTHON_PIP_VERSION 21.0.1

ENV SETUPTOOLS_VERSION 56.0.0

# Download and unpack a pre-built Python for armv7hf, verify its checksum, then
# bootstrap pip/setuptools (only when pip3 is not already present) and strip
# test directories and bytecode caches to keep the image small.
RUN set -x \
	&& curl -SLO "http://resin-packages.s3.amazonaws.com/python/v$PYTHON_VERSION/Python-$PYTHON_VERSION.linux-armv7hf-openssl1.1.tar.gz" \
	&& echo "4abc87b995e08c143de14f26d8ab6ffd9017aad400bf91bc36a802efda7fe27a  Python-$PYTHON_VERSION.linux-armv7hf-openssl1.1.tar.gz" | sha256sum -c - \
	&& tar -xzf "Python-$PYTHON_VERSION.linux-armv7hf-openssl1.1.tar.gz" --strip-components=1 \
	&& rm -rf "Python-$PYTHON_VERSION.linux-armv7hf-openssl1.1.tar.gz" \
	&& ldconfig \
	&& if [ ! -e /usr/local/bin/pip3 ]; then : \
		&& curl -SLO "https://raw.githubusercontent.com/pypa/get-pip/430ba37776ae2ad89f794c7a43b90dc23bac334c/get-pip.py" \
		&& echo "19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c  get-pip.py" | sha256sum -c - \
		&& python3 get-pip.py \
		&& rm get-pip.py \
	; fi \
	&& pip3 install --no-cache-dir --upgrade --force-reinstall pip=="$PYTHON_PIP_VERSION" setuptools=="$SETUPTOOLS_VERSION" \
	&& find /usr/local \
		\( -type d -a -name test -o -name tests \) \
		-o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \
		-exec rm -rf '{}' + \
	&& cd / \
	&& rm -rf /usr/src/python ~/.cache

# install "virtualenv", since the vast majority of users of this image will want it
RUN pip3 install --no-cache-dir virtualenv

ENV PYTHON_DBUS_VERSION 1.2.8

# install dbus-python dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
		libdbus-1-dev \
		libdbus-glib-1-dev \
	&& rm -rf /var/lib/apt/lists/* \
	&& apt-get -y autoremove

# install dbus-python (built from source against the matching major.minor Python)
RUN set -x \
	&& mkdir -p /usr/src/dbus-python \
	&& curl -SL "http://dbus.freedesktop.org/releases/dbus-python/dbus-python-$PYTHON_DBUS_VERSION.tar.gz" -o dbus-python.tar.gz \
	&& curl -SL "http://dbus.freedesktop.org/releases/dbus-python/dbus-python-$PYTHON_DBUS_VERSION.tar.gz.asc" -o dbus-python.tar.gz.asc \
	&& gpg --verify dbus-python.tar.gz.asc \
	&& tar -xzC /usr/src/dbus-python --strip-components=1 -f dbus-python.tar.gz \
	&& rm dbus-python.tar.gz* \
	&& cd /usr/src/dbus-python \
	&& PYTHON_VERSION=$(expr match "$PYTHON_VERSION" '\([0-9]*\.[0-9]*\)') ./configure \
	&& make -j$(nproc) \
	&& make install -j$(nproc) \
	&& cd / \
	&& rm -rf /usr/src/dbus-python

# make some useful symlinks that are expected to exist
RUN cd /usr/local/bin \
	&& ln -sf pip3 pip \
	&& { [ -e easy_install ] || ln -s easy_install-* easy_install; } \
	&& ln -sf idle3 idle \
	&& ln -sf pydoc3 pydoc \
	&& ln -sf python3 python \
	&& ln -sf python3-config python-config

# set PYTHONPATH to point to dist-packages
ENV PYTHONPATH /usr/lib/python3/dist-packages:$PYTHONPATH

CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]

# Smoke-test the installed Python stack.
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@python.sh" \
	&& echo "Running test-stack@python" \
	&& chmod +x test-stack@python.sh \
	&& bash test-stack@python.sh \
	&& rm -rf test-stack@python.sh

# Record image details for the `balena-info` command.
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Ubuntu cosmic \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nPython v3.5.10, Pip v21.0.1, Setuptools v56.0.0 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info

# Shim /bin/sh so the first shell invocation prints the image-info banner once,
# then restores the real shell.
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
	&& chmod +x /bin/sh-shim \
	&& cp /bin/sh /bin/sh.real \
	&& mv /bin/sh-shim /bin/sh
Java
/* * Copyright 2015-2016 DevCon5 GmbH, info@devcon5.ch * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.devcon5.cli; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import org.junit.Test; /** */ public class CLIExample { @CliOption(value = "x", hasArg = true) private String example; @CliOptionGroup private Structured credentials; private String postProcessed; @PostInject private void init(){ postProcessed = "an " + example; } @Test public void example() { //arrange String[] exampleArgs = {"-u", "hans", "-p", "wurst", "-x", "example"}; //act CLI.parse(exampleArgs).into(this); run(); //assert assertEquals("an example", postProcessed); } public void run() { assertThat(example, is(not(nullValue()))); assertThat(credentials.user, is(not(nullValue()))); assertThat(credentials.password, is(not(nullValue()))); } static class Structured { @CliOption(value = "u", hasArg = true) private String user; @CliOption(value = "p", hasArg = true) private String password; } }
Java
# -*- coding: utf-8 -*- """ Authors: Tim Hessels UNESCO-IHE 2016 Contact: t.hessels@unesco-ihe.org Repository: https://github.com/wateraccounting/wa Module: Collect/MOD17 Description: This module downloads MOD17 GPP data from http://e4ftl01.cr.usgs.gov/. Use the MOD17.GPP_8daily function to download and create 8 daily GPP images in Gtiff format. The data is available between 2000-02-18 till present. Examples: from wa.Collect import MOD17 MOD17.GPP_8daily(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20', latlim=[41, 45], lonlim=[-8, -5]) MOD17.NPP_yearly(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-20', latlim=[41, 45], lonlim=[-8, -5]) """ from .GPP_8daily import main as GPP_8daily from .NPP_yearly import main as NPP_yearly __all__ = ['GPP_8daily', 'NPP_yearly'] __version__ = '0.1'
Java
/*
 * Copyright 2015-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 */
package org.docksidestage.app.web.product;

import org.apache.commons.lang3.builder.ToStringBuilder;
import org.docksidestage.dbflute.allcommon.CDef;
import org.hibernate.validator.constraints.Length;

/**
 * Form bean bound to the product search screen.
 * All fields are optional search conditions; validation is limited to length checks.
 * @author jflute
 */
public class ProductSearchForm {

    /** Product name keyword (validated to at most 10 characters). */
    @Length(max = 10) // #simple_for_example just for validation example
    public String productName;

    /** Product status to filter by; no constraint declared, so presumably optional. */
    public CDef.ProductStatus productStatus;

    /** Purchasing member name keyword (validated to at most 5 characters). */
    @Length(max = 5) // #simple_for_example just for validation example
    public String purchaseMemberName;

    @Override
    public String toString() {
        // reflective dump of all public fields, for logging/debugging
        return ToStringBuilder.reflectionToString(this);
    }
}
Java
/** * @fileoverview Defines compressors utility methods. * * @see https://google.github.io/styleguide/javascriptguide.xml * @see https://developers.google.com/closure/compiler/docs/js-for-compiler * @module glize/compressors */ import * as lzw from 'lzw-compressor'; /** * Enumeration of available compression types. * @enum {string} */ export const TYPE = { LZW: 'lzw' }; /** * Compress data string using specified compression type. * @param {string} data Data to compress. * @param {string=} [opt_type=TYPE.LZW] Optional compression type. * @return {string} Returns compressed data. * @method * @example * const result = compress( * 'Any string of any length. Any string of any length. Any string of any length.'); * console.log(result); * //> Any string of aā leĈth. ĀĂĄĆĈĊČĎĂđēĕėďĚćĉċčďġgĔ. */ export const compress = (data, opt_type = TYPE.LZW) => { let result = ''; if (TYPE.LZW === opt_type) { result = lzw.compress(data); } return result; }; /** * Decompress data string using specified compression type. * @param {string} data Data to compress. * @param {string=} [opt_type=TYPE.LZW] Optional compression type. * @return {string} Returns compressed data. * @method * @example * const result = decompress('Any string of aā leĈth. ĀĂĄĆĈĊČĎĂđēĕėďĚćĉċčďġgĔ.'); * console.log(result); * //> Any string of any length. Any string of any length. Any string of any length. */ export const decompress = (data, opt_type = TYPE.LZW) => { let result = ''; if (TYPE.LZW === opt_type) { result = lzw.decompress(data); } return result; };
Java
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE(review): this is a pre-1.0 Rust dialect (`~` owned boxes, `int`,
// Freeze/Send kinds, `#[unsafe_no_drop_flag]`); it will not compile with a
// modern rustc and is documented as-is.

use thread::Mutex;
use mem::{replace, transmute};
use kinds::{Freeze, Send, marker};
use clone::{Clone, DeepClone};
use ops::Drop;
use cmp::{Eq, Ord};
use atomic::{atomic_fence_acq, atomic_xadd_relaxed, atomic_xsub_rel};

// Shared heap cell: the payload plus a reference count updated atomically.
struct ArcBox<T> {
    value: T,
    count: int
}

// Atomically reference-counted shared pointer.
#[unsafe_no_drop_flag]
pub struct Arc<T> {
    ptr: *mut ArcBox<T>
}

impl<T: Send + Freeze> Arc<T> {
    // Safe constructor: requires the payload to be sendable and deeply immutable.
    #[inline(always)]
    pub fn new(value: T) -> Arc<T> {
        unsafe {
            Arc::new_unchecked(value)
        }
    }
}

impl<T> Arc<T> {
    // Unchecked constructor: the caller must guarantee that sharing `value`
    // across threads is sound (e.g. interior mutability is externally locked).
    pub unsafe fn new_unchecked(value: T) -> Arc<T> {
        Arc{ptr: transmute(~ArcBox{value: value, count: 1})}
    }
}

impl<T> Arc<T> {
    // Borrows the shared value for the lifetime of this handle.
    #[inline(always)]
    pub fn borrow<'a>(&'a self) -> &'a T {
        unsafe { &(*self.ptr).value }
    }
}

// Reasoning behind the atomic memory ordering:
// http://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
#[unsafe_destructor]
impl<T> Drop for Arc<T> {
    fn drop(&mut self) {
        // null check: with #[unsafe_no_drop_flag] the destructor may run on a
        // zeroed-out value after a move
        if self.ptr != 0 as *mut ArcBox<T> {
            unsafe {
                // release-decrement; the thread that drops the last reference
                // acquires (fence) and then frees the box by re-owning it
                if atomic_xsub_rel(&mut (*self.ptr).count, 1) == 1 {
                    atomic_fence_acq();
                    let _: ~ArcBox<T> = transmute(self.ptr);
                }
            }
        }
    }
}

impl<T> Clone for Arc<T> {
    fn clone(&self) -> Arc<T> {
        unsafe {
            // relaxed is sufficient for an increment made through an existing owner
            atomic_xadd_relaxed(&mut (*self.ptr).count, 1);
            Arc { ptr: self.ptr }
        }
    }
}

impl<T: DeepClone> DeepClone for Arc<T> {
    // Clones the payload itself into a fresh, independent Arc.
    fn deep_clone(&self) -> Arc<T> {
        unsafe { Arc::new_unchecked(self.borrow().deep_clone()) }
    }
}

// Comparisons delegate to the shared values, not the pointers.
impl<T: Eq> Eq for Arc<T> {
    #[inline(always)]
    fn eq(&self, other: &Arc<T>) -> bool { *self.borrow() == *other.borrow() }

    #[inline(always)]
    fn ne(&self, other: &Arc<T>) -> bool { *self.borrow() != *other.borrow() }
}

impl<T: Ord> Ord for Arc<T> {
    #[inline(always)]
    fn lt(&self, other: &Arc<T>) -> bool { *self.borrow() < *other.borrow() }

    #[inline(always)]
    fn le(&self, other: &Arc<T>) -> bool { *self.borrow() <= *other.borrow() }

    #[inline(always)]
    fn gt(&self, other: &Arc<T>) -> bool { *self.borrow() > *other.borrow() }

    #[inline(always)]
    fn ge(&self, other: &Arc<T>) -> bool { *self.borrow() >= *other.borrow() }
}

// Payload for MutexArc: the value guarded by a mutex. The NoFreeze marker
// records that the contents are mutated (through the lock).
struct MutexArcBox<T> {
    mutex: Mutex,
    value: T,
    no_freeze: marker::NoFreeze
}

// Shared, mutable value: an Arc around a mutex-protected box.
pub struct MutexArc<T> {
    ptr: Arc<MutexArcBox<T>>
}

impl<T: Send> MutexArc<T> {
    pub fn new(value: T) -> MutexArc<T> {
        let b = MutexArcBox {
            mutex: Mutex::new(),
            value: value,
            no_freeze: marker::NoFreeze
        };
        unsafe {
            MutexArc { ptr: Arc::new_unchecked(b) }
        }
    }

    // Replaces the stored value, returning the previous one. The mutex is held
    // for the duration of the swap.
    pub fn swap(&self, value: T) -> T {
        unsafe {
            let ptr: &mut MutexArcBox<T> = transmute(self.ptr.borrow());
            let _guard = ptr.mutex.lock_guard();
            replace(&mut ptr.value, value)
        }
    }
}

impl<T> Clone for MutexArc<T> {
    #[inline(always)]
    fn clone(&self) -> MutexArc<T> {
        MutexArc { ptr: self.ptr.clone() }
    }
}
Java
# AUTOGENERATED FILE FROM balenalib/imx8mm-var-dart-fedora:33-build

ENV NODE_VERSION 15.6.0
ENV YARN_VERSION 1.22.4

# Import the release-signing key (trying several keyservers), then download,
# checksum/signature-verify and unpack Node.js and Yarn.
RUN for key in \
	6A010C5166006599AA17F08146C2130DFD2497F5 \
	; do \
		gpg --keyserver pgp.mit.edu --recv-keys "$key" || \
		gpg --keyserver keyserver.pgp.com --recv-keys "$key" || \
		gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" ; \
	done \
	&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-arm64.tar.gz" \
	&& echo "b0660398fe590f8588431a787e9b032c7271a2fa88306c7a26e751571df998e4  node-v$NODE_VERSION-linux-arm64.tar.gz" | sha256sum -c - \
	&& tar -xzf "node-v$NODE_VERSION-linux-arm64.tar.gz" -C /usr/local --strip-components=1 \
	&& rm "node-v$NODE_VERSION-linux-arm64.tar.gz" \
	&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
	&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
	&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
	&& mkdir -p /opt/yarn \
	&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
	&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
	&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
	&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
	&& npm config set unsafe-perm true -g --unsafe-perm \
	&& rm -rf /tmp/*

CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]

# Smoke-test the installed Node.js/Yarn stack.
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@node.sh" \
	&& echo "Running test-stack@node" \
	&& chmod +x test-stack@node.sh \
	&& bash test-stack@node.sh \
	&& rm -rf test-stack@node.sh

# Record image details for the `balena-info` command.
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Fedora 33 \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v15.6.0, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info

# Shim /bin/sh so the first shell invocation prints the image-info banner once,
# then restores the real shell.
RUN echo $'#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
	&& chmod +x /bin/sh-shim \
	&& cp /bin/sh /bin/sh.real \
	&& mv /bin/sh-shim /bin/sh
Java
package com.flora.support;

import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.Map;

import org.apache.velocity.VelocityContext;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.context.Context;

import com.flora.Config;

/**
 * Thin wrapper around {@link VelocityEngine} that merges a named template with a
 * caller-supplied model. The shared page tools from {@link Config#getPageTools()}
 * are mixed into the model before every merge, and templates are always read as
 * UTF-8.
 * <p>
 * NOTE(review): all three overloads swallow merge errors and yield an empty or
 * partial result — confirm callers rely on this best-effort contract before
 * tightening it to rethrow.
 */
public class VelocityTemplate {

	private VelocityEngine velocityEngine;
	private Config config;

	public VelocityTemplate() {
	}

	/**
	 * Renders {@code template} and returns the result as a String.
	 *
	 * @param template name/path of the Velocity template
	 * @param model    the model; mutated — the shared page tools are added to it
	 * @return the rendered text, or whatever partial output was produced before a failure
	 */
	public String parseTemplate(String template, Map model) {
		model.putAll(Config.getPageTools());
		Context context = new VelocityContext(model);
		Writer writer = new StringWriter();
		try {
			velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
		} catch (Exception e) {
			// swallowed: see class note — the caller receives the partial result
		}
		return writer.toString();
	}

	/**
	 * Renders {@code template} into the given writer. The writer is neither
	 * closed nor flushed here; the caller owns its lifecycle.
	 */
	public void parseTemplate(String template, Map model, Writer writer) {
		model.putAll(Config.getPageTools());
		Context context = new VelocityContext(model);
		try {
			velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
		} catch (Exception e) {
			// swallowed: see class note
		}
	}

	/**
	 * Renders {@code template} onto the given output stream. The bridging writer
	 * uses the platform default charset (no charset is passed to
	 * {@link OutputStreamWriter}); the stream itself is not closed here.
	 */
	public void parseTemplate(String template, Map model, OutputStream os) {
		model.putAll(Config.getPageTools());
		Context context = new VelocityContext(model);
		Writer writer = new OutputStreamWriter(os);
		try {
			velocityEngine.mergeTemplate(template, "UTF-8", context, writer);
			// BUGFIX: OutputStreamWriter buffers characters internally; without this
			// flush the rendered output (or its tail) never reached the stream.
			writer.flush();
		} catch (Exception e) {
			// swallowed: see class note
		}
	}

	public void setVelocityEngine(VelocityEngine velocityEngine) {
		this.velocityEngine = velocityEngine;
	}

	public Config getConfig() {
		return config;
	}

	public void setConfig(Config config) {
		this.config = config;
	}
}
Java
package org.apache.lucene.search;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

/**
 * Subclass of FilteredTermEnum for enumerating all terms that match the
 * specified wildcard filter term.
 * <p>
 * Term enumerations are always ordered by Term.compareTo().  Each term in
 * the enumeration is greater than all that precede it.
 *
 * @version $Id: WildcardTermEnum.java 329859 2005-10-31 17:05:36Z bmesser $
 */
public class WildcardTermEnum extends FilteredTermEnum {
  /** The wildcard term being expanded. */
  Term searchTerm;
  String field = "";
  // the tail of the pattern from the first wildcard onwards
  String text = "";
  // the literal (wildcard-free) prefix of the pattern
  String pre = "";
  int preLen = 0;
  // set once the sorted enumeration has passed the last possible match
  boolean endEnum = false;

  /**
   * Creates a new <code>WildcardTermEnum</code>.  Passing in a
   * {@link org.apache.lucene.index.Term Term} that does not contain a
   * <code>WILDCARD_CHAR</code> will cause an exception to be thrown.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   */
  public WildcardTermEnum(IndexReader reader, Term term) throws IOException {
    super();
    searchTerm = term;
    field = searchTerm.field();
    text = searchTerm.text();

    // Locate the first wildcard ('*' or '?') in the pattern.
    // NOTE(review): if the term contains no wildcard at all, idx stays -1 and
    // substring(0, -1) below throws StringIndexOutOfBoundsException — this is
    // the "exception" promised in the constructor javadoc.
    int sidx = text.indexOf(WILDCARD_STRING);
    int cidx = text.indexOf(WILDCARD_CHAR);
    int idx = sidx;
    if (idx == -1) {
      idx = cidx;
    }
    else if (cidx >= 0) {
      idx = Math.min(idx, cidx);
    }

    // Split the pattern into literal prefix + wildcard tail, and position the
    // underlying term enumeration at the first term starting with the prefix.
    pre = searchTerm.text().substring(0,idx);
    preLen = pre.length();
    text = text.substring(preLen);
    setEnum(reader.terms(new Term(searchTerm.field(), pre)));
  }

  protected final boolean termCompare(Term term) {
    // Identity compare of the field names — presumably relies on Lucene
    // interning field strings; confirm before changing to equals().
    if (field == term.field()) {
      String searchText = term.text();
      if (searchText.startsWith(pre)) {
        // only the tail after the literal prefix needs wildcard matching
        return wildcardEquals(text, 0, searchText, preLen);
      }
    }
    // terms are sorted, so once the prefix stops matching no later term can match
    endEnum = true;
    return false;
  }

  public final float difference() {
    // all matching terms score equally
    return 1.0f;
  }

  public final boolean endEnum() {
    return endEnum;
  }

  /********************************************
   * String equality with support for wildcards
   ********************************************/

  public static final char WILDCARD_STRING = '*';
  public static final char WILDCARD_CHAR = '?';

  /**
   * Determines if a word matches a wildcard pattern.
   * <small>Work released by Granta Design Ltd after originally being done on
   * company time.</small>
   *
   * Matching starts at pattern[patternIdx] against string[stringIdx]; '*'
   * matches any run of characters (handled by recursion), '?' matches exactly
   * one character.
   */
  public static final boolean wildcardEquals(String pattern, int patternIdx,
    String string, int stringIdx)
  {
    int p = patternIdx;
    for (int s = stringIdx; ; ++p, ++s)
      {
        // End of string yet?
        boolean sEnd = (s >= string.length());
        // End of pattern yet?
        boolean pEnd = (p >= pattern.length());

        // If we're looking at the end of the string...
        if (sEnd)
        {
          // Assume the only thing left on the pattern is/are wildcards
          boolean justWildcardsLeft = true;

          // Current wildcard position
          int wildcardSearchPos = p;
          // While we haven't found the end of the pattern,
          // and haven't encountered any non-wildcard characters
          while (wildcardSearchPos < pattern.length() && justWildcardsLeft)
          {
            // Check the character at the current position
            char wildchar = pattern.charAt(wildcardSearchPos);

            // If it's not a wildcard character, then there is more
            // pattern information after this/these wildcards.
            if (wildchar != WILDCARD_CHAR && wildchar != WILDCARD_STRING)
            {
              justWildcardsLeft = false;
            }
            else
            {
              // to prevent "cat" matches "ca??"
              if (wildchar == WILDCARD_CHAR) {
                return false;
              }

              // Look at the next character
              wildcardSearchPos++;
            }
          }

          // This was a prefix wildcard search, and we've matched, so
          // return true.
          if (justWildcardsLeft)
          {
            return true;
          }
        }

        // If we've gone past the end of the string, or the pattern,
        // return false.
        if (sEnd || pEnd)
        {
          break;
        }

        // Match a single character, so continue.
        if (pattern.charAt(p) == WILDCARD_CHAR)
        {
          continue;
        }

        // '*': try every possible split point of the remaining string,
        // longest match first, recursing for the rest of the pattern.
        if (pattern.charAt(p) == WILDCARD_STRING)
        {
          // Look at the character beyond the '*'.
          ++p;
          // Examine the string, starting at the last character.
          for (int i = string.length(); i >= s; --i)
          {
            if (wildcardEquals(pattern, p, string, i))
            {
              return true;
            }
          }
          break;
        }
        // literal character: must match exactly
        if (pattern.charAt(p) != string.charAt(s))
        {
          break;
        }
      }
    return false;
  }

  public void close() throws IOException
  {
    // release the underlying enumeration and drop references for GC
    super.close();
    searchTerm = null;
    field = null;
    text = null;
  }
}
Java
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] //! Test infrastructure for the Diem VM. //! //! This crate contains helpers for executing tests against the Diem VM. use diem_types::{transaction::TransactionStatus, vm_status::KeptVMStatus}; pub mod account; pub mod account_universe; pub mod common_transactions; pub mod compile; pub mod currencies; pub mod data_store; pub mod execution_strategies; pub mod executor; pub mod gas_costs; mod golden_outputs; pub mod keygen; mod proptest_types; pub fn assert_status_eq(s1: &KeptVMStatus, s2: &KeptVMStatus) -> bool { assert_eq!(s1, s2); true } pub fn transaction_status_eq(t1: &TransactionStatus, t2: &TransactionStatus) -> bool { match (t1, t2) { (TransactionStatus::Discard(s1), TransactionStatus::Discard(s2)) => { assert_eq!(s1, s2); true } (TransactionStatus::Keep(s1), TransactionStatus::Keep(s2)) => { assert_eq!(s1, s2); true } _ => false, } } #[macro_export] macro_rules! assert_prologue_parity { ($e1:expr, $e2:expr, $e3:expr) => { assert_eq!($e1.unwrap(), $e3); assert!(transaction_status_eq($e2, &TransactionStatus::Discard($e3))); }; } #[macro_export] macro_rules! assert_prologue_disparity { ($e1:expr => $e2:expr, $e3:expr => $e4:expr) => { assert_eq!($e1, $e2); assert!(transaction_status_eq($e3, &$e4)); }; } /// Returns the name of the current function. This macro is used to derive the name for the golden /// file of each test case. #[macro_export] macro_rules! current_function_name { () => {{ fn f() {} fn type_name_of<T>(_: T) -> &'static str { std::any::type_name::<T>() } let name = type_name_of(f); &name[..name.len() - 3] }}; }
Java
# Carlina rhopalachyron Cadevall & Sallent SPECIES #### Status ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
Java
package com.github.ayltai.foscam.client; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import android.support.annotation.NonNull; import android.support.annotation.VisibleForTesting; import android.support.v4.util.Pair; import rx.Subscriber; import rx.Subscription; import rx.subjects.PublishSubject; import rx.subjects.SerializedSubject; import rx.subjects.Subject; public /* final */ class RxBus { private static final ThreadLocal<RxBus> INSTANCE = new ThreadLocal<>(); private final Map<Pair<Class, Subscriber>, Subscription> subscriptions = new HashMap<>(); private final Subject<Object, ?> bus = new SerializedSubject<>(PublishSubject.create()); public static RxBus getInstance() { final RxBus instance = RxBus.INSTANCE.get(); if (instance == null) { RxBus.INSTANCE.set(new RxBus()); return RxBus.INSTANCE.get(); } return instance; } @VisibleForTesting RxBus() { } public <T> void register(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) { final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber); if (this.subscriptions.containsKey(key)) throw new IllegalArgumentException("The given subscriber is already registered"); this.subscriptions.put(key, this.bus.filter(event -> event != null && event.getClass().equals(eventType)).subscribe(value -> subscriber.onNext((T)value))); } public <T> void unregister(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) { final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber); if (this.subscriptions.containsKey(key)) this.subscriptions.remove(key).unsubscribe(); } public void unregisterAll() { for (final Pair<Class, Subscriber> pair : new HashSet<>(this.subscriptions.keySet())) { this.unregister(pair.first, pair.second); } } public <T> void send(@NonNull final T event) { if (!this.subscriptions.isEmpty()) this.bus.onNext(event); } }
Java
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Term aggregations."""

from __future__ import unicode_literals

from timesketch.lib.aggregators import manager
from timesketch.lib.aggregators import interface


def get_spec(field, limit=10, query='', query_dsl=''):
    """Returns aggregation specs for a term of filtered events.

    The aggregation spec will summarize values of an attribute
    whose events fall under a filter.

    Args:
        field (str): this denotes the event attribute that is used
            for aggregation.
        limit (int): How many buckets to return, defaults to 10.
        query (str): the query field to run on all documents prior to
            aggregating the results.
        query_dsl (str): the query DSL field to run on all documents prior
            to aggregating the results (optional). Either a query string
            or a query DSL has to be present.

    Raises:
        ValueError: if neither query_string or query_dsl is provided.

    Returns:
        a dict value that can be used as an aggregation spec.
    """
    if query:
        # Wrap the plain query string in a bool/must query_string clause.
        query_filter = {
            'bool': {
                'must': [
                    {
                        'query_string': {
                            'query': query
                        }
                    }
                ]
            }
        }
    elif query_dsl:
        # A raw DSL dict is passed through unchanged.
        query_filter = query_dsl
    else:
        raise ValueError('Neither query nor query DSL provided.')

    return {
        'query': query_filter,
        'aggs': {
            'aggregation': {
                'terms': {
                    'field': field,
                    'size': limit
                }
            }
        }
    }


class FilteredTermsAggregation(interface.BaseAggregator):
    """Query Filter Term Aggregation."""

    NAME = 'query_bucket'
    DISPLAY_NAME = 'Filtered Terms Aggregation'
    DESCRIPTION = 'Aggregating values of a field after applying a filter'

    # Chart types the UI may render for this aggregator's results.
    SUPPORTED_CHARTS = frozenset(
        ['barchart', 'circlechart', 'hbarchart', 'linechart', 'table'])

    # Form definitions consumed by the frontend to build the input dialog.
    FORM_FIELDS = [
        {
            'type': 'ts-dynamic-form-select-input',
            'name': 'supported_charts',
            'label': 'Chart type to render',
            'options': list(SUPPORTED_CHARTS),
            'display': True
        },
        {
            'name': 'query_string',
            'type': 'ts-dynamic-form-text-input',
            'label': 'The filter query to narrow down the result set',
            'placeholder': 'Query',
            'default_value': '',
            'display': True
        },
        {
            'name': 'query_dsl',
            'type': 'ts-dynamic-form-text-input',
            'label': 'The filter query DSL to narrow down the result',
            'placeholder': 'Query DSL',
            'default_value': '',
            'display': False
        },
        {
            'name': 'field',
            'type': 'ts-dynamic-form-text-input',
            'label': 'What field to aggregate.',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-datetime-input',
            'name': 'start_time',
            'label': (
                'ISO formatted timestamp for the start time '
                'of the aggregated data'),
            'placeholder': 'Enter a start date for the aggregation',
            'default_value': '',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-datetime-input',
            'name': 'end_time',
            'label': 'ISO formatted end time for the aggregation',
            'placeholder': 'Enter an end date for the aggregation',
            'default_value': '',
            'display': True
        },
        {
            'type': 'ts-dynamic-form-text-input',
            'name': 'limit',
            'label': 'Number of results to return',
            'placeholder': 'Enter number of results to return',
            'default_value': '10',
            'display': True
        }
    ]

    @property
    def chart_title(self):
        """Returns a title for the chart."""
        # self.field is only set after run() has been called.
        if self.field:
            return 'Top filtered results for "{0:s}"'.format(self.field)
        return 'Top results for an unknown field after filtering'

    # pylint: disable=arguments-differ
    def run(
            self, field, query_string='', query_dsl='',
            supported_charts='table', start_time='', end_time='', limit=10):
        """Run the aggregation.

        Args:
            field (str): this denotes the event attribute that is used
                for aggregation.
            query_string (str): the query field to run on all documents prior
                to aggregating the results.
            query_dsl (str): the query DSL field to run on all documents prior
                to aggregating the results. Either a query string or a query
                DSL has to be present.
            supported_charts: Chart type to render. Defaults to table.
            start_time: Optional ISO formatted date string that limits the
                time range for the aggregation.
            end_time: Optional ISO formatted date string that limits the
                time range for the aggregation.
            limit (int): How many buckets to return, defaults to 10.

        Returns:
            Instance of interface.AggregationResult with aggregation result.

        Raises:
            ValueError: if neither query_string or query_dsl is provided.
        """
        if not (query_string or query_dsl):
            raise ValueError('Both query_string and query_dsl are missing')

        # Remembered so chart_title can reference the aggregated field.
        self.field = field

        formatted_field_name = self.format_field_by_type(field)
        aggregation_spec = get_spec(
            field=formatted_field_name, limit=limit, query=query_string,
            query_dsl=query_dsl)
        aggregation_spec = self._add_query_to_aggregation_spec(
            aggregation_spec, start_time=start_time, end_time=end_time)

        # Encoding information for Vega-Lite.
        encoding = {
            'x': {
                'field': field,
                'type': 'nominal',
                'sort': {
                    'op': 'sum',
                    'field': 'count',
                    'order': 'descending'
                }
            },
            'y': {'field': 'count', 'type': 'quantitative'},
            'tooltip': [
                {'field': field, 'type': 'nominal'},
                {'field': 'count', 'type': 'quantitative'}],
        }

        response = self.opensearch_aggregation(aggregation_spec)
        # Walk down to the term buckets; each level defaults to empty if
        # the backend returned nothing.
        aggregations = response.get('aggregations', {})
        aggregation = aggregations.get('aggregation', {})

        buckets = aggregation.get('buckets', [])
        values = []
        for bucket in buckets:
            d = {
                field: bucket.get('key', 'N/A'),
                'count': bucket.get('doc_count', 0)
            }
            values.append(d)

        if query_string:
            extra_query_url = 'AND {0:s}'.format(query_string)
        else:
            extra_query_url = ''

        return interface.AggregationResult(
            encoding=encoding, values=values, chart_type=supported_charts,
            sketch_url=self._sketch_url, field=field,
            extra_query_url=extra_query_url)


manager.AggregatorManager.register_aggregator(FilteredTermsAggregation)
Java
<?xml version="1.0" encoding="ascii"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <title>gluon.validators.IS_IN_DB</title> <link rel="stylesheet" href="epydoc.css" type="text/css" /> <script type="text/javascript" src="epydoc.js"></script> </head> <body bgcolor="white" text="black" link="blue" vlink="#204080" alink="#204080"> <!-- ==================== NAVIGATION BAR ==================== --> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Home link --> <th>&nbsp;&nbsp;&nbsp;<a href="gluon-module.html">Home</a>&nbsp;&nbsp;&nbsp;</th> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <!-- Project homepage --> <th class="navbar" align="right" width="100%"> <table border="0" cellpadding="0" cellspacing="0"> <tr><th class="navbar" align="center" ><a class="navbar" target="_top" href="http://www.web2py.com">web2py Web Framework</a></th> </tr></table></th> </tr> </table> <table width="100%" cellpadding="0" cellspacing="0"> <tr valign="top"> <td width="100%"> <span class="breadcrumbs"> <a href="gluon-module.html">Package&nbsp;gluon</a> :: <a href="gluon.validators-module.html" onclick="show_private();">Module&nbsp;validators</a> :: Class&nbsp;IS_IN_DB </span> </td> <td> <table cellpadding="0" cellspacing="0"> <!-- hide/show private --> <tr><td align="right"><span class="options">[<a href="javascript:void(0);" class="privatelink" onclick="toggle_private();">hide&nbsp;private</a>]</span></td></tr> <tr><td align="right"><span class="options" >[<a href="frames.html" target="_top">frames</a >]&nbsp;|&nbsp;<a 
href="gluon.validators.IS_IN_DB-class.html" target="_top">no&nbsp;frames</a>]</span></td></tr> </table> </td> </tr> </table> <!-- ==================== CLASS DESCRIPTION ==================== --> <h1 class="epydoc">Class IS_IN_DB</h1><p class="nomargin-top"><span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB">source&nbsp;code</a></span></p> <pre class="base-tree"> object --+ | <a href="gluon.validators.Validator-class.html" onclick="show_private();">Validator</a> --+ | <strong class="uidshort">IS_IN_DB</strong> </pre> <hr /> <p>example:</p> <pre class="literalblock"> INPUT(_type='text', _name='name', requires=IS_IN_DB(db, db.mytable.myfield, zero='')) </pre> <p>used for reference fields, rendered as a dropbox</p> <!-- ==================== INSTANCE METHODS ==================== --> <a name="section-InstanceMethods"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Instance Methods</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-InstanceMethods" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="gluon.validators.IS_IN_DB-class.html#__init__" class="summary-sig-name">__init__</a>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">dbset</span>, <span class="summary-sig-arg">field</span>, <span class="summary-sig-arg">label</span>=<span class="summary-sig-default">None</span>, <span class="summary-sig-arg">error_message</span>=<span 
class="summary-sig-default"><code class="variable-quote">'</code><code class="variable-string">value not in database</code><code class="variable-quote">'</code></span>, <span class="summary-sig-arg">orderby</span>=<span class="summary-sig-default">None</span>, <span class="summary-sig-arg">groupby</span>=<span class="summary-sig-default">None</span>, <span class="summary-sig-arg">distinct</span>=<span class="summary-sig-default">None</span>, <span class="summary-sig-arg">cache</span>=<span class="summary-sig-default">None</span>, <span class="summary-sig-arg">multiple</span>=<span class="summary-sig-default">False</span>, <span class="summary-sig-arg">zero</span>=<span class="summary-sig-default"><code class="variable-quote">'</code><code class="variable-string"></code><code class="variable-quote">'</code></span>, <span class="summary-sig-arg">sort</span>=<span class="summary-sig-default">False</span>, <span class="summary-sig-arg">_and</span>=<span class="summary-sig-default">None</span>)</span><br /> x.__init__(...) 
initializes x; see help(type(x)) for signature</td> <td align="right" valign="top"> <span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.__init__">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="set_self_id"></a><span class="summary-sig-name">set_self_id</span>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">id</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.set_self_id">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="build_set"></a><span class="summary-sig-name">build_set</span>(<span class="summary-sig-arg">self</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.build_set">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a name="options"></a><span class="summary-sig-name">options</span>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">zero</span>=<span class="summary-sig-default">True</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.options">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td width="15%" align="right" valign="top" 
class="summary"> <span class="summary-type">&nbsp;</span> </td><td class="summary"> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr> <td><span class="summary-sig"><a href="gluon.validators.IS_IN_DB-class.html#__call__" class="summary-sig-name">__call__</a>(<span class="summary-sig-arg">self</span>, <span class="summary-sig-arg">value</span>)</span></td> <td align="right" valign="top"> <span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.__call__">source&nbsp;code</a></span> </td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="indent-wrapped-lines"><b>Inherited from <code><a href="gluon.validators.Validator-class.html" onclick="show_private();">Validator</a></code></b>: <code><a href="gluon.validators.Validator-class.html#formatter">formatter</a></code> </p> <p class="indent-wrapped-lines"><b>Inherited from <code>object</code></b>: <code>__delattr__</code>, <code>__format__</code>, <code>__getattribute__</code>, <code>__hash__</code>, <code>__new__</code>, <code>__reduce__</code>, <code>__reduce_ex__</code>, <code>__repr__</code>, <code>__setattr__</code>, <code>__sizeof__</code>, <code>__str__</code>, <code>__subclasshook__</code> </p> </td> </tr> </table> <!-- ==================== PROPERTIES ==================== --> <a name="section-Properties"></a> <table class="summary" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Properties</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-Properties" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> <tr> <td colspan="2" class="summary"> <p class="indent-wrapped-lines"><b>Inherited from <code>object</code></b>: <code>__class__</code> </p> </td> 
</tr> </table> <!-- ==================== METHOD DETAILS ==================== --> <a name="section-MethodDetails"></a> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr bgcolor="#70b0f0" class="table-header"> <td colspan="2" class="table-header"> <table border="0" cellpadding="0" cellspacing="0" width="100%"> <tr valign="top"> <td align="left"><span class="table-header">Method Details</span></td> <td align="right" valign="top" ><span class="options">[<a href="#section-MethodDetails" class="privatelink" onclick="toggle_private();" >hide private</a>]</span></td> </tr> </table> </td> </tr> </table> <a name="__init__"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">__init__</span>(<span class="sig-arg">self</span>, <span class="sig-arg">dbset</span>, <span class="sig-arg">field</span>, <span class="sig-arg">label</span>=<span class="sig-default">None</span>, <span class="sig-arg">error_message</span>=<span class="sig-default"><code class="variable-quote">'</code><code class="variable-string">value not in database</code><code class="variable-quote">'</code></span>, <span class="sig-arg">orderby</span>=<span class="sig-default">None</span>, <span class="sig-arg">groupby</span>=<span class="sig-default">None</span>, <span class="sig-arg">distinct</span>=<span class="sig-default">None</span>, <span class="sig-arg">cache</span>=<span class="sig-default">None</span>, <span class="sig-arg">multiple</span>=<span class="sig-default">False</span>, <span class="sig-arg">zero</span>=<span class="sig-default"><code class="variable-quote">'</code><code class="variable-string"></code><code class="variable-quote">'</code></span>, <span class="sig-arg">sort</span>=<span class="sig-default">False</span>, <span 
class="sig-arg">_and</span>=<span class="sig-default">None</span>)</span> <br /><em class="fname">(Constructor)</em> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.__init__">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <p>x.__init__(...) initializes x; see help(type(x)) for signature</p> <dl class="fields"> <dt>Overrides: object.__init__ <dd><em class="note">(inherited documentation)</em></dd> </dt> </dl> </td></tr></table> </div> <a name="__call__"></a> <div> <table class="details" border="1" cellpadding="3" cellspacing="0" width="100%" bgcolor="white"> <tr><td> <table width="100%" cellpadding="0" cellspacing="0" border="0"> <tr valign="top"><td> <h3 class="epydoc"><span class="sig"><span class="sig-name">__call__</span>(<span class="sig-arg">self</span>, <span class="sig-arg">value</span>)</span> <br /><em class="fname">(Call operator)</em> </h3> </td><td align="right" valign="top" ><span class="codelink"><a href="gluon.validators-pysrc.html#IS_IN_DB.__call__">source&nbsp;code</a></span>&nbsp; </td> </tr></table> <dl class="fields"> <dt>Overrides: <a href="gluon.validators.Validator-class.html#__call__">Validator.__call__</a> </dt> </dl> </td></tr></table> </div> <br /> <!-- ==================== NAVIGATION BAR ==================== --> <table class="navbar" border="0" width="100%" cellpadding="0" bgcolor="#a0c0ff" cellspacing="0"> <tr valign="middle"> <!-- Home link --> <th>&nbsp;&nbsp;&nbsp;<a href="gluon-module.html">Home</a>&nbsp;&nbsp;&nbsp;</th> <!-- Tree link --> <th>&nbsp;&nbsp;&nbsp;<a href="module-tree.html">Trees</a>&nbsp;&nbsp;&nbsp;</th> <!-- Index link --> <th>&nbsp;&nbsp;&nbsp;<a href="identifier-index.html">Indices</a>&nbsp;&nbsp;&nbsp;</th> <!-- Help link --> <th>&nbsp;&nbsp;&nbsp;<a href="help.html">Help</a>&nbsp;&nbsp;&nbsp;</th> <!-- Project homepage --> <th class="navbar" align="right" width="100%"> <table border="0" cellpadding="0" cellspacing="0"> <tr><th class="navbar" 
align="center"
          ><a class="navbar" target="_top" href="http://www.web2py.com">web2py Web Framework</a></th>
          </tr></table></th>
  </tr>
</table>
<table border="0" cellpadding="0" cellspacing="0" width="100%">
  <tr>
    <td align="left" class="footer">
    Generated by Epydoc 3.0.1 on Thu Nov 28 13:54:45 2013
    </td>
    <td align="right" class="footer">
      <a target="mainFrame" href="http://epydoc.sourceforge.net"
        >http://epydoc.sourceforge.net</a>
    </td>
  </tr>
</table>

<script type="text/javascript">
  <!--
  // Private objects are initially displayed (because if
  // javascript is turned off then we want them to be
  // visible); but by default, we want to hide them.  So hide
  // them unless we have a cookie that says to show them.
  checkCookie();
  // -->
</script>
</body>
</html>
Java
/* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ /* * This code was generated by https://github.com/googleapis/google-api-java-client-services/ * Modify at your own risk. */ package com.google.api.services.retail.v2; /** * Available OAuth 2.0 scopes for use with the Retail API. * * @since 1.4 */ public class CloudRetailScopes { /** See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account.. */ public static final String CLOUD_PLATFORM = "https://www.googleapis.com/auth/cloud-platform"; /** * Returns an unmodifiable set that contains all scopes declared by this class. * * @since 1.16 */ public static java.util.Set<String> all() { java.util.Set<String> set = new java.util.HashSet<String>(); set.add(CLOUD_PLATFORM); return java.util.Collections.unmodifiableSet(set); } private CloudRetailScopes() { } }
Java
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from textwrap import dedent
import unittest

from eventlet.green import ssl
import mock
from six.moves.configparser import NoSectionError, NoOptionError

from swift.common.middleware import memcache
from swift.common.memcached import MemcacheRing
from swift.common.swob import Request
from swift.common.wsgi import loadapp
from test.unit import with_tempdir, patch_policies


class FakeApp(object):
    # Minimal WSGI app: returns the environ dict so tests can inspect
    # what the middleware injected into it.
    def __call__(self, env, start_response):
        return env


class ExcConfigParser(object):
    # Fake ConfigParser that records (by raising) which path was read.
    def read(self, path):
        raise Exception('read called with %r' % path)


class EmptyConfigParser(object):
    # Fake ConfigParser that behaves as if no config file exists.
    def read(self, path):
        return False


def get_config_parser(memcache_servers='1.2.3.4:5',
                      memcache_serialization_support='1',
                      memcache_max_connections='4',
                      section='memcache'):
    # Returns a fake ConfigParser *class* whose get()/items() serve the
    # supplied values. Passing the string 'error' for any value makes
    # get() raise NoOptionError for that option instead.
    _srvs = memcache_servers
    _sers = memcache_serialization_support
    _maxc = memcache_max_connections
    _section = section

    class SetConfigParser(object):

        def items(self, section_name):
            if section_name != section:
                raise NoSectionError(section_name)
            return {
                'memcache_servers': memcache_servers,
                'memcache_serialization_support':
                memcache_serialization_support,
                'memcache_max_connections': memcache_max_connections,
            }

        def read(self, path):
            return True

        def get(self, section, option):
            if _section == section:
                if option == 'memcache_servers':
                    if _srvs == 'error':
                        raise NoOptionError(option, section)
                    return _srvs
                elif option == 'memcache_serialization_support':
                    if _sers == 'error':
                        raise NoOptionError(option, section)
                    return _sers
                elif option in ('memcache_max_connections',
                                'max_connections'):
                    if _maxc == 'error':
                        raise NoOptionError(option, section)
                    return _maxc
                else:
                    raise NoOptionError(option, section)
            else:
                raise NoSectionError(option)

    return SetConfigParser


def start_response(*args):
    # No-op WSGI start_response callable used by the tests.
    pass


class TestCacheMiddleware(unittest.TestCase):
    # Exercises MemcacheMiddleware configuration loading: inline filter
    # conf vs. /etc/swift/memcache.conf, option precedence, and defaults.

    def setUp(self):
        self.app = memcache.MemcacheMiddleware(FakeApp(), {})

    def test_cache_middleware(self):
        # The middleware must place a MemcacheRing under 'swift.cache'.
        req = Request.blank('/something', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertTrue('swift.cache' in resp)
        self.assertTrue(isinstance(resp['swift.cache'], MemcacheRing))

    def test_conf_default_read(self):
        # With any option missing from the inline conf, the middleware
        # falls back to reading /etc/swift/memcache.conf.
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            for d in ({},
                      {'memcache_servers': '6.7.8.9:10'},
                      {'memcache_serialization_support': '0'},
                      {'memcache_max_connections': '30'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_serialization_support': '0'},
                      {'memcache_servers': '6.7.8.9:10',
                       'memcache_max_connections': '30'},
                      {'memcache_serialization_support': '0',
                       'memcache_max_connections': '30'}
                      ):
                with self.assertRaises(Exception) as catcher:
                    memcache.MemcacheMiddleware(FakeApp(), d)
                self.assertEqual(
                    str(catcher.exception),
                    "read called with '/etc/swift/memcache.conf'")

    def test_conf_set_no_read(self):
        # When every option is supplied inline, the conf file is not read.
        with mock.patch.object(memcache, 'ConfigParser', ExcConfigParser):
            exc = None
            try:
                memcache.MemcacheMiddleware(
                    FakeApp(), {'memcache_servers': '1.2.3.4:5',
                                'memcache_serialization_support': '2',
                                'memcache_max_connections': '30'})
            except Exception as err:
                exc = err
        self.assertIsNone(exc)

    def test_conf_default(self):
        # No conf file and no inline conf: built-in defaults apply.
        with mock.patch.object(memcache, 'ConfigParser', EmptyConfigParser):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline(self):
        # Inline options override what the conf file provides.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'memcache_max_connections': '5'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 5)

    def test_conf_inline_ratelimiting(self):
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'error_suppression_limit': '5',
                 'error_suppression_interval': '2.5'})
        self.assertEqual(app.memcache._error_limit_count, 5)
        self.assertEqual(app.memcache._error_limit_time, 2.5)
        self.assertEqual(app.memcache._error_limit_duration, 2.5)

    def test_conf_inline_tls(self):
        # TLS options should build an SSL context from the given files.
        fake_context = mock.Mock()
        with mock.patch.object(ssl, 'create_default_context',
                               return_value=fake_context):
            with mock.patch.object(memcache, 'ConfigParser',
                                   get_config_parser()):
                memcache.MemcacheMiddleware(
                    FakeApp(),
                    {'tls_enabled': 'true',
                     'tls_cafile': 'cafile',
                     'tls_certfile': 'certfile',
                     'tls_keyfile': 'keyfile'})
            ssl.create_default_context.assert_called_with(cafile='cafile')
            fake_context.load_cert_chain.assert_called_with('certfile',
                                                            'keyfile')

    def test_conf_extra_no_section(self):
        # A conf file lacking the [memcache] section yields the defaults.
        with mock.patch.object(memcache, 'ConfigParser',
                               get_config_parser(section='foobar')):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_extra_no_option(self):
        # Every option raising NoOptionError also yields the defaults.
        replacement_parser = get_config_parser(
            memcache_servers='error', memcache_serialization_support='error',
            memcache_max_connections='error')
        with mock.patch.object(memcache, 'ConfigParser', replacement_parser):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '127.0.0.1:11211')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, False)
        self.assertEqual(
            app.memcache._client_cache['127.0.0.1:11211'].max_size, 2)

    def test_conf_inline_other_max_conn(self):
        # 'max_connections' is accepted as an alias for
        # 'memcache_max_connections'.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'max_connections': '5'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 5)

    def test_conf_inline_bad_max_conn(self):
        # An unparseable inline max_connections falls back to the value
        # from the extra conf file (4 from get_config_parser's default).
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0',
                 'max_connections': 'bad42'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 4)

    def test_conf_from_extra_conf(self):
        # All values come from the (fake) memcache.conf.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '1.2.3.4:5')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['1.2.3.4:5'].max_size, 4)

    def test_conf_from_extra_conf_bad_max_conn(self):
        # A bad max_connections in the conf file falls back to the
        # built-in default of 2.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser(
                memcache_max_connections='bad42')):
            app = memcache.MemcacheMiddleware(FakeApp(), {})
        self.assertEqual(app.memcache_servers, '1.2.3.4:5')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['1.2.3.4:5'].max_size, 2)

    def test_conf_from_inline_and_maxc_from_extra_conf(self):
        # Inline servers/serialization plus max_connections from the file.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_serialization_support': '0'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, True)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 4)

    def test_conf_from_inline_and_sers_from_extra_conf(self):
        # Inline servers/max_connections plus serialization from the file.
        with mock.patch.object(memcache, 'ConfigParser', get_config_parser()):
            app = memcache.MemcacheMiddleware(
                FakeApp(),
                {'memcache_servers': '6.7.8.9:10',
                 'memcache_max_connections': '42'})
        self.assertEqual(app.memcache_servers, '6.7.8.9:10')
        self.assertEqual(app.memcache._allow_pickle, False)
        self.assertEqual(app.memcache._allow_unpickle, True)
        self.assertEqual(
            app.memcache._client_cache['6.7.8.9:10'].max_size, 42)

    def test_filter_factory(self):
        factory = memcache.filter_factory({'max_connections': '3'},
                                          memcache_servers='10.10.10.10:10',
                                          memcache_serialization_support='1')
        thefilter = factory('myapp')
        self.assertEqual(thefilter.app, 'myapp')
        self.assertEqual(thefilter.memcache_servers, '10.10.10.10:10')
        self.assertEqual(thefilter.memcache._allow_pickle, False)
        self.assertEqual(thefilter.memcache._allow_unpickle, True)
        self.assertEqual(
            thefilter.memcache._client_cache['10.10.10.10:10'].max_size, 3)

    @patch_policies
    def _loadapp(self, proxy_config_path):
        """
        Load a proxy from an app.conf to get the memcache_ring

        :returns: the memcache_ring of the memcache middleware filter
        """
        with mock.patch('swift.proxy.server.Ring'):
            app = loadapp(proxy_config_path)
        memcache_ring = None
        # Walk down the middleware pipeline until the memcache filter is
        # found (each layer wraps the next as .app).
        while True:
            memcache_ring = getattr(app, 'memcache', None)
            if memcache_ring:
                break
            app = app.app
        return memcache_ring

    @with_tempdir
    def test_real_config(self, tempdir):
        config = """
        [pipeline:main]
        pipeline = cache proxy-server

        [app:proxy-server]
        use = egg:swift#proxy

        [filter:cache]
        use = egg:swift#memcache
        """
        config_path = os.path.join(tempdir, 'test.conf')
        with open(config_path, 'w') as f:
            f.write(dedent(config))
        memcache_ring = self._loadapp(config_path)
        # only one server by default
        self.assertEqual(list(memcache_ring._client_cache.keys()),
                         ['127.0.0.1:11211'])
        # extra options
        self.assertEqual(memcache_ring._connect_timeout, 0.3)
        self.assertEqual(memcache_ring._pool_timeout, 1.0)
        # tries is limited to server count
        self.assertEqual(memcache_ring._tries, 1)
        self.assertEqual(memcache_ring._io_timeout, 2.0)

    @with_tempdir
    def test_real_config_with_options(self, tempdir):
        config = """
        [pipeline:main]
        pipeline = cache proxy-server

        [app:proxy-server]
        use = egg:swift#proxy

        [filter:cache]
        use = egg:swift#memcache
        memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
            10.0.0.4:11211
        connect_timeout = 1.0
        pool_timeout = 0.5
        tries = 4
        io_timeout = 1.0
        tls_enabled = true
        """
        config_path = os.path.join(tempdir, 'test.conf')
        with open(config_path, 'w') as f:
            f.write(dedent(config))
        memcache_ring = self._loadapp(config_path)
        self.assertEqual(sorted(memcache_ring._client_cache.keys()),
                         ['10.0.0.%d:11211' % i for i in range(1, 5)])
        # extra options
        self.assertEqual(memcache_ring._connect_timeout, 1.0)
        self.assertEqual(memcache_ring._pool_timeout, 0.5)
        # tries is limited to server count
        self.assertEqual(memcache_ring._tries, 4)
        self.assertEqual(memcache_ring._io_timeout, 1.0)
        self.assertEqual(memcache_ring._error_limit_count, 10)
        self.assertEqual(memcache_ring._error_limit_time, 60)
        self.assertEqual(memcache_ring._error_limit_duration, 60)
        self.assertIsInstance(
            list(memcache_ring._client_cache.values())[0]._tls_context,
            ssl.SSLContext)

    @with_tempdir
    def test_real_memcache_config(self, tempdir):
        proxy_config = """
        [DEFAULT]
        swift_dir = %s

        [pipeline:main]
        pipeline = cache proxy-server

        [app:proxy-server]
        use = egg:swift#proxy

        [filter:cache]
        use = egg:swift#memcache
        connect_timeout = 1.0
        """ % tempdir
        proxy_config_path = os.path.join(tempdir, 'test.conf')
        with open(proxy_config_path, 'w') as f:
            f.write(dedent(proxy_config))
        memcache_config = """
        [memcache]
        memcache_servers = 10.0.0.1:11211,10.0.0.2:11211,10.0.0.3:11211,
            10.0.0.4:11211
        connect_timeout = 0.5
        io_timeout = 1.0
        error_suppression_limit = 0
        error_suppression_interval = 1.5
        """
        memcache_config_path = os.path.join(tempdir, 'memcache.conf')
        with open(memcache_config_path, 'w') as f:
            f.write(dedent(memcache_config))
        memcache_ring = self._loadapp(proxy_config_path)
        self.assertEqual(sorted(memcache_ring._client_cache.keys()),
                         ['10.0.0.%d:11211' % i for i in range(1, 5)])
        # proxy option takes precedence
        self.assertEqual(memcache_ring._connect_timeout, 1.0)
        # default tries are not limited by servers
        self.assertEqual(memcache_ring._tries, 3)
        # memcache conf options are defaults
        self.assertEqual(memcache_ring._io_timeout, 1.0)
        self.assertEqual(memcache_ring._error_limit_count, 0)
        self.assertEqual(memcache_ring._error_limit_time, 1.5)
        self.assertEqual(memcache_ring._error_limit_duration, 1.5)


if __name__ == '__main__':
    unittest.main()
Java
<?php
/**
 * HiPay fullservice Magento2
 *
 * NOTICE OF LICENSE
 *
 * This source file is subject to the Apache 2.0 Licence
 * that is bundled with this package in the file LICENSE.md.
 * It is also available through the world-wide-web at this URL:
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * @copyright Copyright (c) 2019 - HiPay
 * @license http://www.apache.org/licenses/LICENSE-2.0 Apache 2.0 Licence
 */

namespace HiPay\FullserviceMagento\Model\Request\ThreeDS;

use HiPay\FullserviceMagento\Model\Request\AbstractRequest;
use HiPay\Fullservice\Gateway\Model\Request\ThreeDSTwo\BrowserInfo;

/**
 * Builds a 3-D Secure v2 BrowserInfo request object from the browser data
 * captured at checkout and stored on the order payment.
 *
 * @author HiPay <support@hipay.com>
 * @copyright Copyright (c) 2019 - HiPay
 * @license http://www.apache.org/licenses/LICENSE-2.0 Apache 2.0 Licence
 * @link https://github.com/hipay/hipay-fullservice-sdk-magento2
 */
class BrowserInfoFormatter extends AbstractRequest
{
    /**
     * Helper used for 3-D Secure v2 related formatting.
     *
     * @var \HiPay\FullserviceMagento\Helper\ThreeDSTwo
     */
    protected $_threeDSHelper;

    /**
     * Order whose payment carries the serialized browser information.
     *
     * @var \Magento\Sales\Model\Order
     */
    protected $_order;

    /**
     * BrowserInfoFormatter constructor.
     *
     * @param \Psr\Log\LoggerInterface $logger
     * @param \Magento\Checkout\Helper\Data $checkoutData
     * @param \Magento\Customer\Model\Session $customerSession
     * @param \Magento\Checkout\Model\Session $checkoutSession
     * @param \Magento\Framework\Locale\ResolverInterface $localeResolver
     * @param \HiPay\FullserviceMagento\Model\Request\Type\Factory $requestFactory
     * @param \Magento\Framework\UrlInterface $urlBuilder
     * @param \HiPay\FullserviceMagento\Helper\Data $helper
     * @param \HiPay\FullserviceMagento\Helper\ThreeDSTwo $threeDSHelper
     * @param array $params Must contain an "order" entry.
     * @throws \Magento\Framework\Exception\LocalizedException
     */
    public function __construct(
        \Psr\Log\LoggerInterface $logger,
        \Magento\Checkout\Helper\Data $checkoutData,
        \Magento\Customer\Model\Session $customerSession,
        \Magento\Checkout\Model\Session $checkoutSession,
        \Magento\Framework\Locale\ResolverInterface $localeResolver,
        \HiPay\FullserviceMagento\Model\Request\Type\Factory $requestFactory,
        \Magento\Framework\UrlInterface $urlBuilder,
        \HiPay\FullserviceMagento\Helper\Data $helper,
        \HiPay\FullserviceMagento\Helper\ThreeDSTwo $threeDSHelper,
        $params = []
    ) {
        parent::__construct(
            $logger,
            $checkoutData,
            $customerSession,
            $checkoutSession,
            $localeResolver,
            $requestFactory,
            $urlBuilder,
            $helper,
            $params
        );

        $this->_threeDSHelper = $threeDSHelper;
        $this->_order = $params["order"];
    }

    /**
     * {@inheritDoc}
     *
     * Decodes the JSON browser payload saved in the payment's
     * "browser_info" additional information and maps each optional field
     * onto the BrowserInfo object, defaulting to null when absent.
     *
     * @return BrowserInfo
     * @see \HiPay\FullserviceMagento\Model\Request\AbstractRequest::mapRequest()
     */
    protected function mapRequest()
    {
        $browserInfo = new BrowserInfo();
        $browserData = json_decode(
            $this->_order->getPayment()->getAdditionalInformation('browser_info')
        );

        // Fields that do not come from the decoded browser payload.
        $browserInfo->ipaddr = $this->_order->getRemoteIp();
        $browserInfo->http_accept = isset($_SERVER['HTTP_ACCEPT']) ? $_SERVER['HTTP_ACCEPT'] : null;
        $browserInfo->javascript_enabled = true;

        if ($browserData !== null) {
            // Optional fields reported by the browser; copy each one,
            // falling back to null when the payload omits it.
            $optionalFields = [
                'java_enabled',
                'language',
                'color_depth',
                'screen_height',
                'screen_width',
                'timezone',
                'http_user_agent',
            ];
            foreach ($optionalFields as $field) {
                $browserInfo->{$field} = isset($browserData->{$field}) ? $browserData->{$field} : null;
            }
        }

        return $browserInfo;
    }
}
Java
package com.github.andriell.collection; import org.junit.Test; import static org.junit.Assert.assertEquals; /** * Created by Andrey on 13.02.2016 */ public class HashThreeTest { public static void main(String[] args) { HashThreeTest test = new HashThreeTest(); test.test1(); } @Test public void test1() { ObjectTest test1 = new ObjectTest(0x50000000); ObjectTest test2 = new ObjectTest(0x60000000); ObjectTest test3 = new ObjectTest(0x70000000); ObjectTest test4 = new ObjectTest(0x00000005); ObjectTest test5 = new ObjectTest(0x00000006); ObjectTest test6 = new ObjectTest(0x00000007); HashThree<ObjectTest> three = new HashThree<ObjectTest>(); assertEquals(0, three.getSize()); assertEquals(false, three.remove(test1)); assertEquals(true, three.add(test1)); assertEquals(1, three.getSize()); assertEquals(true, three.add(test2)); assertEquals(2, three.getSize()); assertEquals(true, three.add(test3)); assertEquals(3, three.getSize()); assertEquals(true, three.add(test4)); assertEquals(4, three.getSize()); assertEquals(true, three.add(test5)); assertEquals(5, three.getSize()); assertEquals(true, three.add(test6)); assertEquals(6, three.getSize()); assertEquals(false, three.add(test1)); assertEquals(false, three.add(test2)); assertEquals(false, three.add(test3)); assertEquals(false, three.add(test4)); assertEquals(true, three.replace(test1)); assertEquals(true, three.replace(test2)); assertEquals(true, three.replace(test3)); assertEquals(true, three.replace(test4)); System.out.println(three); assertEquals(true, three.exist(test2)); assertEquals(true, three.remove(test2)); //assertEquals(false, three.remove(test2)); //assertEquals(true, three.exist(test1)); //assertEquals(false, three.exist(test2)); //assertEquals(true, three.exist(test3)); //assertEquals(true, three.exist(test4)); System.out.println(three); } private class ObjectTest { private int hashCode; public ObjectTest(int hashCode) { this.hashCode = hashCode; } @Override public int hashCode() { return hashCode; } 
@Override public String toString() { return Integer.toString(hashCode); } } }
Java
---
layout: page
title: "About"
description: "你是你周围关系的反映 "
header-img: "img/green.jpg"
---

我是Edward,通过建立博客,希望能够学习互联网语言,read 这个时代

现在正在学习**python** 。

### remind

- 正确的激励产生正确的行为
- 解释宇宙需要不仅仅一个真理
- 你是你周围关系的反映
- 广末凉子很漂亮

### 关注:

- [Python](http://liaoxuefeng.com)

### 代表作:

- [《暂无,此为模板测试》](http://cnfeat.com/blog/2015/05/22/a-24-chinese-fonts/)

### 我的朋友们

- [lomo](http://huangyafei.com)

### 联系

- [知乎@ewadrd.lv](http://www.zhihu.com/people/yinsi)
- 公众号:暂无nulltext

<center>
    <p><img src="" align="center"></p>
</center>
Java
nurikabe
========

This project is a qml graphical UI for playing the logic puzzle nurikabe.

Requirements
------------

* golang >= 1.3

To install golang visit: https://golang.org/doc/install

* Qt >= 5

To install Qt visit: http://qt-project.org/downloads

* go-qml

run 'go get gopkg.in/qml.v1'

for documentation visit: http://github.com/go-qml/qml

Building
--------

Once all requirements have been met, you should be able to run 'go build' from the command line. This will build a binary which you can then execute. Note that you must run the binary in the same directory as the qml folder.

Levels
----

Nurikabe uses json format for all its levels. You may also generate levels using the nurikabe/gen helper binary. The gen utility also allows for solving levels by piping the json level via stdin and issuing the 'solve' flag.

i.e. cat my_level.json | gen -solve

Usage of ./gen:
  -base=2: minimum garden size
  -debug=false: enable debug output
  -growth=4: garden growth. base + growth is max garden size
  -height=5: grid height
  -min=3: minimum gardens count
  -smart=true: solve using smart algorithm
  -solve=false: solve generated grid
  -v=false: Verbose
  -width=5: grid width
Java
// Copyright 2018 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

//go:build !race
// +build !race

package testutil

// RaceEnabled reports whether the binary was built with the Go race
// detector. This file is compiled only when the "race" build tag is
// absent, so the constant is false here; presumably a race-tagged
// counterpart file defines it as true — confirm against the sibling file.
const RaceEnabled = false
Java
#!/bin/bash
#
# Fetch statistics for a GR8 BI job and mirror the raw API response into
# .lastresponse next to this script for later inspection.
#
# Usage: <script> job_id [success_status|job_id|run_status]
#
# Requires .gr8_env.sh (same directory) to define GR8_BI_TOK,
# GR8_BASE_URL and GR8_BI_ACT.

# Resolve the directory containing this script so companion files are
# found regardless of the caller's working directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Quoted so a path containing spaces does not break the source command.
. "$DIR/.gr8_env.sh"

if [ "$#" -ne 2 ]; then
	echo "Usage: $(basename "$0") job_id [success_status|job_id|run_status]"
	# Non-zero exit so scripted callers can detect the usage error
	# (previously this exited 0, masking the failure).
	exit 1
fi

# Quote the URL and arguments so unexpected characters in the job id or
# stat name are passed through verbatim rather than word-split/globbed.
curl -s -X GET \
  -H "X-Auth-Token: $GR8_BI_TOK" \
  "$GR8_BASE_URL/current/$GR8_BI_ACT/job/$1/stats/$2" | tee "$DIR/.lastresponse"
Java
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Unit tests for the os-volume-type-access API extension.

Exercises listing/filtering of volume types by visibility (public vs
private) and granting/revoking per-project access to private types,
using stubbed ``cinder.db`` accessors instead of a real database.
"""

import datetime

import webob

from cinder.api.contrib import volume_type_access as type_access
from cinder.api.v2 import types as types_api_v2
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake


def generate_type(type_id, is_public):
    """Build a minimal volume-type dict with the given id and visibility."""
    return {
        'id': type_id,
        'name': u'test',
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
        'updated_at': None,
        'deleted_at': None,
        'is_public': bool(is_public)
    }


# Fixture: types 1 and 2 are public, 3 and 4 are private.
VOLUME_TYPES = {
    fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True),
    fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True),
    fake.VOLUME_TYPE3_ID: generate_type(fake.VOLUME_TYPE3_ID, False),
    fake.VOLUME_TYPE4_ID: generate_type(fake.VOLUME_TYPE4_ID, False)}

PROJ1_UUID = fake.PROJECT_ID
PROJ2_UUID = fake.PROJECT2_ID
PROJ3_UUID = fake.PROJECT3_ID

# Fixture: per-project grants for the two private types. Type 3 is shared
# with projects 2 and 3; type 4 only with project 3.
ACCESS_LIST = [{'volume_type_id': fake.VOLUME_TYPE3_ID,
                'project_id': PROJ2_UUID},
               {'volume_type_id': fake.VOLUME_TYPE3_ID,
                'project_id': PROJ3_UUID},
               {'volume_type_id': fake.VOLUME_TYPE4_ID,
                'project_id': PROJ3_UUID}]


def fake_volume_type_get(context, id, inactive=False, expected_fields=None):
    """Stub for db.volume_type_get backed by the VOLUME_TYPES fixture.

    When 'projects' is requested via expected_fields, the granted project
    ids from ACCESS_LIST are attached to the returned dict.
    """
    vol = VOLUME_TYPES[id]
    if expected_fields and 'projects' in expected_fields:
        vol['projects'] = [a['project_id'] for a in ACCESS_LIST
                           if a['volume_type_id'] == id]
    return vol


def _has_type_access(type_id, project_id):
    """Return True if ACCESS_LIST grants project_id access to type_id."""
    for access in ACCESS_LIST:
        if access['volume_type_id'] == type_id and \
                access['project_id'] == project_id:
            return True
    return False


def fake_volume_type_get_all(context, inactive=False, filters=None,
                             marker=None, limit=None, sort_keys=None,
                             sort_dirs=None, offset=None, list_result=False):
    """Stub for db.volume_type_get_all honoring the is_public filter.

    With is_public=True the caller's own granted private types are also
    included, mirroring the real DB API behaviour.
    """
    if filters is None or filters['is_public'] is None:
        if list_result:
            return list(VOLUME_TYPES.values())
        return VOLUME_TYPES
    res = {}
    for k, v in VOLUME_TYPES.items():
        if filters['is_public'] and _has_type_access(k, context.project_id):
            res.update({k: v})
            continue
        if v['is_public'] == filters['is_public']:
            res.update({k: v})
    if list_result:
        return list(res.values())
    return res


class FakeResponse(object):
    # Pre-populated response body the action controller mutates in place.
    obj = {'volume_type': {'id': fake.VOLUME_TYPE_ID},
           'volume_types': [
               {'id': fake.VOLUME_TYPE_ID},
               {'id': fake.VOLUME_TYPE3_ID}]}

    def attach(self, **kwargs):
        # No-op: tests only inspect self.obj.
        pass


class FakeRequest(object):
    environ = {"cinder.context": context.get_admin_context()}

    def cached_resource_by_id(self, resource_id, name=None):
        # Serve cached lookups straight from the fixture.
        return VOLUME_TYPES[resource_id]


class VolumeTypeAccessTest(test.TestCase):

    def setUp(self):
        super(VolumeTypeAccessTest, self).setUp()
        self.type_controller_v2 = types_api_v2.VolumeTypesController()
        self.type_access_controller = type_access.VolumeTypeAccessController()
        self.type_action_controller = type_access.VolumeTypeActionController()
        self.req = FakeRequest()
        self.context = self.req.environ['cinder.context']
        # Route all DB lookups through the module-level fixtures above.
        self.stubs.Set(db, 'volume_type_get',
                       fake_volume_type_get)
        self.stubs.Set(db, 'volume_type_get_all',
                       fake_volume_type_get_all)

    def assertVolumeTypeListEqual(self, expected, observed):
        """Assert two volume-type lists contain the same ids (order-free)."""
        self.assertEqual(len(expected), len(observed))
        expected = sorted(expected, key=lambda item: item['id'])
        observed = sorted(observed, key=lambda item: item['id'])
        for d1, d2 in zip(expected, observed):
            self.assertEqual(d1['id'], d2['id'])

    def test_list_type_access_public(self):
        """Querying os-volume-type-access on public type should return 404."""
        req = fakes.HTTPRequest.blank('/v2/%s/types/os-volume-type-access'
                                      % fake.PROJECT_ID,
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.type_access_controller.index,
                          req, fake.VOLUME_TYPE2_ID)

    def test_list_type_access_private(self):
        """Access list of a private type shows each granted project."""
        expected = {'volume_type_access': [
            {'volume_type_id': fake.VOLUME_TYPE3_ID,
             'project_id': PROJ2_UUID},
            {'volume_type_id': fake.VOLUME_TYPE3_ID,
             'project_id': PROJ3_UUID}]}
        result = self.type_access_controller.index(self.req,
                                                   fake.VOLUME_TYPE3_ID)
        self.assertEqual(expected, result)

    def test_list_with_no_context(self):
        """Unauthorized contexts are rejected by the policy check."""
        req = fakes.HTTPRequest.blank('/v2/flavors/%s/flavors'
                                      % fake.PROJECT_ID)

        def fake_authorize(context, target=None, action=None):
            raise exception.PolicyNotAuthorized(action='index')

        self.stubs.Set(type_access, 'authorize', fake_authorize)

        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_access_controller.index,
                          req, fake.PROJECT_ID)

    def test_list_type_with_admin_default_proj1(self):
        """Project 1 (no grants) sees only the public types."""
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
                                      use_admin_context=True)
        req.environ['cinder.context'].project_id = PROJ1_UUID
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_default_proj2(self):
        """Project 2 additionally sees private type 3 it was granted."""
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID},
                                     {'id': fake.VOLUME_TYPE3_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types' % PROJ2_UUID,
                                      use_admin_context=True)
        req.environ['cinder.context'].project_id = PROJ2_UUID
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_true(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true'
                                      % fake.PROJECT_ID,
                                      use_admin_context=True)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_false(self):
        """Admins filtering is_public=false see all private types."""
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
                                     {'id': fake.VOLUME_TYPE4_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false'
                                      % fake.PROJECT_ID,
                                      use_admin_context=True)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_false_proj2(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
                                     {'id': fake.VOLUME_TYPE4_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false'
                                      % fake.PROJECT_ID,
                                      use_admin_context=True)
        req.environ['cinder.context'].project_id = PROJ2_UUID
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_admin_ispublic_none(self):
        """is_public=none disables the visibility filter entirely."""
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID},
                                     {'id': fake.VOLUME_TYPE3_ID},
                                     {'id': fake.VOLUME_TYPE4_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none'
                                      % fake.PROJECT_ID,
                                      use_admin_context=True)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_default(self):
        """Non-admins see only public types by default."""
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_ispublic_true(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true'
                                      % fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_ispublic_false(self):
        """Non-admins cannot list private types even when asked for."""
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false'
                                      % fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_list_type_with_no_admin_ispublic_none(self):
        expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
                                     {'id': fake.VOLUME_TYPE2_ID}]}
        req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none'
                                      % fake.PROJECT_ID,
                                      use_admin_context=False)
        result = self.type_controller_v2.index(req)
        self.assertVolumeTypeListEqual(expected['volume_types'],
                                       result['volume_types'])

    def test_show(self):
        """show() decorates the response with the is_public attribute."""
        resp = FakeResponse()
        self.type_action_controller.show(self.req, resp, fake.VOLUME_TYPE_ID)
        self.assertEqual({'id': fake.VOLUME_TYPE_ID,
                          'os-volume-type-access:is_public': True},
                         resp.obj['volume_type'])

    def test_detail(self):
        """detail() decorates every listed type with is_public."""
        resp = FakeResponse()
        self.type_action_controller.detail(self.req, resp)
        self.assertEqual(
            [{'id': fake.VOLUME_TYPE_ID,
              'os-volume-type-access:is_public': True},
             {'id': fake.VOLUME_TYPE3_ID,
              'os-volume-type-access:is_public': False}],
            resp.obj['volume_types'])

    def test_create(self):
        resp = FakeResponse()
        self.type_action_controller.create(self.req, {}, resp)
        self.assertEqual({'id': fake.VOLUME_TYPE_ID,
                          'os-volume-type-access:is_public': True},
                         resp.obj['volume_type'])

    def test_add_project_access(self):
        # Stub asserts the controller passes the expected type/project
        # through to the DB layer.
        def stub_add_volume_type_access(context, type_id, project_id):
            self.assertEqual(fake.VOLUME_TYPE4_ID, type_id, "type_id")
            self.assertEqual(PROJ2_UUID, project_id, "project_id")
        self.stubs.Set(db, 'volume_type_access_add',
                       stub_add_volume_type_access)
        body = {'addProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=True)
        result = self.type_action_controller._addProjectAccess(
            req, fake.VOLUME_TYPE4_ID, body)
        self.assertEqual(202, result.status_code)

    def test_add_project_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=False)
        body = {'addProjectAccess': {'project': PROJ2_UUID}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_action_controller._addProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body)

    def test_add_project_access_with_already_added_access(self):
        """A duplicate grant maps to HTTP 409 Conflict."""
        def stub_add_volume_type_access(context, type_id, project_id):
            raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
                                                   project_id=project_id)
        self.stubs.Set(db, 'volume_type_access_add',
                       stub_add_volume_type_access)
        body = {'addProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=True)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.type_action_controller._addProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body)

    def test_remove_project_access_with_bad_access(self):
        """Revoking a non-existent grant maps to HTTP 404."""
        def stub_remove_volume_type_access(context, type_id, project_id):
            raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id,
                                                     project_id=project_id)
        self.stubs.Set(db, 'volume_type_access_remove',
                       stub_remove_volume_type_access)
        body = {'removeProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=True)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.type_action_controller._removeProjectAccess,
                          req, fake.VOLUME_TYPE4_ID, body)

    def test_remove_project_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
            use_admin_context=False)
        body = {'removeProjectAccess': {'project': PROJ2_UUID}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_action_controller._removeProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body)
Java
package com.ryanharter.auto.value.moshi.example;

import com.google.auto.value.AutoValue;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;

import java.lang.reflect.Type;

/**
 * Example AutoValue class with three generic type parameters, showing how
 * auto-value-moshi generates a {@link JsonAdapter} for a generic value type.
 */
@AutoValue
public abstract class GenericsExample<A, B, C> {

  public abstract A a();
  public abstract B b();
  public abstract C c();

  /** Builder for {@link GenericsExample}; implemented by the generated class. */
  @AutoValue.Builder
  public interface Builder<A, B, C> {
    Builder<A, B, C> a(A a);
    Builder<A, B, C> b(B b);
    Builder<A, B, C> c(C c);
    GenericsExample<A, B, C> build();
  }

  /** Creates a builder backed by the AutoValue-generated implementation. */
  public static <A, B, C> Builder<A, B, C> builder() {
    return new AutoValue_GenericsExample.Builder<A, B, C>();
  }

  /**
   * Static factory discovered by auto-value-moshi.
   *
   * @param moshi the Moshi instance used to look up delegate adapters
   * @param types the concrete type arguments for A, B and C, in order
   * @return an adapter for the reified GenericsExample type
   */
  // NOTE(review): the generated MoshiJsonAdapter is instantiated as a raw
  // type here, so this relies on an unchecked conversion to the
  // parameterized return type.
  public static <A, B, C> JsonAdapter<GenericsExample<A, B, C>> jsonAdapter(Moshi moshi, Type[] types) {
    return new AutoValue_GenericsExample.MoshiJsonAdapter(moshi, types);
  }
}
Java
# Copyright 2020 Department of Computational Biology for Infection Research - Helmholtz Centre for Infection Research # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. from src.utils import labels as utils_labels from src.utils import load_ncbi_taxinfo from src import binning_classes import matplotlib matplotlib.use('Agg') import seaborn as sns import matplotlib.pyplot as plt from matplotlib.lines import Line2D import matplotlib.ticker as ticker import numpy as np import os, sys, inspect import pandas as pd from collections import OrderedDict currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) def create_colors_list(): colors_list = [] for color in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]: colors_list.append(tuple(color)) colors_list.append("black") for color in plt.cm.Set2(np.linspace(0, 1, 8)): colors_list.append(tuple(color)) for color in plt.cm.Set3(np.linspace(0, 1, 12)): colors_list.append(tuple(color)) return colors_list def create_legend(color_indices, available_tools, output_dir): colors_list = create_colors_list() if color_indices: colors_list = [colors_list[i] for i in color_indices] colors_iter = iter(colors_list) circles = [Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=10, markerfacecolor=next(colors_iter)) for label in available_tools] fig = 
plt.figure(figsize=(0.5, 0.5)) fig.legend(circles, available_tools, loc='center', frameon=False, ncol=5, handletextpad=0.1) fig.savefig(os.path.join(output_dir, 'genome', 'legend.pdf'), dpi=100, format='pdf', bbox_inches='tight') plt.close(fig) def plot_precision_vs_bin_size(pd_bins, output_dir): pd_plot = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS] for tool_label, pd_tool in pd_plot.groupby(utils_labels.TOOL): fig, axs = plt.subplots(figsize=(5, 4.5)) axs.scatter(np.log(pd_tool['total_length']), pd_tool['precision_bp'], marker='o') axs.set_xlim([None, np.log(pd_tool['total_length'].max())]) axs.set_ylim([0.0, 1.0]) axs.set_title(tool_label, fontsize=12) plt.ylabel('Purity per bin (%)', fontsize=12) plt.xlabel('Bin size [log(# bp)]', fontsize=12) fig.savefig(os.path.join(output_dir, 'genome', tool_label, 'purity_vs_bin_size.png'), dpi=200, format='png', bbox_inches='tight') plt.close(fig) def plot_by_genome_coverage(pd_bins, pd_target_column, available_tools, output_dir): colors_list = create_colors_list() if len(available_tools) > len(colors_list): raise RuntimeError("Plot only supports 29 colors") fig, axs = plt.subplots(figsize=(5, 4.5)) for i, (color, tool) in enumerate(zip(colors_list, available_tools)): pd_tool = pd_bins[pd_bins[utils_labels.TOOL] == tool].sort_values(by=['genome_index']) axs.scatter(pd_tool['genome_coverage'], pd_tool[pd_target_column], marker='o', color=colors_list[i], s=[3] * pd_tool.shape[0]) window = 50 rolling_mean = pd_tool[pd_target_column].rolling(window=window, min_periods=10).mean() axs.plot(pd_tool['genome_coverage'], rolling_mean, color=colors_list[i]) axs.set_ylim([-0.01, 1.01]) axs.set_xticklabels(['{:,.1f}'.format(np.exp(x)) for x in axs.get_xticks()], fontsize=12) axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in axs.get_yticks()], fontsize=12) axs.tick_params(axis='x', labelsize=12) if pd_target_column == 'precision_bp': ylabel = 'Purity per bin (%)' file_name = 'purity_by_genome_coverage' else: ylabel = 
'Completeness per genome (%)' file_name = 'completeness_by_genome_coverage' plt.ylabel(ylabel, fontsize=15) plt.xlabel('Average genome coverage', fontsize=15) colors_iter = iter(colors_list) circles = [] for x in range(len(available_tools)): circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter))) lgd = plt.legend(circles, available_tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=14) fig.savefig(os.path.join(output_dir, 'genome', file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight') plt.close(fig) def get_pd_genomes_recall(sample_id_to_queries_list): pd_genomes_recall = pd.DataFrame() for sample_id in sample_id_to_queries_list: for query in sample_id_to_queries_list[sample_id]: if not isinstance(query, binning_classes.GenomeQuery): continue recall_df = query.recall_df_cami1[['genome_id', 'recall_bp']].copy() recall_df[utils_labels.TOOL] = query.label recall_df['sample_id'] = sample_id recall_df = recall_df.reset_index().set_index(['sample_id', utils_labels.TOOL]) pd_genomes_recall = pd.concat([pd_genomes_recall, recall_df]) return pd_genomes_recall def plot_precision_recall_by_coverage(sample_id_to_queries_list, pd_bins_g, coverages_pd, available_tools, output_dir): # compute average genome coverage if coverages for multiple samples were provided coverages_pd = coverages_pd.groupby(['GENOMEID']).mean() coverages_pd.rename(columns={'GENOMEID': 'genome_id'}) coverages_pd = coverages_pd.sort_values(by=['COVERAGE']) coverages_pd['rank'] = coverages_pd['COVERAGE'].rank() pd_genomes_recall = get_pd_genomes_recall(sample_id_to_queries_list) pd_genomes_recall['genome_index'] = pd_genomes_recall['genome_id'].map(coverages_pd['rank'].to_dict()) pd_genomes_recall = pd_genomes_recall.reset_index() pd_genomes_recall['genome_coverage'] = np.log(pd_genomes_recall['genome_id'].map(coverages_pd['COVERAGE'].to_dict())) 
# NOTE(review): the next five statements are the tail of a function whose
# definition lies above this excerpt; kept verbatim.
plot_by_genome_coverage(pd_genomes_recall, 'recall_bp', available_tools, output_dir)

pd_bins_precision = pd_bins_g[[utils_labels.TOOL, 'precision_bp', 'genome_id']].copy().dropna(subset=['precision_bp'])
pd_bins_precision['genome_index'] = pd_bins_precision['genome_id'].map(coverages_pd['rank'].to_dict())
pd_bins_precision['genome_coverage'] = np.log(pd_bins_precision['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_bins_precision, 'precision_bp', available_tools, output_dir)


def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
    """Render the genomes-vs-predicted-bins confusion matrix as a heatmap.

    Saves heatmap_<sample_id>.pdf/.png under <output_dir>/genome/<label>/ and,
    when separate_bar is True, an additional stand-alone colorbar figure.
    """
    if log_scale:
        # NOTE(review): DataFrame.apply does not take inplace=; the kwarg is
        # forwarded to np.log10 — likely a latent bug. Confirm pandas version.
        df_confusion = df_confusion.apply(np.log10, inplace=True).replace(-np.inf, 0)
    fig, axs = plt.subplots(figsize=(10, 8))

    fontsize = 20

    # replace columns and rows labels by numbers
    d = {value: key for (key, value) in enumerate(df_confusion.columns.tolist(), 1)}
    df_confusion = df_confusion.rename(index=str, columns=d)
    df_confusion.index = range(1, len(df_confusion) + 1)

    xticklabels = int(round(df_confusion.shape[1] / 10, -1))
    yticklabels = int(round(df_confusion.shape[0] / 10, -1))
    sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=xticklabels, yticklabels=yticklabels, cbar=False, rasterized=True)
    # sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=False, yticklabels=False, cbar=True, rasterized=True)
    sns_plot.set_xlabel("Genomes", fontsize=fontsize)
    sns_plot.set_ylabel("Predicted bins", fontsize=fontsize)
    plt.yticks(fontsize=12, rotation=0)
    plt.xticks(fontsize=12)

    # attach a manually positioned colorbar (linear: megabases; log: log10 bp)
    mappable = sns_plot.get_children()[0]
    cbar_ax = fig.add_axes([.915, .11, .017, .77])
    cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical')
    if log_scale:
        cbar.set_label(fontsize=fontsize, label='log$_{10}$(# bp)')
    else:
        fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
        cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical', format=ticker.FuncFormatter(fmt))
        cbar.set_label(fontsize=fontsize, label='Millions of base pairs')
    cbar.ax.tick_params(labelsize=fontsize)
    cbar.outline.set_edgecolor(None)

    axs.set_title(label, fontsize=fontsize, pad=10)
    axs.set_ylim([len(df_confusion), 0])

    # plt.yticks(fontsize=14, rotation=0)
    # plt.xticks(fontsize=14)

    output_dir = os.path.join(output_dir, 'genome', label)
    fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.png'), dpi=200, format='png', bbox_inches='tight')
    plt.close(fig)

    if not separate_bar:
        return

    # create separate figure for bar
    fig = plt.figure(figsize=(6, 6))
    mappable = sns_plot.get_children()[0]

    fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
    cbar = plt.colorbar(mappable, orientation='vertical', label='[millions of base pairs]', format=ticker.FuncFormatter(fmt))

    text = cbar.ax.yaxis.label
    font = matplotlib.font_manager.FontProperties(size=16)
    text.set_font_properties(font)
    cbar.outline.set_visible(False)
    cbar.ax.tick_params(labelsize=14)

    # store separate bar figure
    plt.gca().set_visible(False)
    fig.savefig(os.path.join(output_dir, 'heatmap_bar.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    plt.close(fig)


def plot_boxplot(sample_id_to_queries_list, metric_name, output_dir, available_tools):
    """Draw horizontal per-tool boxplots of a per-bin metric (as percentages).

    metric_name is 'precision_bp' or 'recall_bp'; the per-bin values are read
    from the matching *_df attribute of each query. Output goes to
    <output_dir>/genome/boxplot_<metric>.pdf/.png.
    """
    pd_bins = pd.DataFrame()
    for sample_id in sample_id_to_queries_list:
        for query in sample_id_to_queries_list[sample_id]:
            # e.g. metric 'precision_bp' -> attribute 'precision_df'
            metric_df = getattr(query, metric_name.replace('_bp', '_df')).copy()
            metric_df[utils_labels.TOOL] = query.label
            metric_df['sample_id'] = sample_id
            metric_df = metric_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
            pd_bins = pd.concat([pd_bins, metric_df])

    metric_all = []
    for tool in available_tools:
        pd_tool = pd_bins.iloc[pd_bins.index.get_level_values(utils_labels.TOOL) == tool]
        metric_all.append(pd_tool[metric_name][pd_tool[metric_name].notnull()].tolist())

    fig, axs = plt.subplots(figsize=(6, 5))

    medianprops = dict(linewidth=2.5, color='gold')
    bplot = axs.boxplot(metric_all, notch=0, vert=0, patch_artist=True, labels=available_tools, medianprops=medianprops, sym='k.')
    colors_iter = iter(create_colors_list())

    # turn on grid
    axs.grid(which='major', linestyle=':', linewidth='0.5', color='lightgrey')

    # force axes to be from 0 to 100%
    axs.set_xlim([-0.01, 1.01])

    # transform plot_labels to percentages
    vals = axs.get_xticks()
    axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals])

    # enable code to rotate labels
    tick_labels = axs.get_yticklabels()
    plt.setp(tick_labels, fontsize=13)  ## rotation=55

    for box in bplot['boxes']:
        box.set(facecolor=next(colors_iter), linewidth=0.1)
    plt.ylim(plt.ylim()[::-1])

    if metric_name == 'precision_bp':
        axs.set_xlabel('Purity per bin (%)', fontsize=13)
        metric_name = 'purity_bp'
    else:
        axs.set_xlabel('Completeness per genome (%)', fontsize=13)
        metric_name = 'completeness_bp'

    fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.png'), dpi=200, format='png', bbox_inches='tight')

    # remove labels but keep grid
    # axs.get_yaxis().set_ticklabels([])
    # for tic in axs.yaxis.get_major_ticks():
    #     tic.tick1line.set_visible(False)
    #     tic.tick2line.set_visible(False)
    #     tic.label1.set_visible(False)
    #     tic.label2.set_visible(False)
    # fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '_wo_legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
    plt.close(fig)


def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
    """Scatter/errorbar plot of per-tool mean metrics; plot_type selects the pair.

    plot_type: 'e'/'f' = avg purity vs completeness (bp/seq, with error bars);
    'w'/'x' = sample-wide purity vs completeness (bp/seq); 'p' = ARI vs
    percentage of assigned bp. Saves .eps/.pdf/.png under
    <output_dir>/<binning_type>/.
    """
    available_tools = df_results[utils_labels.TOOL].unique()
    tools = [tool for tool in labels if tool in available_tools]

    colors_list = create_colors_list()
    if color_indices:
        colors_list = [colors_list[i] for i in color_indices]

    df_mean = df_results.groupby(utils_labels.TOOL).mean().reindex(tools)

    binning_type = df_results[utils_labels.BINNING_TYPE].iloc[0]

    if len(df_mean) > len(colors_list):
        raise RuntimeError("Plot only supports 29 colors")

    fig, axs = plt.subplots(figsize=(5, 4.5))

    # force axes to be from 0 to 100%
    axs.set_xlim([0.0, 1.0])
    axs.set_ylim([0.0, 1.0])

    # 'e': average purity vs completeness per bp, variance error bars
    if plot_type == 'e':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.errorbar(df_row[utils_labels.AVG_PRECISION_BP], df_row[utils_labels.AVG_RECALL_BP], xerr=df_row['avg_precision_bp_var'], yerr=df_row['avg_recall_bp_var'], fmt='o', ecolor=colors_list[i], mec=colors_list[i], mfc=colors_list[i], capsize=3, markersize=8)
    # 'f': average purity vs completeness per sequence, SEM error bars
    if plot_type == 'f':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.errorbar(df_row[utils_labels.AVG_PRECISION_SEQ], df_row[utils_labels.AVG_RECALL_SEQ], xerr=df_row[utils_labels.AVG_PRECISION_SEQ_SEM], yerr=df_row[utils_labels.AVG_RECALL_SEQ_SEM], fmt='o', ecolor=colors_list[i], mec=colors_list[i], mfc=colors_list[i], capsize=3, markersize=8)
    # 'w': sample-wide purity vs completeness per bp
    if plot_type == 'w':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.PRECISION_PER_BP], df_row[utils_labels.RECALL_PER_BP], marker='o', color=colors_list[i], markersize=10)
    # 'x': sample-wide purity vs completeness per sequence
    if plot_type == 'x':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.PRECISION_PER_SEQ], df_row[utils_labels.RECALL_PER_SEQ], marker='o', color=colors_list[i], markersize=10)
    # 'p': adjusted Rand index vs percentage of assigned base pairs
    elif plot_type == 'p':
        for i, (tool, df_row) in enumerate(df_mean.iterrows()):
            axs.plot(df_row[utils_labels.ARI_BY_BP], df_row[utils_labels.PERCENTAGE_ASSIGNED_BPS], marker='o', color=colors_list[i], markersize=10)

    # turn on grid
    # axs.minorticks_on()
    axs.grid(which='major', linestyle=':', linewidth='0.5')
    # axs.grid(which='minor', linestyle=':', linewidth='0.5')

    # transform plot_labels to percentages
    if plot_type != 'p':
        vals = axs.get_xticks()
        axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
    else:
        axs.tick_params(axis='x', labelsize=12)
    vals = axs.get_yticks()
    axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)

    if rank:
        file_name = rank + '_' + file_name
        plt.title(rank)
        ylabel = ylabel.replace('genome', 'taxon')

    plt.xlabel(xlabel, fontsize=13)
    plt.ylabel(ylabel, fontsize=13)
    plt.tight_layout()
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.eps'), dpi=100, format='eps', bbox_inches='tight')

    colors_iter = iter(colors_list)
    circles = []
    for x in range(len(df_mean)):
        circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))

    lgd = plt.legend(circles, tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=12)

    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)


def plot_avg_precision_recall(colors, df_results, labels, output_dir, rank=None):
    """Plot average purity vs completeness, once per bp and once per sequence."""
    plot_summary(colors, df_results, labels, output_dir, rank, 'e', 'avg_purity_completeness_bp', 'Average purity per bin (%)', 'Average completeness per genome (%)')
    plot_summary(colors, df_results, labels, output_dir, rank, 'f', 'avg_purity_completeness_seq', 'Average purity per bin (%)', 'Average completeness per genome (%)')


def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
    """Plot sample-wide purity vs completeness, once per bp and once per sequence."""
    plot_summary(colors, summary_per_query, labels, output_dir, rank, 'w', 'purity_recall_bp', 'Purity for sample (%)', 'Completeness for sample (%)')
    plot_summary(colors, summary_per_query, labels, output_dir, rank, 'x', 'purity_completeness_seq', 'Purity for sample (%)', 'Completeness for sample (%)')


def plot_adjusted_rand_index_vs_assigned_bps(colors, summary_per_query, labels, output_dir, rank=None):
    """Plot adjusted Rand index against the percentage of binned base pairs."""
    plot_summary(colors, summary_per_query, labels, output_dir, rank, 'p', 'ari_vs_assigned_bps', 'Adjusted Rand index', 'Percentage of binned base pairs')


def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
    """For each tool, plot metric curves over taxonomic ranks with error bands.

    metrics_list and errors_list are parallel column-name lists; one figure is
    written per tool to <output_dir>/taxonomic/<tool>/<file_name>.png/.pdf.
    """
    colors_list = ["#006cba", "#008000", "#ba9e00", "red"]
    for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
        # one rank->value map per metric, initialized to 0 for every rank
        dict_metric_list = []
        for metric in metrics_list:
            rank_to_metric = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
            dict_metric_list.append(rank_to_metric)
        dict_error_list = []
        for error in errors_list:
            rank_to_metric_error = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
            dict_error_list.append(rank_to_metric_error)
        for index, row in pd_results.iterrows():
            for rank_to_metric, metric in zip(dict_metric_list, metrics_list):
                rank_to_metric[row[utils_labels.RANK]] = .0 if np.isnan(row[metric]) else row[metric]
            for rank_to_metric_error, error in zip(dict_error_list, errors_list):
                rank_to_metric_error[row[utils_labels.RANK]] = .0 if np.isnan(row[error]) else row[error]

        fig, axs = plt.subplots(figsize=(6, 5))

        # force axes to be from 0 to 100%
        axs.set_xlim([0, 7])
        axs.set_ylim([0.0, 1.0])

        x_values = range(len(load_ncbi_taxinfo.RANKS))

        y_values_list = []
        for rank_to_metric, color in zip(dict_metric_list, colors_list):
            y_values = list(rank_to_metric.values())
            axs.plot(x_values, y_values, color=color)
            y_values_list.append(y_values)

        # shaded +/- error band around each metric curve
        for rank_to_metric_error, y_values, color in zip(dict_error_list, y_values_list, colors_list):
            sem = list(rank_to_metric_error.values())
            plt.fill_between(x_values, np.subtract(y_values, sem).tolist(), np.add(y_values, sem).tolist(), color=color, alpha=0.5)

        plt.xticks(x_values, load_ncbi_taxinfo.RANKS, rotation='vertical')
        vals = axs.get_yticks()
        axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])

        lgd = plt.legend(metrics_list, loc=1, borderaxespad=0., handlelength=2, frameon=False)

        plt.tight_layout()
        fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
        fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.close(fig)


def create_contamination_column(pd_tool_bins):
    """Add 'newcolumn' = contamination (1 - purity per bp)."""
    pd_tool_bins['newcolumn'] = 1 - pd_tool_bins['precision_bp']


def create_completeness_minus_contamination_column(pd_tool_bins):
    """Add 'newcolumn' = completeness - contamination (recall + precision - 1)."""
    pd_tool_bins['newcolumn'] = pd_tool_bins['recall_bp'] + pd_tool_bins['precision_bp'] - 1


def plot_contamination(pd_bins, binning_type, title, xlabel, ylabel, create_column_function, output_dir):
    """Plot, per tool, the ranked curve of a derived per-bin column.

    create_column_function must add a 'newcolumn' to the frame (see the two
    helpers above); bins are sorted descending by it and plotted against rank.
    """
    if len(pd_bins) == 0:
        return

    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])

    create_column_function(pd_bins_copy)

    colors_list = create_colors_list()

    fig, axs = plt.subplots(figsize=(6, 5))

    tools = pd_bins_copy[utils_labels.TOOL].unique().tolist()

    for color, tool in zip(colors_list, tools):
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        pd_tool_bins = pd_tool_bins.sort_values(by='newcolumn', ascending=False).reset_index()
        pd_tool_bins = pd_tool_bins.drop(['index'], axis=1)
        axs.plot(list(range(1, len(pd_tool_bins) + 1)), pd_tool_bins['newcolumn'], color=color)

    min_value = pd_bins_copy['newcolumn'].min()
    axs.set_ylim(min_value if min_value < 1.0 else .9, 1.0)
    axs.set_xlim(1, None)
    axs.grid(which='major', linestyle='-', linewidth='0.5', color='lightgrey')

    # transform plot_labels to percentages
    vals = axs.get_yticks()
    axs.set_yticklabels(['{:3.0f}'.format(y * 100) for y in vals])

    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel + ' [%]', fontsize=14)

    lgd = plt.legend(tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=1, frameon=False, fontsize=12)

    plt.tight_layout()
    file_name = title.lower().replace(' ', '_').replace('-', 'minus').replace('|', '')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)


def get_number_of_hq_bins(tools, pd_bins):
    """Count per tool the bins with purity > 90% at three completeness cutoffs.

    Returns a DataFrame indexed by tool with columns '>90%', '>70%', '>50%'
    (cumulative counts — a >90% bin is also counted in >70% and >50%).
    """
    pd_counts = pd.DataFrame()
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    for tool in tools:
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        x50 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .5) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
        x70 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .7) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
        x90 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .9) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
        pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90%', '>70%', '>50%'], index=[tool])
        # NOTE(review): DataFrame.append was removed in pandas 2.0; this would
        # need pd.concat on newer pandas — confirm the pinned version.
        pd_counts = pd_counts.append(pd_tool_counts)
    return pd_counts


def get_number_of_hq_bins_by_score(tools, pd_bins):
    """Count per tool the bins above score thresholds, in disjoint buckets.

    Score = completeness + 5 * (purity - 1). Unlike get_number_of_hq_bins, the
    '>70' and '>50' buckets exclude higher buckets (x50 -= x70; x70 -= x90).
    """
    pd_counts = pd.DataFrame()
    pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
    pd_bins_copy['newcolumn'] = pd_bins_copy['recall_bp'] + 5 * (pd_bins_copy['precision_bp'] - 1)
    for tool in tools:
        pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
        x50 = pd_tool_bins[pd_tool_bins['newcolumn'] > .5].shape[0]
        x70 = pd_tool_bins[pd_tool_bins['newcolumn'] > .7].shape[0]
        x90 = pd_tool_bins[pd_tool_bins['newcolumn'] > .9].shape[0]
        x50 -= x70
        x70 -= x90
        pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90', '>70', '>50'], index=[tool])
        # NOTE(review): DataFrame.append removed in pandas 2.0 — see above.
        pd_counts = pd_counts.append(pd_tool_counts)
    return pd_counts


def plot_counts(pd_bins, tools, output_dir, output_file, get_bin_counts_function):
    """Bar-plot per-tool high-quality bin counts under <output_dir>/genome/.

    output_file == 'bin_counts' draws grouped bars (threshold counts from
    get_number_of_hq_bins); anything else draws stacked score-bucket bars with
    totals annotated above each bar.
    """
    pd_counts = get_bin_counts_function(tools, pd_bins)
    fig, axs = plt.subplots(figsize=(11, 5))
    if output_file == 'bin_counts':
        fig = pd_counts.plot.bar(ax=axs, stacked=False, color=['#28334AFF', '#FBDE44FF', '#F65058FF'], width=.8, legend=None).get_figure()
    else:
        fig = pd_counts.plot.bar(ax=axs, stacked=True, color=['#9B4A97FF', '#FC766AFF', '#F9A12EFF'], width=.8, legend=None).get_figure()
    axs.tick_params(axis='x', labelrotation=45, length=0)
    axs.set_xticklabels(tools, horizontalalignment='right', fontsize=14)
    axs.set_xlabel(None)
    # axs.yaxis.set_major_locator(MaxNLocator(integer=True))
    h, l = axs.get_legend_handles_labels()
    axs.set_ylabel('#genome bins', fontsize=14)

    # axs.grid(which='major', linestyle=':', linewidth='0.5')
    # axs.grid(which='minor', linestyle=':', linewidth='0.5')

    # prepend an invisible handle so the legend starts with a text-only label
    ph = [plt.plot([], marker='', ls='')[0]]
    handles = ph + h
    if output_file == 'bin_counts':
        labels = ['Contamination < 10% Completeness '] + l
        bbox_to_anchor = (0.49, 1.02)
    else:
        labels = ['Score '] + l
        y_values = (pd_counts['>90'] + pd_counts['>70'] + pd_counts['>50']).tolist()
        for i, v in enumerate(y_values):
            axs.text(i - .25, v + 5, str(v), color='black', fontweight='bold')
        bbox_to_anchor = (0.47, 1.02)
    lgd = plt.legend(handles, labels, bbox_to_anchor=bbox_to_anchor, columnspacing=.5, loc=8, borderaxespad=0., handlelength=1, frameon=False, fontsize=14, ncol=5)
    # plt.subplots_adjust(hspace=0.6, wspace=0.2)
    fig.savefig(os.path.join(output_dir, 'genome', output_file + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
    fig.savefig(os.path.join(output_dir, 'genome', output_file + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.close(fig)
Java
package web.magic.jvm;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

/**
 * Creates dynamic proxies that expose an MBean through a typed Java interface.
 */
class MBeanTyper {
    /** Diagnostic output is enabled with -Djboss.jmx.debug=true. */
    static final boolean DEBUG = Boolean.getBoolean("jboss.jmx.debug");

    /**
     * Create a typed object from an mbean.
     *
     * @param server        the MBeanServer hosting the MBean
     * @param mbean         the object name of the MBean to proxy
     * @param mainInterface the interface the returned proxy should implement
     * @return a proxy implementing {@code mainInterface} and all of its
     *         super-interfaces, backed by {@link MBeanTyperInvoker}
     * @throws Exception if the proxy cannot be created
     */
    public static final Object typeMBean(MBeanServer server, ObjectName mbean, Class<?> mainInterface)
            throws Exception {
        List<Class<?>> interfaces = new ArrayList<Class<?>>();
        if (mainInterface.isInterface()) {
            interfaces.add(mainInterface);
        }
        addInterfaces(mainInterface.getInterfaces(), interfaces);
        // toArray(new Class<?>[0]) is already correctly typed; the previous
        // (Class[]) raw cast was redundant.
        Class<?>[] cl = interfaces.toArray(new Class<?>[0]);
        if (DEBUG) {
            System.err.println("typeMean->server=" + server + ",mbean=" + mbean
                    + ",mainInterface=" + mainInterface);
            for (Class<?> c : cl) {
                System.err.println("     :" + c);
            }
        }
        return Proxy.newProxyInstance(Thread.currentThread().getContextClassLoader(), cl,
                new MBeanTyperInvoker(server, mbean));
    }

    /** Recursively collects the given interfaces and all their super-interfaces. */
    private static final void addInterfaces(Class<?>[] cl, List<Class<?>> list) {
        if (cl == null) {
            return;
        }
        for (Class<?> c : cl) {
            list.add(c);
            addInterfaces(c.getInterfaces(), list);
        }
    }
}

/**
 * MBeanTyperInvoker handles method invocations against the MBeanTyper target
 * object and forwards them to the MBeanServer and ObjectName for invocation.
 *
 * @author <a href="mailto:jhaynie@vocalocity.net">Jeff Haynie</a>
 */
final class MBeanTyperInvoker implements java.lang.reflect.InvocationHandler {
    private final MBeanServer server;
    private final ObjectName mbean;
    // Caches the JMX string signature per Method so reflection runs only once.
    private final Map<Method, String[]> signatureCache =
            Collections.synchronizedMap(new HashMap<Method, String[]>());

    MBeanTyperInvoker(MBeanServer server, ObjectName mbean) {
        this.server = server;
        this.mbean = mbean;
    }

    /**
     * Returns true when the method looks like a JavaBean getter, i.e. its name
     * starts with "get" followed by the attribute name. A method literally
     * named {@code get()} is rejected so it cannot produce an empty attribute
     * name. Boolean "is" getters are intentionally not treated as attributes
     * here (preserved from the original behavior) and are dispatched as
     * operations instead.
     */
    private boolean isJMXAttribute(Method m) {
        String name = m.getName();
        return name.startsWith("get") && name.length() > 3;
    }

    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        if (MBeanTyper.DEBUG) {
            System.err.println("  ++ method=" + method.getName() + ",args=" + args);
        }
        try {
            if (method.getDeclaringClass() == Object.class) {
                // Handle Object methods locally rather than forwarding to JMX.
                String name = method.getName();
                if (name.equals("hashCode")) {
                    // valueOf instead of the deprecated new Integer(...)
                    return Integer.valueOf(this.hashCode());
                } else if (name.equals("toString")) {
                    return this.toString();
                } else if (name.equals("equals")) {
                    // FIXME: this needs to be reviewed - we should be
                    // smarter about this ...
                    return Boolean.valueOf(equals(args[0]));
                }
            } else if (isJMXAttribute(method) && (args == null || args.length <= 0)) {
                // Zero-arg getters map to JMX attribute reads.
                String name = method.getName().substring(3);
                return server.getAttribute(mbean, name);
            }

            String[] sig = signatureCache.get(method);
            if (sig == null) {
                // Build the signature from the declared parameter types rather
                // than the runtime arguments, since primitives arrive boxed.
                // getParameterTypes() never contains null entries, so no
                // per-element null check is needed.
                Class<?>[] paramTypes = method.getParameterTypes();
                sig = new String[paramTypes.length];
                for (int i = 0; i < paramTypes.length; i++) {
                    sig[i] = paramTypes[i].getName();
                }
                signatureCache.put(method, sig);
            }
            return server.invoke(mbean, method.getName(), args, sig);
        } catch (Throwable t) {
            if (MBeanTyper.DEBUG) {
                t.printStackTrace();
            }
            // Unwrap reflection/JMX wrappers so callers see the real cause.
            if (t instanceof UndeclaredThrowableException) {
                UndeclaredThrowableException ut = (UndeclaredThrowableException) t;
                throw ut.getUndeclaredThrowable();
            } else if (t instanceof InvocationTargetException) {
                InvocationTargetException it = (InvocationTargetException) t;
                throw it.getTargetException();
            } else if (t instanceof MBeanException) {
                MBeanException me = (MBeanException) t;
                throw me.getTargetException();
            } else {
                throw t;
            }
        }
    }
}
Java
# grid Cheatsheet

> A cheatsheet for `display: grid` layout. Source: http://grid.malven.co
Java
// ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");

// ----------------------------------------------------------------------------
// Setting class: one row in the settings list, bound to a Config property.
function Setting() {
    $.title_label.text_id = this.args.title_id;
    $.title_label.text = Alloy.Globals.L(this.args.title_id);

    // This will trigger UI update. Ugly solution I know.
    $.setting.top = this.args.top || 0;
    if (typeof this.args.width !== 'undefined') {
        $.setting.width = this.args.width;
    }

    // Listen to the "SettingChanges" event. It simply updates the string
    // representation of the property that the view shows.
    this.addSettingsChangedHandler(this.updateValue);
}

// Inherits from Controller...
Setting.prototype = new (require("controller"))(
    arguments[0],
    [$.title_label]
);

// Read the actual value of the property that this setting is responsible for
Setting.prototype.updateValue = function() {
    $.setting_value.text = Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};

// Open the edit window; `use` persists the value once `validator` accepts it.
Setting.prototype.handleClick = function (initial, use, validator) {
    var self = this;
    var arg = {
        useValue: function(value) {
            // Look the validator up by name instead of eval()'ing a code
            // string: equivalent behavior, no code-injection surface.
            if (validators[validator](value)) {
                use(self.args.propertyName, value);
                self.updateValue();
            } else {
                alert(Alloy.Globals.L("illegal_value"));
            }
        },
        value: initial,
        validator: validator
    };

    utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};

// Default click behavior: edit the bound Config property ("ok" accepts all).
Setting.prototype.clickHandler = function() {
    var initial = Config.getProperty(this.args.propertyName).get();
    var validator = typeof this.args.validator !== 'undefined' ? this.args.validator : "ok";
    function use(n, v) {
        Config.getProperty(n).set(v);
    }
    this.handleClick(initial, use, validator);
};

// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();

// Handling button click event
function onClick(e) {
    setting.clickHandler();
}
Java
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags --> <title><?php print $title; ?></title> <!-- Bootstrap --> <link href="css/bootstrap.min.css" rel="stylesheet"> <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries --> <!-- WARNING: Respond.js doesn't work if you view the page via file:// --> <!--[if lt IE 9]> <script src="https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js"></script> <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script> <![endif]--> </head>
Java
<?php

namespace Tecnoready\Common\Service\ObjectManager;

/**
 * Trait providing the owner-object configuration fields shared by
 * object-manager services.
 *
 * @author Carlos Mendoza <inhack20@gmail.com>
 */
trait TraitConfigure
{
    /**
     * Type of the object that generates the history
     * (e.g. invoices, quotes, contracts).
     * @var string
     */
    private $objectType;

    /**
     * Unique identifier of the object that owns the files
     * (e.g. 14114, DF-23454).
     * @var string
     */
    private $objectId;

    /**
     * Stores the owning object's id and type on this service.
     *
     * @param string $objectId   unique id of the owning object
     * @param string $objectType type of the owning object
     * @param array  $options    currently ignored; reserved for overriding
     *                           implementations
     */
    public function configure($objectId, $objectType, array $options = [])
    {
        $this->objectId = $objectId;
        $this->objectType = $objectType;
    }
}
Java
; ---------------------------------------------------------------------------
; Sprite mapping data. NOTE(review): this appears to be Mega Drive / Sonic
; engine sprite mappings (word offset table followed by per-frame piece
; lists) — confirm against the engine's mapping format.
; Map_347E30: table of 13 word offsets, each relative to the table start,
; pointing at one frame definition below.
; Each frame: dc.w piece count, then one 6-byte dc.b row per sprite piece.
; ---------------------------------------------------------------------------
Map_347E30:	dc.w Frame_347E4A-Map_347E30
	dc.w Frame_347E4C-Map_347E30
	dc.w Frame_347E66-Map_347E30
	dc.w Frame_347E80-Map_347E30
	dc.w Frame_347E9A-Map_347E30
	dc.w Frame_347EB4-Map_347E30
	dc.w Frame_347ECE-Map_347E30
	dc.w Frame_347EE2-Map_347E30
	dc.w Frame_347EFC-Map_347E30
	dc.w Frame_347F16-Map_347E30
	dc.w Frame_347F30-Map_347E30
	dc.w Frame_347F4A-Map_347E30
	dc.w Frame_347F6A-Map_347E30

; Empty frame: zero pieces.
Frame_347E4A:	dc.w 0

Frame_347E4C:	dc.w 4
	dc.b $FC, $F, 0, 0,$FF,$FA
	dc.b $EC, 7, 0,$10,$FF,$EA
	dc.b $EC, 9, 0,$18,$FF,$FA
	dc.b $DC, 9, 0,$1E,$FF,$F2

Frame_347E66:	dc.w 4
	dc.b $EE, 8, 0, 0,$FF,$F0
	dc.b $F6, $D, 0, 3,$FF,$F0
	dc.b 6, 8, 0, $B,$FF,$F8
	dc.b $E, 6, 0, $E,$FF,$F8

Frame_347E80:	dc.w 4
	dc.b $E9, $A, 0, 0,$FF,$F1
	dc.b $F9, 4, 0, 9, 0, 9
	dc.b 1, $D, 0, $B,$FF,$F1
	dc.b $11, 9, 0,$13,$FF,$E9

Frame_347E9A:	dc.w 4
	dc.b $EA, $F, 0, 0,$FF,$F3
	dc.b $A, 8, 0,$10,$FF,$F3
	dc.b $12, $C, 0,$13,$FF,$EB
	dc.b $1A, 8, 0,$17,$FF,$EB

Frame_347EB4:	dc.w 4
	dc.b $EA, 8, 0, 0,$FF,$F5
	dc.b $F2, $E, 0, 3,$FF,$ED
	dc.b $A, 8, 0, $F,$FF,$F5
	dc.b $12, $D, 0,$12,$FF,$F5

Frame_347ECE:	dc.w 3
	dc.b $EF, $F, 0, 0,$FF,$EC
	dc.b $F, $C, 0,$10,$FF,$E4
	dc.b $F, 8, 0,$14, 0, 4

Frame_347EE2:	dc.w 4
	dc.b $EF, $F, 0, 0,$FF,$EC
	dc.b $F, $C, 0,$10,$FF,$E4
	dc.b $F, 8, 0,$14, 0, 4
	dc.b 7, 0, 0,$17, 0,$14

Frame_347EFC:	dc.w 4
	dc.b $EF, $F, 0, 0,$FF,$EC
	dc.b 7, 4, 0,$10, 0, $C
	dc.b $F, $C, 0,$12,$FF,$E4
	dc.b $F, 0, 0,$16, 0, 4

Frame_347F16:	dc.w 4
	dc.b $F1, $E, 0, 0,$FF,$E5
	dc.b $F1, 6, 0, $C, 0, 5
	dc.b 9, $C, 0,$12,$FF,$ED
	dc.b $11, $A, 0,$16,$FF,$ED

Frame_347F30:	dc.w 4
	dc.b $EB, $F, 0, 0,$FF,$F6
	dc.b $F3, $A, 0,$10,$FF,$DE
	dc.b $B, $C, 0,$19,$FF,$EE
	dc.b $13, 9, 0,$1D,$FF,$F6

Frame_347F4A:	dc.w 5
	dc.b $EE, $F, 0, 0,$FF,$EC
	dc.b $FE, 0, 0,$10, 0, $C
	dc.b $E, $C, 0,$11,$FF,$E4
	dc.b $E, 0, 0,$15, 0, 4
	dc.b $16, $C, 0,$16,$FF,$FC

Frame_347F6A:	dc.w 5
	dc.b $EA, 8, 0, 0,$FF,$EE
	dc.b $F2, $E, 0, 3,$FF,$EE
	dc.b $A, $C, 0, $F,$FF,$E6
	dc.b $12, $C, 0,$13,$FF,$EE
	dc.b $1A, $C, 0,$17,$FF,$FE
Java
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen 1.8.3.1"/> <title>Ilwis-Objects: util/box.h Source File</title> <link href="tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="jquery.js"></script> <script type="text/javascript" src="dynsections.js"></script> <link href="search/search.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="search/search.js"></script> <script type="text/javascript"> $(document).ready(function() { searchBox.OnSelectItem(0); }); </script> <link href="doxygen.css" rel="stylesheet" type="text/css" /> </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! --> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <td id="projectlogo"><img alt="Logo" src="ilwisobjectsgeneral.PNG"/></td> <td style="padding-left: 0.5em;"> <div id="projectname">Ilwis-Objects &#160;<span id="projectnumber">1.0</span> </div> <div id="projectbrief">GIS and Remote Sensing framework for data access and processing</div> </td> </tr> </tbody> </table> </div> <!-- end header part --> <!-- Generated by Doxygen 1.8.3.1 --> <script type="text/javascript"> var searchBox = new SearchBox("searchBox", "search",false,'Search'); </script> <div id="navrow1" class="tabs"> <ul class="tablist"> <li><a href="index.html"><span>Main&#160;Page</span></a></li> <li><a href="annotated.html"><span>Classes</span></a></li> <li class="current"><a href="files.html"><span>Files</span></a></li> <li> <div id="MSearchBox" class="MSearchBoxInactive"> <span class="left"> <img id="MSearchSelect" src="search/mag_sel.png" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return 
searchBox.OnSearchSelectHide()" alt=""/> <input type="text" id="MSearchField" value="Search" accesskey="S" onfocus="searchBox.OnSearchFieldFocus(true)" onblur="searchBox.OnSearchFieldFocus(false)" onkeyup="searchBox.OnSearchFieldChange(event)"/> </span><span class="right"> <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a> </span> </div> </li> </ul> </div> <div id="navrow2" class="tabs2"> <ul class="tablist"> <li><a href="files.html"><span>File&#160;List</span></a></li> </ul> </div> <!-- window showing the filter options --> <div id="MSearchSelectWindow" onmouseover="return searchBox.OnSearchSelectShow()" onmouseout="return searchBox.OnSearchSelectHide()" onkeydown="return searchBox.OnSearchSelectKey(event)"> <a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark">&#160;</span>Pages</a></div> <!-- iframe showing the search results (closed by default) --> <div id="MSearchResultsWindow"> <iframe src="javascript:void(0)" frameborder="0" name="MSearchResults" id="MSearchResults"> </iframe> </div> <div id="nav-path" class="navpath"> <ul> <li class="navelem"><a class="el" href="dir_23ec12649285f9fabf3a6b7380226c28.html">util</a></li> </ul> </div> </div><!-- top --> <div class="header"> <div class="headertitle"> <div class="title">box.h</div> </div> </div><!--header--> <div 
class="contents"> <div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="preprocessor">#ifndef BOX_H</span></div> <div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="preprocessor"></span><span class="preprocessor">#define BOX_H</span></div> <div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="preprocessor"></span></div> <div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="preprocessor">#include &lt;QSize&gt;</span></div> <div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="preprocessor">#include &quot;size.h&quot;</span></div> <div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="preprocessor">#include &quot;errmessages.h&quot;</span></div> <div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="preprocessor">#include &quot;range.h&quot;</span></div> <div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;</div> <div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="keyword">namespace </span>Ilwis {</div> <div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">class</span> Po<span class="keywordtype">int</span>Type=Coordinate&gt; <span class="keyword">class </span>Box : <span class="keyword">public</span> Range{</div> <div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="keyword">public</span>:</div> <div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160; <span class="keyword">enum</span> Dimension{dim0=0, dimX=1, dimY=2, dimZ=4};</div> <div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;</div> <div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160; Box() : 
_min_corner(PointType(0,0,0)), _max_corner(PointType(0,0,0)){</div> <div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160; }</div> <div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;</div> <div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160; Box(<span class="keyword">const</span> PointType&amp; pMin, <span class="keyword">const</span> PointType&amp; pMax) : _min_corner(pMin), _max_corner(pMax){</div> <div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160; normalize();</div> <div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160; }</div> <div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;</div> <div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160; Box(<span class="keyword">const</span> Box&lt;PointType&gt;&amp; bx) : _min_corner(bx.min_corner()), _max_corner(bx.max_corner()) {</div> <div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;</div> <div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160; }</div> <div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;</div> <div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160; Box(Box&lt;PointType&gt;&amp;&amp; box) :</div> <div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160; _min_corner(std::move(box._min_corner)),</div> <div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160; _max_corner(std::move(box._max_corner))</div> <div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160; {</div> <div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160; box._min_corner = box._max_corner = PointType();</div> <div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160; }</div> <div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160;</div> <div class="line"><a 
name="l00036"></a><span class="lineno"> 36</span>&#160; Box(<span class="keyword">const</span> QSize&amp; sz) : _min_corner(PointType(0,0,0)),_max_corner(PointType(sz.width()-1, sz.height()-1),0){</div> <div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160; }</div> <div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160;</div> <div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160; <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; Box(<span class="keyword">const</span> Size&lt;T&gt;&amp; sz) : _min_corner(PointType(0,0,0)),_max_corner(PointType(sz.xsize()-1, sz.ysize()-1,sz.zsize()-1)){</div> <div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160; }</div> <div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;</div> <div class="line"><a name="l00046"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#adec0fd6f9fb44de378ca4766f70dd779"> 46</a></span>&#160; <a class="code" href="class_ilwis_1_1_box.html#adec0fd6f9fb44de378ca4766f70dd779">Box</a>(<span class="keyword">const</span> QString&amp; envelope) : _min_corner(PointType(0,0)), _max_corner(PointType(0,0)){</div> <div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160; <span class="keywordtype">int</span> index1 = envelope.indexOf(<span class="stringliteral">&quot;(&quot;</span>);</div> <div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160; <span class="keywordflow">if</span> ( index1 != -1) {</div> <div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160; <span class="keywordtype">int</span> index2 = envelope.indexOf(<span class="stringliteral">&quot;)&quot;</span>) ;</div> <div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160; <span class="keywordflow">if</span> ( index2 == -1){</div> <div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; <span 
class="keywordflow">return</span>;</div> <div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160; }</div> <div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;</div> <div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160; QString coords = envelope.mid(index1+1, index2 - index1 - 1);</div> <div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160; coords = coords.trimmed();</div> <div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160; QStringList parts = coords.split(<span class="stringliteral">&quot;,&quot;</span>);</div> <div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160; <span class="keywordflow">if</span> ( parts.size() != 2){</div> <div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; <span class="keywordflow">return</span>;</div> <div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; }</div> <div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160; QStringList p1 = parts[0].trimmed().split(<span class="charliteral">&#39; &#39;</span>);</div> <div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160; <span class="keywordflow">if</span> ( p1.size() &lt; 2)</div> <div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160; <span class="keywordflow">return</span>;</div> <div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160; this-&gt;min_corner().x = p1[0].trimmed().toDouble();</div> <div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; this-&gt;min_corner().y = p1[1].trimmed().toDouble();</div> <div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; <span class="keywordflow">if</span> ( p1.size() == 3)</div> <div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; this-&gt;min_corner().z = p1[2].trimmed().toDouble();</div> <div class="line"><a 
name="l00067"></a><span class="lineno"> 67</span>&#160;</div> <div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; QStringList p2 = parts[1].trimmed().split(<span class="charliteral">&#39; &#39;</span>);</div> <div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160; <span class="keywordflow">if</span> ( p1.size() &lt; 2) {</div> <div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160; this-&gt;min_corner().x = 0;</div> <div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; this-&gt;min_corner().y = 0;</div> <div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160; this-&gt;min_corner().z = 0;</div> <div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160; <span class="keywordflow">return</span>;</div> <div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; }</div> <div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160; this-&gt;max_corner().x = p2[0].trimmed().toDouble();</div> <div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160; this-&gt;max_corner().y = p2[1].trimmed().toDouble();</div> <div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; <span class="keywordflow">if</span> ( p2.size() == 3)</div> <div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; this-&gt;max_corner().z = p2[2].trimmed().toDouble();</div> <div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; }</div> <div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; }</div> <div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160;</div> <div class="line"><a name="l00082"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#ab0511c11ae04999d283f67c6ea27cee4"> 82</a></span>&#160; IlwisTypes <a class="code" href="class_ilwis_1_1_box.html#ab0511c11ae04999d283f67c6ea27cee4" title="valueType 
returns the type of values contained in the range">valueType</a>()<span class="keyword"> const</span>{</div> <div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; <span class="keywordflow">return</span> max_corner().valuetype();</div> <div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; }</div> <div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160;</div> <div class="line"><a name="l00086"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#ab8771e4e5dda06e115eba2750ec9c255"> 86</a></span>&#160; <a class="code" href="class_ilwis_1_1_range.html" title="The Range class base interface for all objects that need to define a range of values.">Range</a> *<a class="code" href="class_ilwis_1_1_box.html#ab8771e4e5dda06e115eba2750ec9c255">clone</a>()<span class="keyword"> const</span>{</div> <div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; <span class="keywordflow">return</span> <span class="keyword">new</span> <a class="code" href="class_ilwis_1_1_box.html">Box&lt;PointType&gt;</a>(*this);</div> <div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; }</div> <div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160;</div> <div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160;</div> <div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; PointType min_corner()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; <span class="keywordflow">return</span> _min_corner;</div> <div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; }</div> <div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160;</div> <div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; PointType max_corner()<span class="keyword"> const </span>{</div> <div class="line"><a 
name="l00096"></a><span class="lineno"> 96</span>&#160; <span class="keywordflow">return</span> _max_corner;</div> <div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; }</div> <div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160;</div> <div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160; PointType&amp; min_corner() {</div> <div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160; <span class="keywordflow">return</span> _min_corner;</div> <div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160; }</div> <div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160;</div> <div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; PointType&amp; max_corner() {</div> <div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; <span class="keywordflow">return</span> _max_corner;</div> <div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; }</div> <div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160;</div> <div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; <span class="keywordtype">double</span> xlength()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; <span class="keywordflow">return</span> std::abs(this-&gt;min_corner().x - this-&gt;max_corner().x) + 1;</div> <div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; }</div> <div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160;</div> <div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160; <span class="keywordtype">double</span> ylength()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; <span class="keywordflow">return</span> std::abs(this-&gt;min_corner().y - 
this-&gt;max_corner().y) + 1;</div> <div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160; }</div> <div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;</div> <div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; <span class="keywordtype">double</span> zlength()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160; <span class="keywordflow">return</span> std::abs(this-&gt;min_corner().z - this-&gt;max_corner().z) + 1;</div> <div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; }</div> <div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160;</div> <div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; <span class="keyword">template</span>&lt;<span class="keyword">typename</span> T=qu<span class="keywordtype">int</span>32&gt; Size&lt;T&gt; size()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; <span class="keywordflow">return</span> Size&lt;T&gt;(xlength(), ylength(), zlength());</div> <div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; }</div> <div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160;</div> <div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; <span class="keywordtype">bool</span> is3D()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; <span class="keywordflow">return</span> this-&gt;min_corner().is3D() &amp;&amp; this-&gt;max_corner().is3D();</div> <div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; }</div> <div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; quint64 area()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00127"></a><span class="lineno"> 
127</span>&#160; <span class="keywordflow">if</span> ( !<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>())</div> <div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; <span class="keywordflow">return</span> 0;</div> <div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; <span class="keywordflow">return</span> xlength() * ylength();</div> <div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; }</div> <div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160;</div> <div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; quint64 volume()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; <span class="keywordflow">if</span> (!is3D())</div> <div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; <span class="keywordflow">return</span> area();</div> <div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; <span class="keywordflow">return</span> xlength() * ylength() * zlength();</div> <div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; }</div> <div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160;</div> <div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; <span class="keywordtype">bool</span> contains(<span class="keyword">const</span> PointType&amp; p)<span class="keyword"> const </span>{</div> <div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; <span class="keywordflow">if</span> (!p.isValid())</div> <div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; <span class="keywordflow">if</span>(!<a class="code" 
href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>())</div> <div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160;</div> <div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; <span class="keyword">const</span> PointType&amp; pmin = this-&gt;min_corner();</div> <div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; <span class="keyword">const</span> PointType&amp; pmax = this-&gt;max_corner();</div> <div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; <span class="keywordtype">bool</span> ok = p.x &gt;= pmin.x &amp;&amp; p.x &lt;= pmax.x &amp;&amp;</div> <div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; p.y &gt;= pmin.y &amp;&amp; p.y &lt;= pmax.y;</div> <div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; <span class="keywordflow">if</span> ( is3D() &amp;&amp; p.is3D()) {</div> <div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; ok = p.z &gt;= pmin.z &amp;&amp; p.z &lt;= pmax.z;</div> <div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; }</div> <div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; <span class="keywordflow">return</span> ok;</div> <div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; }</div> <div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160;</div> <div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; <span class="keywordtype">bool</span> contains(Box&lt;PointType&gt;&amp; box)<span class="keyword"> const</span>{</div> <div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; <span class="keywordflow">return</span> contains(box.min_corner()) &amp;&amp; 
contains(box.max_corner());</div> <div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; }</div> <div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160;</div> <div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <span class="keywordtype">bool</span> contains(<span class="keyword">const</span> QVariant&amp; value, <span class="keywordtype">bool</span> inclusive = <span class="keyword">true</span>)<span class="keyword"> const </span>{</div> <div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; <span class="comment">//TODO:</span></div> <div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; }</div> <div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160;</div> <div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <span class="keywordtype">bool</span> equals(Box&lt;PointType&gt;&amp; box, <span class="keywordtype">double</span> delta=0)<span class="keyword"> const </span>{</div> <div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; <span class="keywordflow">if</span> ( !box.isValid())</div> <div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; <span class="keywordflow">if</span> (!<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>())</div> <div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160;</div> <div class="line"><a 
name="l00169"></a><span class="lineno"> 169</span>&#160; <span class="keyword">const</span> PointType&amp; pmin = box.min_corner();</div> <div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; <span class="keyword">const</span> PointType&amp; pmax = box.max_corner();</div> <div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160;</div> <div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; <span class="keywordflow">if</span> ( std::abs( min_corner.x - pmin.x) &gt; delta)</div> <div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; <span class="keywordflow">if</span> ( std::abs( min_corner.y - pmin.y) &gt; delta)</div> <div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; <span class="keywordflow">if</span> ( std::abs( max_corner.x - pmax.x) &gt; delta)</div> <div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <span class="keywordflow">if</span> ( std::abs( max_corner.y - pmax.y) &gt; delta)</div> <div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; <span class="keywordflow">if</span> ( is3D() &amp;&amp; box.is3D()) {</div> <div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; <span class="keywordflow">if</span> ( std::abs( min_corner.z - pmin.z) &gt; 
delta)</div> <div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; <span class="keywordflow">if</span> ( std::abs( max_corner.z - pmax.z) &gt; delta)</div> <div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; }</div> <div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; <span class="keywordflow">return</span> <span class="keyword">true</span>;</div> <div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; }</div> <div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160;</div> <div class="line"><a name="l00189"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990"> 189</a></span>&#160; <span class="keywordtype">bool</span> <a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; <span class="keywordflow">return</span> this-&gt;min_corner().isValid() &amp;&amp; this-&gt;max_corner().isValid();</div> <div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; }</div> <div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160;</div> <div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; <span class="keywordtype">bool</span> isNull()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; <span class="keywordtype">bool</span> ok = this-&gt;min_corner().x == 0 &amp;&amp; this-&gt;min_corner().y == 0 
&amp;&amp;</div> <div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; this-&gt;max_corner().x == 0 &amp;&amp; this-&gt;max_corner().y == 0;</div> <div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; <span class="keywordflow">if</span> ( is3D()){</div> <div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; ok &amp;= this-&gt;min_corner().z == 0 &amp;&amp; this-&gt;max_corner().z == 0;</div> <div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; }</div> <div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; <span class="keywordflow">return</span> ok;</div> <div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; }</div> <div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160;</div> <div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; Box&lt;PointType&gt;&amp; operator=(Box&lt;PointType&gt;&amp;&amp; box) {</div> <div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; _min_corner = std::move(box._min_corner);</div> <div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; _max_corner = std::move(box._max_corner);</div> <div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160;</div> <div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; box._min_corner = box._max_corner = PointType();</div> <div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; }</div> <div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160;</div> <div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; Box&lt;PointType&gt;&amp; operator=(<span class="keyword">const</span> Box&lt;PointType&gt;&amp; box) {</div> <div 
class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; _min_corner = std::move(box._min_corner);</div> <div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; _max_corner = std::move(box._max_corner);</div> <div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; }</div> <div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160;</div> <div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; Box&lt;PointType&gt;&amp; operator +=(<span class="keyword">const</span> <span class="keywordtype">double</span>&amp; v) {</div> <div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; <span class="keywordflow">if</span> ( isNumericalUndef(v))</div> <div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160;</div> <div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; PointType&amp; pmin = this-&gt;min_corner();</div> <div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160; PointType&amp; pmax = this-&gt;max_corner();</div> <div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160; pmin -= v;</div> <div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; pmax += v;</div> <div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160; normalize();</div> <div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; }</div> <div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160;</div> <div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; Box&lt;PointType&gt;&amp; operator 
*=(<span class="keyword">const</span> <span class="keywordtype">double</span>&amp; v) {</div> <div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; <span class="keywordflow">if</span> ( isNumericalUndef(v))</div> <div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160; PointType&amp; pmin = this-&gt;min_corner();</div> <div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160; PointType&amp; pmax = this-&gt;max_corner();</div> <div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160; <span class="keywordtype">double</span> deltaX = xlength() * v / 2;</div> <div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160; <span class="keywordtype">double</span> deltaY = ylength() * v / 2;</div> <div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160; <span class="keywordtype">double</span> deltaZ = 1;</div> <div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160; <span class="keywordflow">if</span> ( is3D())</div> <div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160; deltaZ = zlength() * v / 2;</div> <div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160; pmin *= {deltaX, deltaY, deltaZ};</div> <div class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160; pmax *= {deltaX, deltaY, deltaZ};</div> <div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160; normalize();</div> <div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160; }</div> <div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160;</div> <div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160;Box&lt;PointType&gt;&amp; operator +=(<span class="keyword">const</span> 
PointType&amp; pnew) {</div> <div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160; <span class="keywordflow">if</span> ( !pnew.isValid())</div> <div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160;</div> <div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160;</div> <div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160;</div> <div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160; PointType&amp; pmin = this-&gt;min_corner();</div> <div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160; PointType&amp; pmax = this-&gt;max_corner();</div> <div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160; <span class="keywordflow">if</span> ( isNull() || !<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>()) {</div> <div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160; pmin = pnew;</div> <div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160; pmax = pnew;</div> <div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160; }</div> <div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160;</div> <div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160; <span class="keywordflow">if</span> ( contains(pnew))</div> <div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160; <span 
class="keywordflow">if</span> ( pmin.x &gt; pnew.x)</div> <div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160; pmin.x = pnew.x;</div> <div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160; <span class="keywordflow">if</span> ( pmin.y &gt; pnew.y)</div> <div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160; pmin.y = pnew.y;</div> <div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160; <span class="keywordflow">if</span> ( pmax.x &lt; pnew.x)</div> <div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160; pmax.x = pnew.x;</div> <div class="line"><a name="l00264"></a><span class="lineno"> 264</span>&#160; <span class="keywordflow">if</span> ( pmax.y &lt; pnew.y)</div> <div class="line"><a name="l00265"></a><span class="lineno"> 265</span>&#160; pmax.y = pnew.y;</div> <div class="line"><a name="l00266"></a><span class="lineno"> 266</span>&#160; <span class="keywordflow">if</span> ( is3D() &amp;&amp; pnew.is3D()){</div> <div class="line"><a name="l00267"></a><span class="lineno"> 267</span>&#160; <span class="keywordflow">if</span> ( pmin.z &gt; pnew.z)</div> <div class="line"><a name="l00268"></a><span class="lineno"> 268</span>&#160; pmin.z = pnew.z;</div> <div class="line"><a name="l00269"></a><span class="lineno"> 269</span>&#160; <span class="keywordflow">if</span> ( pmax.z &lt; pnew.z)</div> <div class="line"><a name="l00270"></a><span class="lineno"> 270</span>&#160; pmax.z = pnew.z;</div> <div class="line"><a name="l00271"></a><span class="lineno"> 271</span>&#160; }</div> <div class="line"><a name="l00272"></a><span class="lineno"> 272</span>&#160; normalize();</div> <div class="line"><a name="l00273"></a><span class="lineno"> 273</span>&#160;</div> <div class="line"><a name="l00274"></a><span class="lineno"> 274</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00275"></a><span 
class="lineno"> 275</span>&#160;</div> <div class="line"><a name="l00276"></a><span class="lineno"> 276</span>&#160;}</div> <div class="line"><a name="l00277"></a><span class="lineno"> 277</span>&#160;</div> <div class="line"><a name="l00278"></a><span class="lineno"> 278</span>&#160;Box&lt;PointType&gt;&amp; operator -=(<span class="keyword">const</span> PointType&amp; pnew) {</div> <div class="line"><a name="l00279"></a><span class="lineno"> 279</span>&#160; <span class="keywordflow">if</span> ( !pnew.isValid())</div> <div class="line"><a name="l00280"></a><span class="lineno"> 280</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00281"></a><span class="lineno"> 281</span>&#160;</div> <div class="line"><a name="l00282"></a><span class="lineno"> 282</span>&#160; PointType&amp; pmin = this-&gt;min_corner();</div> <div class="line"><a name="l00283"></a><span class="lineno"> 283</span>&#160; PointType&amp; pmax = this-&gt;max_corner();</div> <div class="line"><a name="l00284"></a><span class="lineno"> 284</span>&#160;</div> <div class="line"><a name="l00285"></a><span class="lineno"> 285</span>&#160; <span class="keywordflow">if</span> ( isNull() || !<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>()) {</div> <div class="line"><a name="l00286"></a><span class="lineno"> 286</span>&#160; pmin = pnew;</div> <div class="line"><a name="l00287"></a><span class="lineno"> 287</span>&#160; pmax = pnew;</div> <div class="line"><a name="l00288"></a><span class="lineno"> 288</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00289"></a><span class="lineno"> 289</span>&#160; }</div> <div class="line"><a name="l00290"></a><span class="lineno"> 290</span>&#160;</div> <div class="line"><a name="l00291"></a><span class="lineno"> 291</span>&#160; <span class="keywordflow">if</span> ( 
!contains(pnew))</div> <div class="line"><a name="l00292"></a><span class="lineno"> 292</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160; <span class="keywordflow">if</span> ( pmin.x() &lt; pnew.x())</div> <div class="line"><a name="l00294"></a><span class="lineno"> 294</span>&#160; pmin.x = pnew.x();</div> <div class="line"><a name="l00295"></a><span class="lineno"> 295</span>&#160; <span class="keywordflow">if</span> ( pmin.y &lt; pnew.y)</div> <div class="line"><a name="l00296"></a><span class="lineno"> 296</span>&#160; pmin.y = pnew.y();</div> <div class="line"><a name="l00297"></a><span class="lineno"> 297</span>&#160; <span class="keywordflow">if</span> ( pmax.x &gt; pnew.x)</div> <div class="line"><a name="l00298"></a><span class="lineno"> 298</span>&#160; pmax.x = pnew.x();</div> <div class="line"><a name="l00299"></a><span class="lineno"> 299</span>&#160; <span class="keywordflow">if</span> ( pmax.y &gt; pnew.y)</div> <div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160; pmax.y = pnew.y();</div> <div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160; <span class="keywordflow">if</span> ( is3D() &amp;&amp; pnew.is3D()){</div> <div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160; <span class="keywordflow">if</span> ( pmin.z &lt; pnew.z)</div> <div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160; pmin.z = pnew.z;</div> <div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160; <span class="keywordflow">if</span> ( pmax.z &gt; pnew.z)</div> <div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160; pmax.z = pnew.z;</div> <div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; }</div> <div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160; normalize();</div> <div 
class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160;</div> <div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160;</div> <div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160;}</div> <div class="line"><a name="l00312"></a><span class="lineno"> 312</span>&#160;</div> <div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">class</span> T&gt; Box&lt;PointType&gt;&amp; operator +=(<span class="keyword">const</span> std::vector&lt;T&gt;&amp; vec) {</div> <div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160; <span class="keywordtype">int</span> size = vec.size();</div> <div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160; <span class="keywordflow">if</span> ( size == 2 || size == 3) {</div> <div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160; this-&gt;min_corner() += vec;</div> <div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160; this-&gt;max_corner() += vec;</div> <div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; normalize();</div> <div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160; }</div> <div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160;</div> <div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160;}</div> <div class="line"><a name="l00323"></a><span class="lineno"> 323</span>&#160;</div> <div class="line"><a name="l00324"></a><span class="lineno"> 324</span>&#160;Box&lt;PointType&gt;&amp; operator +=(<span 
class="keyword">const</span> Box&lt;PointType&gt;&amp; box) {</div> <div class="line"><a name="l00325"></a><span class="lineno"> 325</span>&#160; <span class="keywordflow">if</span> ( !box.isValid())</div> <div class="line"><a name="l00326"></a><span class="lineno"> 326</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00327"></a><span class="lineno"> 327</span>&#160;</div> <div class="line"><a name="l00328"></a><span class="lineno"> 328</span>&#160; operator+=(box.min_corner());</div> <div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160; operator+=(box.max_corner());</div> <div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160; <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div> <div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160;}</div> <div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160;</div> <div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160;<span class="keywordtype">bool</span> operator==(<span class="keyword">const</span> Box&lt;PointType&gt;&amp; box )<span class="keyword"> const </span>{</div> <div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160; <span class="keywordflow">if</span> ( !box.isValid())</div> <div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div> <div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160;</div> <div class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160; <span class="keywordflow">return</span> box.max_corner() == this-&gt;max_corner() &amp;&amp; this-&gt;min_corner() == box.min_corner();</div> <div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160;}</div> <div class="line"><a name="l00339"></a><span class="lineno"> 
339</span>&#160;</div> <div class="line"><a name="l00340"></a><span class="lineno"> 340</span>&#160;<span class="keywordtype">bool</span> operator!=(<span class="keyword">const</span> Box&lt;PointType&gt;&amp; box )<span class="keyword"> const </span>{</div> <div class="line"><a name="l00341"></a><span class="lineno"> 341</span>&#160; <span class="keywordflow">return</span> !(operator==(box));</div> <div class="line"><a name="l00342"></a><span class="lineno"> 342</span>&#160;}</div> <div class="line"><a name="l00343"></a><span class="lineno"> 343</span>&#160;</div> <div class="line"><a name="l00344"></a><span class="lineno"> 344</span>&#160;QVariant impliedValue(<span class="keyword">const</span> QVariant&amp; v)<span class="keyword"> const</span>{</div> <div class="line"><a name="l00345"></a><span class="lineno"> 345</span>&#160; QString type = v.typeName();</div> <div class="line"><a name="l00346"></a><span class="lineno"> 346</span>&#160; <span class="keywordtype">bool</span> ok = type == <span class="stringliteral">&quot;Ilwis::Box&lt;Pixel&gt;&quot;</span> || type == <span class="stringliteral">&quot;Ilwis::Box&lt;Coordinate&gt;&quot;</span> ||</div> <div class="line"><a name="l00347"></a><span class="lineno"> 347</span>&#160; type == <span class="stringliteral">&quot;Ilwis::Box&lt;Pixeld&gt;&quot;</span> ;</div> <div class="line"><a name="l00348"></a><span class="lineno"> 348</span>&#160; <span class="keywordflow">if</span> (!ok){</div> <div class="line"><a name="l00349"></a><span class="lineno"> 349</span>&#160; <span class="keywordflow">return</span> sUNDEF;</div> <div class="line"><a name="l00350"></a><span class="lineno"> 350</span>&#160; }</div> <div class="line"><a name="l00351"></a><span class="lineno"> 351</span>&#160; <span class="keywordflow">if</span> ( type == <span class="stringliteral">&quot;Ilwis::Box&lt;Coordinate&gt;&quot;</span>){</div> <div class="line"><a name="l00352"></a><span class="lineno"> 352</span>&#160; Box&lt;Coordinate&gt; box = 
v.value&lt;Box&lt;Coordinate&gt;&gt;();</div> <div class="line"><a name="l00353"></a><span class="lineno"> 353</span>&#160; <span class="keywordflow">return</span> box.toString();</div> <div class="line"><a name="l00354"></a><span class="lineno"> 354</span>&#160; }</div> <div class="line"><a name="l00355"></a><span class="lineno"> 355</span>&#160; <span class="keywordflow">if</span> ( type == <span class="stringliteral">&quot;Ilwis::Box&lt;Pixel&gt;&quot;</span>){</div> <div class="line"><a name="l00356"></a><span class="lineno"> 356</span>&#160; Box&lt;Pixel&gt; box = v.value&lt;Box&lt;Pixel&gt;&gt;();</div> <div class="line"><a name="l00357"></a><span class="lineno"> 357</span>&#160; <span class="keywordflow">return</span> box.toString();</div> <div class="line"><a name="l00358"></a><span class="lineno"> 358</span>&#160; }</div> <div class="line"><a name="l00359"></a><span class="lineno"> 359</span>&#160; <span class="keywordflow">if</span> ( type == <span class="stringliteral">&quot;Ilwis::Box&lt;Pixeld&gt;&quot;</span>){</div> <div class="line"><a name="l00360"></a><span class="lineno"> 360</span>&#160; Box&lt;Pixeld&gt; box = v.value&lt;Box&lt;Pixeld&gt;&gt;();</div> <div class="line"><a name="l00361"></a><span class="lineno"> 361</span>&#160; <span class="keywordflow">return</span> box.toString();</div> <div class="line"><a name="l00362"></a><span class="lineno"> 362</span>&#160; }</div> <div class="line"><a name="l00363"></a><span class="lineno"> 363</span>&#160; <span class="keywordflow">return</span> sUNDEF;</div> <div class="line"><a name="l00364"></a><span class="lineno"> 364</span>&#160;</div> <div class="line"><a name="l00365"></a><span class="lineno"> 365</span>&#160;}</div> <div class="line"><a name="l00366"></a><span class="lineno"> 366</span>&#160;</div> <div class="line"><a name="l00367"></a><span class="lineno"> 367</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> T&gt; <span 
class="keywordtype">void</span> ensure(<span class="keyword">const</span> Size&lt;T&gt;&amp; sz) {</div> <div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160; <span class="keywordflow">if</span> ( xlength() &gt; sz.xsize()) {</div> <div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160; this-&gt;max_corner().x = sz.xsize() - 1 ;</div> <div class="line"><a name="l00370"></a><span class="lineno"> 370</span>&#160; }</div> <div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160; <span class="keywordflow">if</span> ( ylength() &gt; sz.ysize()) {</div> <div class="line"><a name="l00372"></a><span class="lineno"> 372</span>&#160; this-&gt;max_corner().y = sz.ysize() - 1 ;</div> <div class="line"><a name="l00373"></a><span class="lineno"> 373</span>&#160; }</div> <div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160; <span class="keywordflow">if</span> ( zlength() &gt; sz.zsize()) {</div> <div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160; this-&gt;max_corner().z = sz.zsize() - 1 ;</div> <div class="line"><a name="l00376"></a><span class="lineno"> 376</span>&#160; }</div> <div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160;}</div> <div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160;</div> <div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160;<span class="keywordtype">void</span> copyFrom(<span class="keyword">const</span> Box&lt;PointType&gt;&amp; box, quint32 dimensions=dimX | dimY | dimZ) {</div> <div class="line"><a name="l00380"></a><span class="lineno"> 380</span>&#160; <span class="keywordflow">if</span> ( dimensions &amp; dimX) {</div> <div class="line"><a name="l00381"></a><span class="lineno"> 381</span>&#160; this-&gt;min_corner().x = box.min_corner().x;</div> <div class="line"><a name="l00382"></a><span class="lineno"> 382</span>&#160; this-&gt;max_corner().x 
=box.max_corner().x;</div> <div class="line"><a name="l00383"></a><span class="lineno"> 383</span>&#160; }</div> <div class="line"><a name="l00384"></a><span class="lineno"> 384</span>&#160; <span class="keywordflow">if</span> ( dimensions &amp; dimY) {</div> <div class="line"><a name="l00385"></a><span class="lineno"> 385</span>&#160; this-&gt;min_corner().y = box.min_corner().y;</div> <div class="line"><a name="l00386"></a><span class="lineno"> 386</span>&#160; this-&gt;max_corner().y = box.max_corner().y;</div> <div class="line"><a name="l00387"></a><span class="lineno"> 387</span>&#160; }</div> <div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160; <span class="keywordflow">if</span> ( dimensions &amp; dimZ) {</div> <div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; this-&gt;min_corner().z = box.min_corner().z;</div> <div class="line"><a name="l00390"></a><span class="lineno"> 390</span>&#160; this-&gt;max_corner().z = box.max_corner().z;</div> <div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160; }</div> <div class="line"><a name="l00392"></a><span class="lineno"> 392</span>&#160;}</div> <div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160;</div> <div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160;</div> <div class="line"><a name="l00395"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#ab1b9531b3c86db9a4d373ac53c4f910b"> 395</a></span>&#160;QString <a class="code" href="class_ilwis_1_1_box.html#ab1b9531b3c86db9a4d373ac53c4f910b">toString</a>()<span class="keyword"> const </span>{</div> <div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; <span class="keywordflow">if</span> ( is3D()) {</div> <div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; <span class="keywordflow">if</span> (this-&gt;min_corner().valuetype() == itDOUBLE)</div> <div class="line"><a 
name="l00398"></a><span class="lineno"> 398</span>&#160; <span class="keywordflow">return</span> QString(<span class="stringliteral">&quot;POLYGON(%1 %2 %3,%4 %5 %6)&quot;</span>).</div> <div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;min_corner().x,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;min_corner().y,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;min_corner().z,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;max_corner().x,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;max_corner().y,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;max_corner().z,0,<span class="charliteral">&#39;g&#39;</span>);</div> <div class="line"><a name="l00405"></a><span class="lineno"> 405</span>&#160; <span class="keywordflow">else</span></div> <div class="line"><a name="l00406"></a><span class="lineno"> 406</span>&#160; <span class="keywordflow">return</span> QString(<span class="stringliteral">&quot;POLYGON(%1 %2 %3,%4 %5 %6)&quot;</span>).arg(this-&gt;min_corner().x).</div> <div class="line"><a name="l00407"></a><span class="lineno"> 407</span>&#160; arg(this-&gt;min_corner().y).</div> <div class="line"><a name="l00408"></a><span class="lineno"> 408</span>&#160; arg(this-&gt;min_corner().z).</div> <div 
class="line"><a name="l00409"></a><span class="lineno"> 409</span>&#160; arg(this-&gt;max_corner().x).</div> <div class="line"><a name="l00410"></a><span class="lineno"> 410</span>&#160; arg(this-&gt;max_corner().y).</div> <div class="line"><a name="l00411"></a><span class="lineno"> 411</span>&#160; arg(this-&gt;max_corner().z);</div> <div class="line"><a name="l00412"></a><span class="lineno"> 412</span>&#160;</div> <div class="line"><a name="l00413"></a><span class="lineno"> 413</span>&#160;</div> <div class="line"><a name="l00414"></a><span class="lineno"> 414</span>&#160; }<span class="keywordflow">else</span> {</div> <div class="line"><a name="l00415"></a><span class="lineno"> 415</span>&#160; <span class="keywordflow">if</span> (this-&gt;min_corner().valuetype() == itDOUBLE)</div> <div class="line"><a name="l00416"></a><span class="lineno"> 416</span>&#160; <span class="keywordflow">return</span> QString(<span class="stringliteral">&quot;POLYGON(%1 %2,%3 %4)&quot;</span>).</div> <div class="line"><a name="l00417"></a><span class="lineno"> 417</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;min_corner().x,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00418"></a><span class="lineno"> 418</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;min_corner().y,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00419"></a><span class="lineno"> 419</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;max_corner().x,0,<span class="charliteral">&#39;g&#39;</span>).</div> <div class="line"><a name="l00420"></a><span class="lineno"> 420</span>&#160; arg((<span class="keywordtype">double</span>)this-&gt;max_corner().y,0,<span class="charliteral">&#39;g&#39;</span>);</div> <div class="line"><a name="l00421"></a><span class="lineno"> 421</span>&#160; <span class="keywordflow">else</span></div> <div class="line"><a name="l00422"></a><span class="lineno"> 
422</span>&#160; <span class="keywordflow">return</span> QString(<span class="stringliteral">&quot;POLYGON(%1 %2,%3 %4)&quot;</span>).</div> <div class="line"><a name="l00423"></a><span class="lineno"> 423</span>&#160; arg(this-&gt;min_corner().x).</div> <div class="line"><a name="l00424"></a><span class="lineno"> 424</span>&#160; arg(this-&gt;min_corner().y).</div> <div class="line"><a name="l00425"></a><span class="lineno"> 425</span>&#160; arg(this-&gt;max_corner().x).</div> <div class="line"><a name="l00426"></a><span class="lineno"> 426</span>&#160; arg(this-&gt;max_corner().y);</div> <div class="line"><a name="l00427"></a><span class="lineno"> 427</span>&#160; }</div> <div class="line"><a name="l00428"></a><span class="lineno"> 428</span>&#160;</div> <div class="line"><a name="l00429"></a><span class="lineno"> 429</span>&#160;}</div> <div class="line"><a name="l00430"></a><span class="lineno"> 430</span>&#160;</div> <div class="line"><a name="l00431"></a><span class="lineno"> 431</span>&#160;<span class="keyword">private</span>:</div> <div class="line"><a name="l00432"></a><span class="lineno"> 432</span>&#160; PointType _min_corner;</div> <div class="line"><a name="l00433"></a><span class="lineno"> 433</span>&#160; PointType _max_corner;</div> <div class="line"><a name="l00434"></a><span class="lineno"> 434</span>&#160;</div> <div class="line"><a name="l00435"></a><span class="lineno"> 435</span>&#160;</div> <div class="line"><a name="l00436"></a><span class="lineno"> 436</span>&#160;<span class="keywordtype">void</span> normalize() {</div> <div class="line"><a name="l00437"></a><span class="lineno"> 437</span>&#160; PointType&amp; pmin = this-&gt;min_corner();</div> <div class="line"><a name="l00438"></a><span class="lineno"> 438</span>&#160; PointType&amp; pmax = this-&gt;max_corner();</div> <div class="line"><a name="l00439"></a><span class="lineno"> 439</span>&#160; <span class="keywordflow">if</span> ( pmin.x &gt; pmax.x) {</div> <div class="line"><a 
name="l00440"></a><span class="lineno"> 440</span>&#160; <span class="keywordtype">double</span> v1 = pmin.x;</div> <div class="line"><a name="l00441"></a><span class="lineno"> 441</span>&#160; <span class="keywordtype">double</span> v2 = pmax.x;</div> <div class="line"><a name="l00442"></a><span class="lineno"> 442</span>&#160; std::swap(v1, v2);</div> <div class="line"><a name="l00443"></a><span class="lineno"> 443</span>&#160; pmin.x = v1;</div> <div class="line"><a name="l00444"></a><span class="lineno"> 444</span>&#160; pmax.x = v2;</div> <div class="line"><a name="l00445"></a><span class="lineno"> 445</span>&#160;</div> <div class="line"><a name="l00446"></a><span class="lineno"> 446</span>&#160; }</div> <div class="line"><a name="l00447"></a><span class="lineno"> 447</span>&#160; <span class="keywordflow">if</span> ( pmin.y &gt; pmax.y) {</div> <div class="line"><a name="l00448"></a><span class="lineno"> 448</span>&#160; <span class="keywordtype">double</span> v1 = pmin.y;</div> <div class="line"><a name="l00449"></a><span class="lineno"> 449</span>&#160; <span class="keywordtype">double</span> v2 = pmax.y;</div> <div class="line"><a name="l00450"></a><span class="lineno"> 450</span>&#160; std::swap(v1, v2);</div> <div class="line"><a name="l00451"></a><span class="lineno"> 451</span>&#160; pmin.y = v1;</div> <div class="line"><a name="l00452"></a><span class="lineno"> 452</span>&#160; pmax.y = v2;</div> <div class="line"><a name="l00453"></a><span class="lineno"> 453</span>&#160; }</div> <div class="line"><a name="l00454"></a><span class="lineno"> 454</span>&#160; <span class="keywordflow">if</span> ( pmin.z &gt; pmax.z) {</div> <div class="line"><a name="l00455"></a><span class="lineno"> 455</span>&#160; <span class="keywordtype">double</span> v1 = pmin.z;</div> <div class="line"><a name="l00456"></a><span class="lineno"> 456</span>&#160; <span class="keywordtype">double</span> v2 = pmax.z;</div> <div class="line"><a name="l00457"></a><span class="lineno"> 
457</span>&#160; std::swap(v1, v2);</div> <div class="line"><a name="l00458"></a><span class="lineno"> 458</span>&#160; pmin.z = v1;</div> <div class="line"><a name="l00459"></a><span class="lineno"> 459</span>&#160; pmax.z = v2;</div> <div class="line"><a name="l00460"></a><span class="lineno"> 460</span>&#160; }</div> <div class="line"><a name="l00461"></a><span class="lineno"> 461</span>&#160;</div> <div class="line"><a name="l00462"></a><span class="lineno"> 462</span>&#160;}</div> <div class="line"><a name="l00463"></a><span class="lineno"> 463</span>&#160;</div> <div class="line"><a name="l00464"></a><span class="lineno"> 464</span>&#160;</div> <div class="line"><a name="l00465"></a><span class="lineno"> 465</span>&#160;};</div> <div class="line"><a name="l00466"></a><span class="lineno"> 466</span>&#160;</div> <div class="line"><a name="l00467"></a><span class="lineno"> 467</span>&#160;<span class="keyword">template</span>&lt;<span class="keyword">typename</span> Po<span class="keywordtype">int</span>Type&gt; Box&lt;PointType&gt; operator *(<span class="keyword">const</span> Box&lt;PointType&gt;&amp; box, <span class="keyword">const</span> <span class="keywordtype">double</span>&amp; v) {</div> <div class="line"><a name="l00468"></a><span class="lineno"> 468</span>&#160; PointType pmin = box.min_corner();</div> <div class="line"><a name="l00469"></a><span class="lineno"> 469</span>&#160; PointType pmax = box.max_corner();</div> <div class="line"><a name="l00470"></a><span class="lineno"> 470</span>&#160; <span class="keywordtype">double</span> deltaX = box.xlength() * v / 2;</div> <div class="line"><a name="l00471"></a><span class="lineno"> 471</span>&#160; <span class="keywordtype">double</span> deltaY = box.ylength() * v / 2;</div> <div class="line"><a name="l00472"></a><span class="lineno"> 472</span>&#160; <span class="keywordtype">double</span> deltaZ = box.is3d() ? 
box.zlength() * v / 2 : 0;</div> <div class="line"><a name="l00473"></a><span class="lineno"> 473</span>&#160; pmin -= {deltaX, deltaY, deltaZ};</div> <div class="line"><a name="l00474"></a><span class="lineno"> 474</span>&#160; pmax += {deltaX, deltaY, deltaZ};</div> <div class="line"><a name="l00475"></a><span class="lineno"> 475</span>&#160; <span class="keywordflow">return</span> Box&lt;PointType&gt;(pmin, pmax);</div> <div class="line"><a name="l00476"></a><span class="lineno"> 476</span>&#160;}</div> <div class="line"><a name="l00477"></a><span class="lineno"> 477</span>&#160;</div> <div class="line"><a name="l00478"></a><span class="lineno"> 478</span>&#160;<span class="keyword">typedef</span> <a class="code" href="class_ilwis_1_1_box.html">Ilwis::Box&lt;Ilwis::Pixel&gt;</a> BoundingBox;</div> <div class="line"><a name="l00479"></a><span class="lineno"> 479</span>&#160;<span class="keyword">typedef</span> <a class="code" href="class_ilwis_1_1_box.html">Ilwis::Box&lt;Ilwis::Coordinate&gt;</a> Envelope;</div> <div class="line"><a name="l00480"></a><span class="lineno"> 480</span>&#160;</div> <div class="line"><a name="l00481"></a><span class="lineno"> 481</span>&#160;}</div> <div class="line"><a name="l00482"></a><span class="lineno"> 482</span>&#160;</div> <div class="line"><a name="l00483"></a><span class="lineno"> 483</span>&#160;</div> <div class="line"><a name="l00484"></a><span class="lineno"> 484</span>&#160;Q_DECLARE_METATYPE(<a class="code" href="class_ilwis_1_1_box.html">Ilwis::BoundingBox</a>)</div> <div class="line"><a name="l00485"></a><span class="lineno"> 485</span>&#160;Q_DECLARE_METATYPE(Ilwis::Box&lt;Ilwis::Pixeld&gt;)</div> <div class="line"><a name="l00486"></a><span class="lineno"> 486</span>&#160;Q_DECLARE_METATYPE(Ilwis::Envelope)</div> <div class="line"><a name="l00487"></a><span class="lineno"> 487</span>&#160;</div> <div class="line"><a name="l00488"></a><span class="lineno"> 488</span>&#160;</div> <div class="line"><a 
name="l00489"></a><span class="lineno"> 489</span>&#160;</div> <div class="line"><a name="l00490"></a><span class="lineno"> 490</span>&#160;<span class="preprocessor">#endif // BOX_H</span></div> </div><!-- fragment --></div><!-- contents --> <!-- start footer part --> <hr class="footer"/><address class="footer"><small> Generated on Fri Mar 28 2014 13:51:04 for Ilwis-Objects by &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="doxygen.png" alt="doxygen"/> </a> 1.8.3.1 </small></address> </body> </html>
Java
package eu.dowsing.kolla.widget.brick.facade; import javafx.scene.layout.Pane; import javafx.scene.paint.Color; import javafx.scene.shape.Circle; import javafx.scene.shape.CircleBuilder; import javafx.scene.shape.Rectangle; import javafx.scene.shape.RectangleBuilder; import com.leapmotion.leap.Hand; import eu.dowsing.kolla.widget.brick.model.BrickModel; import eu.dowsing.kolla.widget.brick.model.BrickModel.Position; /** * Represents a complete hand including its fingers. * * @author richardg * */ public class BrickView { // port(left hand:red) and starboard(right hand:green) public enum Importance { PRIMARY, SECONDARY } private Rectangle horizontal; private Rectangle vertical; private Rectangle[] fingerRects; private Circle hint; /** Hints at where the gesture started. **/ private Circle startHint; public BrickView(Pane p, int rectHeight, int rectWidth, int rectX, int rectY, int miniRectHeight, int miniRectWidth) { drawIndicator(p, rectHeight, rectWidth, rectX, rectY, miniRectHeight, miniRectWidth); } private void drawIndicator(Pane p, int hHeight, int hWidth, int rectX, int rectY, int mHeight, int mWidth) { final int fingerCount = 5; fingerRects = new Rectangle[fingerCount]; final int rectMargin = 10; final int hRealWidth = hWidth - (2 * rectMargin); // create the measure for the mini finger rectangles int miniRectMargin = rectMargin / 2; int mRealWidth = mWidth - miniRectMargin; int mRectX = rectX + (miniRectMargin / 2); int mRectY = rectY; // create measures for the vertical rectangle final int vWidth = hHeight; final int vHeight = hWidth / 2; // create the circle indicating where the hand can be this.hint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2)) .centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build(); p.getChildren().add(hint); // create the circle indicating where the gesture started this.startHint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - 
(hHeight / 2)) .centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build(); p.getChildren().add(startHint); // create the rectangle indicating position of the hand horizontal = RectangleBuilder.create().height(hHeight).width(hRealWidth).arcHeight(0).arcWidth(0) .stroke(Color.RED).fill(Color.web("blue", 0.1)).translateX(rectX).translateY(rectY).build(); p.getChildren().add(horizontal); // create rectangle indicating if the hand is vertical vertical = RectangleBuilder.create().height(vHeight).width(vWidth).arcHeight(0).arcWidth(0).stroke(Color.RED) .fill(Color.web("blue", 0.1)).translateX(rectX + (vWidth / 2)).translateY(rectY - (vHeight / 2)) .build(); p.getChildren().add(vertical); // now create the rectangles indicating fingers found for (int i = 0; i < fingerRects.length; i++) { Rectangle mini = RectangleBuilder.create().height(mHeight).width(mRealWidth).arcHeight(0).arcWidth(0) .stroke(Color.GREEN).fill(Color.web("blue", 0.1)).translateX(mRectX + (i * mWidth)) .translateY(mRectY).build(); fingerRects[i] = mini; p.getChildren().add(mini); } } public Color getPitchColor(Hand h) { double direction = Math.toDegrees(h.direction().pitch()); if (direction < 10 && direction > -10) { return Color.web("blue", 0.1); } else if (direction < 100 && direction > 80) { return Color.web("green", 0.1); } else if (direction < -80 && direction > -100) { return Color.web("yellow", 0.1); } else { return Color.web("red", 0.1); } } public Color getHandColor(Importance importance) { // port(left hand/secondary:red) and starboard(right hand/primary:green) if (importance == Importance.PRIMARY) { return Color.web("green", 1); } else if (importance == Importance.SECONDARY) { return Color.web("red", 1); } else { return Color.web("yellow", 1); } } public void setShowGestureStart(Importance importance) { Color fill = getHandColor(importance); this.startHint.setVisible(true); this.startHint.setFill(fill); } /** * Show the hand * * @param importance * @param pos * 
@param fingerCount * @param handledGesture */ public void showHand(Importance importance, Position pos, int fingerCount, boolean handledGesture) { // first all rectangles visible setVisible(true); // hide vertical or horizontal position Color fill = getHandColor(importance); if (pos == Position.HORIZONTAL) { vertical.setVisible(false); } else if (pos == Position.VERTICAL) { horizontal.setVisible(false); } // notify the user that the gesture was handled if (handledGesture) { fill = Color.web("yellow", 1); } // color the rectangles horizontal.setFill(fill); vertical.setFill(fill); // then we hide invisible fingers for (int i = fingerCount; i < fingerRects.length; i++) { fingerRects[i].setVisible(false); } } /** * Show or hide the complete hand with all indicators * * @param visible */ public void setVisible(boolean visible) { hint.setVisible(visible); startHint.setVisible(visible); horizontal.setVisible(visible); vertical.setVisible(visible); for (Rectangle rect : this.fingerRects) { rect.setVisible(visible); } } /** * Show or hide only the hand hint. * * @param visible */ public void setHintVisible(boolean visible) { this.hint.setVisible(visible); } }
Java
import os
import datetime

from jinja2 import Environment, PackageLoader, TemplateNotFound

from hotzenplotz.openstack.common import cfg
from hotzenplotz.openstack.common import log as logging
from hotzenplotz.openstack.common import utils

from hotzenplotz.common import exception
from hotzenplotz.api import validator


LOG = logging.getLogger(__name__)


class CronHandler(object):
    """Handler Cron Resource.

    Renders cron puppet manifests from a Jinja2 template and writes them
    under ``/etc/puppet/modules/cron/``.
    """

    def __init__(self, **kwargs):
        # Load the 'cron' template shipped in hotzenplotz.worker/templates.
        env = Environment(loader=PackageLoader('hotzenplotz.worker',
                                               'templates'))
        self.template = env.get_template('cron')
        # Target directory; defaulted lazily in _create_cron.
        self.dir_path = None

    # @utils.synchronized('haproxy')
    def do_config(self, request):
        """Dispatch a cron configuration request.

        :param request: dict with a 'method' key (one of create_cron,
            delete_cron, update_cron) and a 'cron_resource' payload.
        :raises exception.CronConfigureError: on validation failure or when
            the underlying create/delete/update handler fails.
        """
        try:
            self._validate_request(request)
        except exception.BadRequest as e:
            LOG.warn('Bad request: %s' % e)
            raise exception.CronConfigureError(explanation=str(e))
        cmd = request['method']
        msg = request['cron_resource']
        if cmd == 'create_cron':
            try:
                self._create_cron(msg)
            except exception.CronCreateError as e:
                raise exception.CronConfigureError(explanation=str(e))
        elif cmd == 'delete_cron':
            try:
                self._delete_cron(msg)
            except exception.HaproxyDeleteError as e:
                raise exception.CronConfigureError(explanation=str(e))
        elif cmd == 'update_cron':
            try:
                self._update_cron(msg)
            except exception.CronUpdateError as e:
                raise exception.CronConfigureError(explanation=str(e))

    def _create_cron(self, msg, syntax_check=False):
        """Render the cron template and write it to a new manifest file.

        :param msg: cron resource dict; 'title' names the manifest file.
        :param syntax_check: when True, run ``puppet parser validate`` on
            the written file.
        :raises exception.CronCreateError: on write or syntax failure.
        """
        try:
            output = self.template.render(cron_resource=msg)
        except TemplateNotFound as e:
            raise TemplateNotFound(str(e))
        try:
            if not self.dir_path:
                self.dir_path = '/etc/puppet/modules/cron/'
            cron_name = msg['title']
            file_path = self.dir_path + cron_name
            # BUG FIX: original called path.exists(), but only `os` is
            # imported — that raised NameError at runtime.
            if not os.path.exists(file_path):
                with open(file_path, 'a') as f:
                    f.write(output)
        except exception.CronCreateError as e:
            raise exception.CronCreateError(explanation=str(e))
        if syntax_check:
            try:
                self._test_syntax(file_path)
            except exception.ProcessExecutionError as e:
                raise exception.CronCreateError(explanation=str(e))
        LOG.debug("Created the new cron successfully")

    def _delete_cron(self, msg):
        """Delete a cron resource.

        NOTE(review): the body below was copied from a haproxy worker —
        the helpers it calls (_create_lb_deleted_haproxy_cfg,
        _test_haproxy_config, _backup_original_cfg,
        _replace_original_cfg_with_new, _reload_haproxy_cfg) are not
        defined in this module. TODO: port to cron semantics.
        """
        LOG.debug("Deleting cron for NAME:%s USER: %s PROJECT:%s"
                  % (msg['id'], msg['user_id'], msg['project_id']))
        try:
            new_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
        except exception.HaproxyLBNotExists as e:
            LOG.warn('%s', e)
            return
            ##raise exception.HaproxyDeleteError(explanation=str(e))

        try:
            self._test_haproxy_config(new_cfg_path)
        except exception.ProcessExecutionError as e:
            raise exception.HaproxyDeleteError(explanation=str(e))
        rc, backup_path = self._backup_original_cfg()
        if rc != 0:
            raise exception.HaproxyDeleteError(explanation=backup_path)
        rc, strerror = self._replace_original_cfg_with_new(new_cfg_path)
        if rc != 0:
            raise exception.HaproxyDeleteError(explanation=strerror)
        if self._reload_haproxy_cfg(backup_path) != 0:
            e = 'Failed to reload haproxy'
            raise exception.HaproxyDeleteError(explanation=str(e))
        LOG.debug("Deleted the new load balancer successfully")

    def _update_cron(self, msg):
        """Update a cron resource.

        NOTE(review): like _delete_cron, this still calls undefined
        haproxy helpers — TODO: port to cron semantics.
        """
        LOG.debug("Updating the haproxy load "
                  "balancer for NAME:%s USER: %s PROJECT:%s"
                  % (msg['uuid'], msg['user_id'], msg['project_id']))
        try:
            lb_deleted_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
        except exception.HaproxyLBNotExists as e:
            LOG.warn('%s', e)
            raise exception.HaproxyUpdateError(explanation=str(e))
        try:
            new_cfg_path = self._create_lb_haproxy_cfg(
                msg, base_cfg_path=lb_deleted_cfg_path)
        except exception.HaproxyCreateCfgError as e:
            raise exception.HaproxyUpdateError(explanation=str(e))
        try:
            self._test_haproxy_config(new_cfg_path)
        except exception.ProcessExecutionError as e:
            raise exception.HaproxyUpdateError(explanation=str(e))
        LOG.debug("Updated the new load balancer successfully")

    def _validate_request(self, request):
        # BUG FIX: original referenced `validate`, which is not imported;
        # the imported module is `validator`.
        validator.check_tcp_request(request)

    def _get_lb_name(self, msg):
        # TODO(wenjianhn): utf-8 support, base64
        ##return "%s_%s" % (msg['project_id'],
        return "%s" % msg['uuid']

    def _is_lb_in_use(self, lb_name,
                      base_cfg_path='/etc/haproxy/haproxy.cfg'):
        """Return True if `lb_name` appears as a `listen` section name."""
        with open(base_cfg_path) as cfg:
            lines = cfg.readlines()
        try:
            in_use_lb_name = [line.split()[1] for
                              line in lines if line.startswith('listen')]
        except IndexError:
            LOG.error("No item was found after listen directive,"
                      "is the haproxy configuraion file valid?")
            raise
        return lb_name in in_use_lb_name

    def _test_syntax(self, cfile_path):
        """Validate a puppet manifest with `puppet parser validate`.

        :raises exception.ProcessExecutionError: if validation fails.
        """
        LOG.info('Testing the new puppet configuration file')
        cmd = "puppet parser validate %s" % cfile_path
        try:
            # NOTE(review): utils.execute conventionally takes *args in
            # OpenStack common code — confirm it accepts a single string.
            utils.execute(cmd)
        except exception.ProcessExecutionError as e:
            LOG.warn('Did not pass the configuration syntax test: %s', e)
            raise

    def _get_one_lb_info(self, line_all, line_index, line_total):
        """Collect the tab-indented body lines of one `listen` section.

        Returns (index_of_next_listen_or_last_line, collected_lines).
        """
        value = []
        for i in range(line_index, line_total):
            line = line_all[i]
            if line.startswith('\t'):
                value.append(line)
            elif line.startswith('listen'):
                return i, value
        return line_total - 1, value
Java
/*
 * Copyright (c) 2016, WSO2 Inc. (http://wso2.com) All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.wso2.msf4j;

import io.netty.buffer.ByteBuf;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;

/**
 * A responder for sending chunk-encoded response. Chunks passed to
 * {@link #sendChunk} are delivered to the client asynchronously; calling
 * {@link #close()} marks the end of the chunked response.
 */
public interface ChunkResponder extends Closeable {

    /**
     * Adds a chunk of data to the response. The content will be sent to the client asynchronously.
     *
     * @param chunk content to send
     * @throws IOException if the connection is already closed
     */
    void sendChunk(ByteBuffer chunk) throws IOException;

    /**
     * Adds a chunk of data to the response. The content will be sent to the client asynchronously.
     *
     * @param chunk content to send
     * @throws IOException if this {@link ChunkResponder} already closed or the connection is closed
     */
    void sendChunk(ByteBuf chunk) throws IOException;

    /**
     * Closes this responder which signals the end of the chunk response.
     *
     * @throws IOException if closing fails or the connection is already closed
     */
    @Override
    void close() throws IOException;
}
Java
# AUTOGENERATED FILE FROM balenalib/coral-dev-alpine:3.14-run # remove several traces of python RUN apk del python* # http://bugs.python.org/issue19846 # > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK. ENV LANG C.UTF-8 # install python dependencies RUN apk add --no-cache ca-certificates libffi \ && apk add --no-cache libssl1.0 || apk add --no-cache libssl1.1 # key 63C7CC90: public key "Simon McVittie <smcv@pseudorandom.co.uk>" imported # key 3372DCFA: public key "Donald Stufft (dstufft) <donald@stufft.io>" imported RUN gpg --keyserver keyring.debian.org --recv-keys 4DE8FF2A63C7CC90 \ && gpg --keyserver keyserver.ubuntu.com --recv-key 6E3CBCE93372DCFA \ && gpg --keyserver keyserver.ubuntu.com --recv-keys 0x52a43a1e4b77b059 # point Python at a system-provided certificate database. Otherwise, we might hit CERTIFICATE_VERIFY_FAILED. # https://www.python.org/dev/peps/pep-0476/#trust-database ENV SSL_CERT_FILE /etc/ssl/certs/ca-certificates.crt ENV PYTHON_VERSION 3.10.0 # if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'" ENV PYTHON_PIP_VERSION 21.2.4 ENV SETUPTOOLS_VERSION 58.0.0 RUN set -x \ && buildDeps=' \ curl \ gnupg \ ' \ && apk add --no-cache --virtual .build-deps $buildDeps \ && curl -SLO "http://resin-packages.s3.amazonaws.com/python/v$PYTHON_VERSION/Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" \ && echo "13ab188bd0214779de247bbde0919f4c19c91f78a34d26171b567b556a06c828 Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" | sha256sum -c - \ && tar -xzf "Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" --strip-components=1 \ && rm -rf "Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" \ && if [ ! 
-e /usr/local/bin/pip3 ]; then : \ && curl -SLO "https://raw.githubusercontent.com/pypa/get-pip/430ba37776ae2ad89f794c7a43b90dc23bac334c/get-pip.py" \ && echo "19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c get-pip.py" | sha256sum -c - \ && python3 get-pip.py \ && rm get-pip.py \ ; fi \ && pip3 install --no-cache-dir --upgrade --force-reinstall pip=="$PYTHON_PIP_VERSION" setuptools=="$SETUPTOOLS_VERSION" \ && find /usr/local \ \( -type d -a -name test -o -name tests \) \ -o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \ -exec rm -rf '{}' + \ && cd / \ && rm -rf /usr/src/python ~/.cache # make some useful symlinks that are expected to exist RUN cd /usr/local/bin \ && ln -sf pip3 pip \ && { [ -e easy_install ] || ln -s easy_install-* easy_install; } \ && ln -sf idle3 idle \ && ln -sf pydoc3 pydoc \ && ln -sf python3 python \ && ln -sf python3-config python-config CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"] RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@python.sh" \ && echo "Running test-stack@python" \ && chmod +x test-stack@python.sh \ && bash test-stack@python.sh \ && rm -rf test-stack@python.sh RUN [ ! 
-d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Alpine Linux 3.14 \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nPython v3.10.0, Pip v21.2.4, Setuptools v58.0.0 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info RUN echo $'#!/bin/bash\nbalena-info\nbusybox ln -sf /bin/busybox /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \ && chmod +x /bin/sh-shim \ && ln -f /bin/sh /bin/sh.real \ && ln -f /bin/sh-shim /bin/sh
Java
# Pseudostellaria heterantha var. heterantha VARIETY #### Status ACCEPTED #### According to NUB Generator [autonym] #### Published in null #### Original name null ### Remarks null
Java
# Harpalyce angustiflora Leon & Alain SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
# Bradleia blumei Steud. SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
# Ptyssiglottis leptoneura Hallier f. SPECIES #### Status ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
Java
# Fomes microporus (Sw.) Fr., 1885 SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in Grevillea 14(no. 69): 20 (1885) #### Original name Boletus microporus Sw., 1806 ### Remarks null
Java
# Stellaria vernalis Raunk. SPECIES #### Status ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
Java
# Neofuscelia atroviridis (Essl.) Essl. SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
# Mougeotia genuflexa (Dillwyn) C. Agardh SPECIES #### Status ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
# Forsellesia pungens (Brandegee) A. Heller SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
Java
/*
 * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

// NOTE: AWS SDK generated code — deserializes the ListProcessingJobs
// response payload into summaries plus an optional pagination token.

#include <aws/sagemaker/model/ListProcessingJobsResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>

#include <utility>

using namespace Aws::SageMaker::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;

ListProcessingJobsResult::ListProcessingJobsResult()
{
}

// Delegates to operator= so construction and assignment share one
// deserialization path.
ListProcessingJobsResult::ListProcessingJobsResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}

ListProcessingJobsResult& ListProcessingJobsResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  JsonView jsonValue = result.GetPayload().View();
  // "ProcessingJobSummaries" is a JSON array; each element becomes one
  // ProcessingJobSummary appended to m_processingJobSummaries.
  if(jsonValue.ValueExists("ProcessingJobSummaries"))
  {
    Array<JsonView> processingJobSummariesJsonList = jsonValue.GetArray("ProcessingJobSummaries");
    for(unsigned processingJobSummariesIndex = 0; processingJobSummariesIndex < processingJobSummariesJsonList.GetLength(); ++processingJobSummariesIndex)
    {
      m_processingJobSummaries.push_back(processingJobSummariesJsonList[processingJobSummariesIndex].AsObject());
    }
  }

  // Pagination token; absent on the last page (m_nextToken stays empty).
  if(jsonValue.ValueExists("NextToken"))
  {
    m_nextToken = jsonValue.GetString("NextToken");
  }

  return *this;
}
Java
#region License Header
/*
* QUANTLER.COM - Quant Fund Development Platform
* Quantler Core Trading Engine. Copyright 2018 Quantler B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#endregion License Header

using MessagePack;
using System;
using Quantler.Securities;

namespace Quantler.Data.Corporate
{
    /// <summary>
    /// Dividend amount
    /// </summary>
    [MessagePackObject]
    public class Dividend : DataPointImpl
    {
        #region Public Constructors

        /// <summary>
        /// Initializes a new instance of the <see cref="Dividend"/> class.
        /// </summary>
        public Dividend() => DataType = DataType.Dividend;

        /// <summary>
        /// Initializes a new instance of the <see cref="Dividend"/> class.
        /// </summary>
        /// <param name="ticker">The ticker.</param>
        /// <param name="date">The date.</param>
        /// <param name="amount">The amount, rounded to 2 decimal places.</param>
        public Dividend(TickerSymbol ticker, DateTime date, decimal amount)
            : this()
        {
            Ticker = ticker;
            Occured = date;
            TimeZone = TimeZone.Utc;
            Amount = amount;
        }

        #endregion Public Constructors

        #region Public Properties

        /// <summary>
        /// Amount distribution, stored in the base-class Price field and
        /// rounded to 2 decimal places on assignment.
        /// </summary>
        [Key(6)]
        public decimal Amount
        {
            get => Price;
            // BUG FIX: the setter previously rounded the existing Price
            // (`Math.Round(Price, 2)`), discarding the assigned value —
            // so Amount set via the constructor was always 0. It must
            // round the incoming `value`.
            set => Price = Math.Round(value, 2);
        }

        #endregion Public Properties
    }
}
Java
/* * Copyright 2016-present Open Networking Laboratory * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Implementation of YANG node bgpVrfAf's children nodes. */ package org.onosproject.yang.gen.v1.ne.bgpcomm.rev20141225.nebgpcomm.bgpcomm.bgpvrfs.bgpvrf.bgpvrfafs.bgpvrfaf;
Java
/// <reference path="./fabricPlugin.ts"/>
/// <reference path="./profileHelpers.ts"/>
/// <reference path="./containerHelpers.ts"/>
/// <reference path="../../helpers/js/storageHelpers.ts"/>
/// <reference path="../../helpers/js/controllerHelpers.ts"/>
/// <reference path="../../helpers/js/selectionHelpers.ts"/>
/// <reference path="../../helpers/js/filterHelpers.ts"/>
module Fabric {

  // Angular controller backing the Fabric container view: lists containers
  // (optionally grouped by profile/version or location), tracks selection,
  // and keeps filters persisted to local storage.
  export var ContainerViewController = _module.controller("Fabric.ContainerViewController",
      ["$scope", "jolokia", "$location", "localStorage", "$route", "workspace", "marked", "ProfileCart", "$dialog",
       ($scope, jolokia, $location, localStorage, $route, workspace:Workspace, marked, ProfileCart, $dialog) => {

    // --- scope state -----------------------------------------------------
    $scope.name = ContainerViewController.name;
    $scope.containers = <Array<Container>>[];
    $scope.selectedContainers = <Array<Container>>[];
    $scope.groupBy = 'none';                 // 'none' | 'profileIds' | 'location'
    $scope.filter = '';
    $scope.cartItems = [];
    $scope.versionIdFilter = '';
    $scope.profileIdFilter = '';
    $scope.locationIdFilter = '';
    $scope.hasCounts = true;
    $scope.toString = Core.toString;
    $scope.filterContainersText = 'Filter Containers...';
    $scope.filterProfilesText = 'Filter Profiles...';
    $scope.filterLocationsText = 'Filter Locations...';
    $scope.filterBoxText = $scope.filterContainersText;
    $scope.selectedTags = [];
    $scope.createLocationDialog = ContainerHelpers.getCreateLocationDialog($scope, $dialog);

    // Attributes requested from the Fabric manager MBean for each container
    // and each profile (keeps the jolokia payload small).
    var containerFields = ['id', 'profileIds', 'profiles', 'versionId', 'location', 'alive', 'type', 'ensembleServer', 'provisionResult', 'root', 'jolokiaUrl', 'jmxDomains', 'metadata', 'parentId'];
    var profileFields = ['id', 'hidden', 'version', 'summaryMarkdown', 'iconURL', 'tags'];

    Fabric.initScope($scope, $location, jolokia, workspace);
    SelectionHelpers.decorate($scope);

    // when viewing profile boxes in container view, disable checkboxes
    $scope.viewOnly = true;

    // Persist grouping and the three filters to local storage / URL params.
    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'groupBy',
      paramName: 'groupBy',
      initialValue: $scope.groupBy
    });

    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'versionIdFilter',
      paramName: 'versionIdFilter',
      initialValue: $scope.versionIdFilter
    });

    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'profileIdFilter',
      paramName: 'profileIdFilter',
      initialValue: $scope.profileIdFilter
    });

    StorageHelpers.bindModelToLocalStorage({
      $scope: $scope,
      $location: $location,
      localStorage: localStorage,
      modelName: 'locationIdFilter',
      paramName: 'locationIdFilter',
      initialValue: $scope.locationIdFilter
    });

    // CSS class selector for the group-by toggle buttons.
    $scope.groupByClass = ControllerHelpers.createClassSelector({
      'profileIds': 'btn-primary',
      'location': 'btn-primary',
      'none': 'btn-primary'
    });

    // Keep selectedContainers in sync with the 'selected' flag on each row.
    $scope.$watch('containers', (newValue, oldValue) => {
      if (newValue !== oldValue) {
        $scope.selectedContainers = $scope.containers.filter((container) => { return container['selected']; });
      }
    }, true);

    $scope.maybeShowLocation = () => {
      return ($scope.groupBy === 'location' || $scope.groupBy === 'none') && $scope.selectedContainers.length > 0;
    }

    // Drill-down: `thing` is either a location id (string) or a profile
    // object; sets the matching filter and switches to the flat view.
    $scope.showContainersFor = (thing) => {
      if (angular.isString(thing)) {
        $scope.locationIdFilter = thing;
      } else {
        $scope.profileIdFilter = thing.id;
        $scope.versionIdFilter = thing.version;
      }
      $scope.groupBy = 'none';
    }

    $scope.filterLocation = (locationId) => {
      return FilterHelpers.searchObject(locationId, $scope.filter);
    }

    $scope.filterProfiles = (profile) => {
      return FilterHelpers.searchObject(profile.id, $scope.filter);
    }

    // Container predicate: must match all three id filters (when set) plus
    // the free-text filter. NOTE(review): `.any(...)` is a non-standard
    // array helper (Sugar.js-style) — presumably provided by a bundled lib.
    $scope.filterContainers = (container) => {
      if (!Core.isBlank($scope.versionIdFilter) && container.versionId !== $scope.versionIdFilter) {
        return false;
      }
      if (!Core.isBlank($scope.profileIdFilter) && !container.profileIds.any($scope.profileIdFilter)) {
        return false;
      }
      if (!Core.isBlank($scope.locationIdFilter) && container.location !== $scope.locationIdFilter) {
        return false;
      }
      return FilterHelpers.searchObject(container.id, $scope.filter);
    }

    $scope.filterContainer = $scope.filterContainers;

    $scope.viewProfile = (profile:Profile) => {
      Fabric.gotoProfile(workspace, jolokia, workspace.localStorage, $location, profile.version, profile.id);
    };

    // Add `thing` (or each element, if an array) to `group` unless an item
    // with the same value at key `index` is already present.
    function maybeAdd(group: Array<any>, thing:any, index:string) {
      if (angular.isArray(thing)) {
        thing.forEach((i) => { maybeAdd(group, i, index); });
      } else {
        if (!group.any((item) => { return thing[index] === item[index] })) {
          group.add(thing);
        }
      }
    }

    // Group containers into { versionId: { containers, profiles } }.
    function groupByVersions(containers:Array<Container>) {
      var answer = {};
      containers.forEach((container) => {
        var versionId = container.versionId;
        var version = answer[versionId] || { containers: <Array<Container>>[], profiles: <Array<Profile>>[] };
        maybeAdd(version.containers, container, 'id');
        maybeAdd(version.profiles, container.profiles, 'id');
        answer[versionId] = version;
      });
      return answer;
    }

    // Group containers into { location: { containers } }.
    function groupByLocation(containers:Array<Container>) {
      var answer = {};
      containers.forEach((container) => {
        var location = container.location;
        var loc = answer[location] || { containers: Array<Container>() };
        maybeAdd(loc.containers, container, 'id');
        answer[location] = loc;
      });
      return answer;
    }

    Fabric.loadRestApi(jolokia, workspace, undefined, (response) => {
      $scope.restApiUrl = UrlHelpers.maybeProxy(Core.injector.get('jolokiaUrl'), response.value);
      log.debug("Scope rest API: ", $scope.restApiUrl);

      // Poll the Fabric manager for containers + profiles and rebuild the
      // grouped views on each update.
      Core.registerForChanges(jolokia, $scope, {
        type: 'exec', mbean: Fabric.managerMBean,
        operation: 'containers(java.util.List, java.util.List)',
        arguments:[containerFields, profileFields]
      }, (response) => {
        var containers = response.value;
        SelectionHelpers.sync($scope.selectedContainers, containers, 'id');
        var versions = {};
        var locations = {};

        // massage the returned data a bit first
        containers.forEach((container) => {
          if (Core.isBlank(container.location)) {
            container.location = ContainerHelpers.NO_LOCATION;
          }
          container.profiles = container.profiles.filter((p) => { return !p.hidden });
          container.icon = Fabric.getTypeIcon(container);
          container.services = Fabric.getServiceList(container);
        });

        // Derive per-profile alive/dead counts and rendered markdown summary.
        var versions = groupByVersions(containers);

        angular.forEach(versions, (version, versionId) => {
          version.profiles.forEach((profile) => {
            var containers = version.containers.filter((c) => { return c.profileIds.some(profile.id); });
            profile.aliveCount = containers.count((c) => { return c.alive; });
            profile.deadCount = containers.length - profile.aliveCount;
            profile.summary = profile.summaryMarkdown ? marked(profile.summaryMarkdown) : '';
            profile.iconURL = Fabric.toIconURL($scope, profile.iconURL);
            profile.tags = ProfileHelpers.getTags(profile);
          });
        });

        var locations = groupByLocation(containers);
        var locationIds = ContainerHelpers.extractLocations(containers);
        $scope.locationMenu = ContainerHelpers.buildLocationMenu($scope, jolokia, locationIds);

        // grouped by location
        $scope.locations = locations;

        // grouped by version/profile
        $scope.versions = versions;

        // Sort by id with child containers grouped under parents
        var sortedContainers = containers.sortBy('id');
        var rootContainers = sortedContainers.exclude((c) => { return !c.root; });
        var childContainers = sortedContainers.exclude((c) => { return c.root; });
        if (childContainers.length > 0) {
          var tmp = [];
          rootContainers.each((c) => {
            tmp.add(c);
            var children = childContainers.exclude((child) => { return child.parentId !== c.id });
            tmp.add(children);
          });
          containers = tmp;
        }

        $scope.containers = containers;
        Core.$apply($scope);
      });

      // Track which containers form the ZooKeeper ensemble.
      Core.registerForChanges(jolokia, $scope, {
        type: 'read', mbean: Fabric.clusterManagerMBean,
        attribute: 'EnsembleContainers'
      }, (response) => {
        $scope.ensembleContainerIds = response.value;
        Core.$apply($scope);
      });

    });

  }]);
}
Java
/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler; import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException; import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.regionserver.SplitTransaction; import 
org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.util.ConfigUtil; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.KeyLocker; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZKTable; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.data.Stat; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.LinkedHashMultimap; /** * Manages and performs region assignment. * <p> * Monitors ZooKeeper for events related to regions in transition. * <p> * Handles existing regions in transition during master failover. 
*/ @InterfaceAudience.Private public class AssignmentManager extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(AssignmentManager.class); public static final ServerName HBCK_CODE_SERVERNAME = ServerName.valueOf(HConstants.HBCK_CODE_NAME, -1, -1L); public static final String ASSIGNMENT_TIMEOUT = "hbase.master.assignment.timeoutmonitor.timeout"; public static final int DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT = 600000; public static final String ASSIGNMENT_TIMEOUT_MANAGEMENT = "hbase.assignment.timeout.management"; public static final boolean DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT = false; public static final String ALREADY_IN_TRANSITION_WAITTIME = "hbase.assignment.already.intransition.waittime"; public static final int DEFAULT_ALREADY_IN_TRANSITION_WAITTIME = 60000; // 1 minute protected final Server server; private ServerManager serverManager; private boolean shouldAssignRegionsWithFavoredNodes; private CatalogTracker catalogTracker; protected final TimeoutMonitor timeoutMonitor; private final TimerUpdater timerUpdater; private LoadBalancer balancer; private final MetricsAssignmentManager metricsAssignmentManager; private final TableLockManager tableLockManager; private AtomicInteger numRegionsOpened = new AtomicInteger(0); final private KeyLocker<String> locker = new KeyLocker<String>(); /** * Map of regions to reopen after the schema of a table is changed. Key - * encoded region name, value - HRegionInfo */ private final Map <String, HRegionInfo> regionsToReopen; /* * Maximum times we recurse an assignment/unassignment. * See below in {@link #assign()} and {@link #unassign()}. */ private final int maximumAttempts; /** * Map of two merging regions from the region to be created. 
*/ private final Map<String, PairOfSameType<HRegionInfo>> mergingRegions = new HashMap<String, PairOfSameType<HRegionInfo>>(); /** * The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment * failure due to lack of availability of region plan */ private final long sleepTimeBeforeRetryingMetaAssignment; /** Plans for region movement. Key is the encoded version of a region name*/ // TODO: When do plans get cleaned out? Ever? In server open and in server // shutdown processing -- St.Ack // All access to this Map must be synchronized. final NavigableMap<String, RegionPlan> regionPlans = new TreeMap<String, RegionPlan>(); private final ZKTable zkTable; /** * Contains the server which need to update timer, these servers will be * handled by {@link TimerUpdater} */ private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer; private final ExecutorService executorService; // For unit tests, keep track of calls to ClosedRegionHandler private Map<HRegionInfo, AtomicBoolean> closedRegionHandlerCalled = null; // For unit tests, keep track of calls to OpenedRegionHandler private Map<HRegionInfo, AtomicBoolean> openedRegionHandlerCalled = null; //Thread pool executor service for timeout monitor private java.util.concurrent.ExecutorService threadPoolExecutorService; // A bunch of ZK events workers. Each is a single thread executor service private final java.util.concurrent.ExecutorService zkEventWorkers; private List<EventType> ignoreStatesRSOffline = Arrays.asList( EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED); private final RegionStates regionStates; // The threshold to use bulk assigning. Using bulk assignment // only if assigning at least this many regions to at least this // many servers. If assigning fewer regions to fewer servers, // bulk assigning may be not as efficient. 
private final int bulkAssignThresholdRegions; private final int bulkAssignThresholdServers; // Should bulk assignment wait till all regions are assigned, // or it is timed out? This is useful to measure bulk assignment // performance, but not needed in most use cases. private final boolean bulkAssignWaitTillAllAssigned; /** * Indicator that AssignmentManager has recovered the region states so * that ServerShutdownHandler can be fully enabled and re-assign regions * of dead servers. So that when re-assignment happens, AssignmentManager * has proper region states. * * Protected to ease testing. */ protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false); /** Is the TimeOutManagement activated **/ private final boolean tomActivated; /** * A map to track the count a region fails to open in a row. * So that we don't try to open a region forever if the failure is * unrecoverable. We don't put this information in region states * because we don't expect this to happen frequently; we don't * want to copy this information over during each state transition either. */ private final ConcurrentHashMap<String, AtomicInteger> failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>(); // A flag to indicate if we are using ZK for region assignment private final boolean useZKForAssignment; // In case not using ZK for region assignment, region states // are persisted in meta with a state store private final RegionStateStore regionStateStore; /** * For testing only! Set to true to skip handling of split. */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL") public static boolean TEST_SKIP_SPLIT_HANDLING = false; /** Listeners that are called on assignment events. */ private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>(); /** * Constructs a new assignment manager. 
* * @param server * @param serverManager * @param catalogTracker * @param service * @throws KeeperException * @throws IOException */ public AssignmentManager(Server server, ServerManager serverManager, CatalogTracker catalogTracker, final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster, final TableLockManager tableLockManager) throws KeeperException, IOException { super(server.getZooKeeper()); this.server = server; this.serverManager = serverManager; this.catalogTracker = catalogTracker; this.executorService = service; this.regionStateStore = new RegionStateStore(server); this.regionsToReopen = Collections.synchronizedMap (new HashMap<String, HRegionInfo> ()); Configuration conf = server.getConfiguration(); // Only read favored nodes if using the favored nodes load balancer. this.shouldAssignRegionsWithFavoredNodes = conf.getClass( HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals( FavoredNodeLoadBalancer.class); this.tomActivated = conf.getBoolean( ASSIGNMENT_TIMEOUT_MANAGEMENT, DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT); if (tomActivated){ this.serversInUpdatingTimer = new ConcurrentSkipListSet<ServerName>(); this.timeoutMonitor = new TimeoutMonitor( conf.getInt("hbase.master.assignment.timeoutmonitor.period", 30000), server, serverManager, conf.getInt(ASSIGNMENT_TIMEOUT, DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT)); this.timerUpdater = new TimerUpdater(conf.getInt( "hbase.master.assignment.timerupdater.period", 10000), server); Threads.setDaemonThreadRunning(timerUpdater.getThread(), server.getServerName() + ".timerUpdater"); } else { this.serversInUpdatingTimer = null; this.timeoutMonitor = null; this.timerUpdater = null; } this.zkTable = new ZKTable(this.watcher); // This is the max attempts, not retries, so it should be at least 1. 
this.maximumAttempts = Math.max(1, this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10)); this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration().getLong( "hbase.meta.assignment.retry.sleeptime", 1000l); this.balancer = balancer; int maxThreads = conf.getInt("hbase.assignment.threads.max", 30); this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool( maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM.")); this.regionStates = new RegionStates(server, serverManager, regionStateStore); this.bulkAssignWaitTillAllAssigned = conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false); this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7); this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3); int workers = conf.getInt("hbase.assignment.zkevent.workers", 20); ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker"); zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L, TimeUnit.SECONDS, threadFactory); this.tableLockManager = tableLockManager; this.metricsAssignmentManager = new MetricsAssignmentManager(); useZKForAssignment = ConfigUtil.useZKForAssignment(conf); } void startTimeOutMonitor() { if (tomActivated) { Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), server.getServerName() + ".timeoutMonitor"); } } /** * Add the listener to the notification list. * @param listener The AssignmentListener to register */ public void registerListener(final AssignmentListener listener) { this.listeners.add(listener); } /** * Remove the listener from the notification list. * @param listener The AssignmentListener to unregister */ public boolean unregisterListener(final AssignmentListener listener) { return this.listeners.remove(listener); } /** * @return Instance of ZKTable. 
*/ public ZKTable getZKTable() { // These are 'expensive' to make involving trip to zk ensemble so allow // sharing. return this.zkTable; } /** * This SHOULD not be public. It is public now * because of some unit tests. * * TODO: make it package private and keep RegionStates in the master package */ public RegionStates getRegionStates() { return regionStates; } /** * Used in some tests to mock up region state in meta */ @VisibleForTesting RegionStateStore getRegionStateStore() { return regionStateStore; } public RegionPlan getRegionReopenPlan(HRegionInfo hri) { return new RegionPlan(hri, null, regionStates.getRegionServerOfRegion(hri)); } /** * Add a regionPlan for the specified region. * @param encodedName * @param plan */ public void addPlan(String encodedName, RegionPlan plan) { synchronized (regionPlans) { regionPlans.put(encodedName, plan); } } /** * Add a map of region plans. */ public void addPlans(Map<String, RegionPlan> plans) { synchronized (regionPlans) { regionPlans.putAll(plans); } } /** * Set the list of regions that will be reopened * because of an update in table schema * * @param regions * list of regions that should be tracked for reopen */ public void setRegionsToReopen(List <HRegionInfo> regions) { for(HRegionInfo hri : regions) { regionsToReopen.put(hri.getEncodedName(), hri); } } /** * Used by the client to identify if all regions have the schema updates * * @param tableName * @return Pair indicating the status of the alter command * @throws IOException */ public Pair<Integer, Integer> getReopenStatus(TableName tableName) throws IOException { List <HRegionInfo> hris = MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true); Integer pending = 0; for (HRegionInfo hri : hris) { String name = hri.getEncodedName(); // no lock concurrent access ok: sequential consistency respected. 
if (regionsToReopen.containsKey(name) || regionStates.isRegionInTransition(name)) { pending++; } } return new Pair<Integer, Integer>(pending, hris.size()); } /** * Used by ServerShutdownHandler to make sure AssignmentManager has completed * the failover cleanup before re-assigning regions of dead servers. So that * when re-assignment happens, AssignmentManager has proper region states. */ public boolean isFailoverCleanupDone() { return failoverCleanupDone.get(); } /** * To avoid racing with AM, external entities may need to lock a region, * for example, when SSH checks what regions to skip re-assigning. */ public Lock acquireRegionLock(final String encodedName) { return locker.acquireLock(encodedName); } /** * Now, failover cleanup is completed. Notify server manager to * process queued up dead servers processing, if any. */ void failoverCleanupDone() { failoverCleanupDone.set(true); serverManager.processQueuedDeadServers(); } /** * Called on startup. * Figures whether a fresh cluster start of we are joining extant running cluster. * @throws IOException * @throws KeeperException * @throws InterruptedException */ void joinCluster() throws IOException, KeeperException, InterruptedException { long startTime = System.currentTimeMillis(); // Concurrency note: In the below the accesses on regionsInTransition are // outside of a synchronization block where usually all accesses to RIT are // synchronized. The presumption is that in this case it is safe since this // method is being played by a single thread on startup. // TODO: Regions that have a null location and are not in regionsInTransitions // need to be handled. 
// Scan hbase:meta to build list of existing regions, servers, and assignment // Returns servers who have not checked in (assumed dead) and their regions Map<ServerName, List<HRegionInfo>> deadServers = rebuildUserRegions(); // This method will assign all user regions if a clean server startup or // it will reconstruct master state and cleanup any leftovers from // previous master process. boolean failover = processDeadServersAndRegionsInTransition(deadServers); if (!useZKForAssignment) { // Not use ZK for assignment any more, remove the ZNode ZKUtil.deleteNodeRecursively(watcher, watcher.assignmentZNode); } recoverTableInDisablingState(); recoverTableInEnablingState(); LOG.info("Joined the cluster in " + (System.currentTimeMillis() - startTime) + "ms, failover=" + failover); } /** * Process all regions that are in transition in zookeeper and also * processes the list of dead servers by scanning the META. * Used by master joining an cluster. If we figure this is a clean cluster * startup, will assign all user regions. * @param deadServers * Map of dead servers and their regions. Can be null. * @throws KeeperException * @throws IOException * @throws InterruptedException */ boolean processDeadServersAndRegionsInTransition( final Map<ServerName, List<HRegionInfo>> deadServers) throws KeeperException, IOException, InterruptedException { List<String> nodes = ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode); if (nodes == null && useZKForAssignment) { String errorMessage = "Failed to get the children from ZK"; server.abort(errorMessage, new IOException(errorMessage)); return true; // Doesn't matter in this case } boolean failover = !serverManager.getDeadServers().isEmpty(); if (failover) { // This may not be a failover actually, especially if meta is on this master. if (LOG.isDebugEnabled()) { LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers()); } } else { // If any one region except meta is assigned, it's a failover. 
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet(); for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) { HRegionInfo hri = en.getKey(); if (!hri.isMetaTable() && onlineServers.contains(en.getValue())) { LOG.debug("Found " + hri + " out on cluster"); failover = true; break; } } } if (!failover && nodes != null) { // If any one region except meta is in transition, it's a failover. for (String encodedName : nodes) { RegionState regionState = regionStates.getRegionState(encodedName); if (regionState != null && !regionState.getRegion().isMetaRegion()) { LOG.debug("Found " + regionState + " in RITs"); failover = true; break; } } } if (!failover && !useZKForAssignment) { // If any region except meta is in transition on a live server, it's a failover. Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition(); if (!regionsInTransition.isEmpty()) { Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet(); for (RegionState regionState : regionsInTransition.values()) { if (!regionState.getRegion().isMetaRegion() && onlineServers.contains(regionState.getServerName())) { LOG.debug("Found " + regionState + " in RITs"); failover = true; break; } } } } if (!failover) { // If we get here, we have a full cluster restart. It is a failover only // if there are some HLogs are not split yet. For meta HLogs, they should have // been split already, if any. 
We can walk through those queued dead servers, // if they don't have any HLogs, this restart should be considered as a clean one Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet(); if (!queuedDeadServers.isEmpty()) { Configuration conf = server.getConfiguration(); Path rootdir = FSUtils.getRootDir(conf); FileSystem fs = rootdir.getFileSystem(conf); for (ServerName serverName : queuedDeadServers) { Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString())); Path splitDir = logDir.suffix(HLog.SPLITTING_EXT); if (fs.exists(logDir) || fs.exists(splitDir)) { LOG.debug("Found queued dead server " + serverName); failover = true; break; } } if (!failover) { // We figured that it's not a failover, so no need to // work on these re-queued dead servers any more. LOG.info("AM figured that it's not a failover and cleaned up " + queuedDeadServers.size() + " queued dead servers"); serverManager.removeRequeuedDeadServers(); } } } Set<TableName> disabledOrDisablingOrEnabling = null; if (!failover) { disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher); disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher)); // Clean re/start, mark all user regions closed before reassignment // TODO -Hbase-11319 regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling); } // Now region states are restored regionStateStore.start(); // If we found user regions out on cluster, its a failover. if (failover) { LOG.info("Found regions out on cluster or in RIT; presuming failover"); // Process list of dead servers and regions in RIT. // See HBASE-4580 for more information. 
processDeadServersAndRecoverLostRegions(deadServers); } if (!failover && useZKForAssignment) { // Cleanup any existing ZK nodes and start watching ZKAssign.deleteAllNodes(watcher); ZKUtil.listChildrenAndWatchForNewChildren(this.watcher, this.watcher.assignmentZNode); } // Now we can safely claim failover cleanup completed and enable // ServerShutdownHandler for further processing. The nodes (below) // in transition, if any, are for regions not related to those // dead servers at all, and can be done in parallel to SSH. failoverCleanupDone(); if (!failover) { // Fresh cluster startup. LOG.info("Clean cluster startup. Assigning user regions"); assignAllUserRegions(disabledOrDisablingOrEnabling); } return failover; } /** * If region is up in zk in transition, then do fixup and block and wait until * the region is assigned and out of transition. Used on startup for * catalog regions. * @param hri Region to look for. * @return True if we processed a region in transition else false if region * was not up in zk in transition. * @throws InterruptedException * @throws KeeperException * @throws IOException */ boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri) throws InterruptedException, KeeperException, IOException { String encodedRegionName = hri.getEncodedName(); if (!processRegionInTransition(encodedRegionName, hri)) { return false; // The region is not in transition } LOG.debug("Waiting on " + HRegionInfo.prettyPrint(encodedRegionName)); while (!this.server.isStopped() && this.regionStates.isRegionInTransition(encodedRegionName)) { RegionState state = this.regionStates.getRegionTransitionState(encodedRegionName); if (state == null || !serverManager.isServerOnline(state.getServerName())) { // The region is not in transition, or not in transition on an online // server. Doesn't help to block here any more. Caller need to // verify the region is actually assigned. 
break; } this.regionStates.waitForUpdate(100); } return true; } /** * Process failover of new master for region <code>encodedRegionName</code> * up in zookeeper. * @param encodedRegionName Region to process failover for. * @param regionInfo If null we'll go get it from meta table. * @return True if we processed <code>regionInfo</code> as a RIT. * @throws KeeperException * @throws IOException */ boolean processRegionInTransition(final String encodedRegionName, final HRegionInfo regionInfo) throws KeeperException, IOException { // We need a lock here to ensure that we will not put the same region twice // It has no reason to be a lock shared with the other operations. // We can do the lock on the region only, instead of a global lock: what we want to ensure // is that we don't have two threads working on the same region. Lock lock = locker.acquireLock(encodedRegionName); try { Stat stat = new Stat(); byte [] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat); if (data == null) return false; RegionTransition rt; try { rt = RegionTransition.parseFrom(data); } catch (DeserializationException e) { LOG.warn("Failed parse znode data", e); return false; } HRegionInfo hri = regionInfo; if (hri == null) { // The region info is not passed in. We will try to find the region // from region states map/meta based on the encoded region name. But we // may not be able to find it. This is valid for online merge that // the region may have not been created if the merge is not completed. // Therefore, it is not in meta at master recovery time. 
hri = regionStates.getRegionInfo(rt.getRegionName()); EventType et = rt.getEventType(); if (hri == null && et != EventType.RS_ZK_REGION_MERGING && et != EventType.RS_ZK_REQUEST_REGION_MERGE) { LOG.warn("Couldn't find the region in recovering " + rt); return false; } } return processRegionsInTransition( rt, hri, stat.getVersion()); } finally { lock.unlock(); } } /** * This call is invoked only (1) master assign meta; * (2) during failover mode startup, zk assignment node processing. * The locker is set in the caller. It returns true if the region * is in transition for sure, false otherwise. * * It should be private but it is used by some test too. */ boolean processRegionsInTransition( final RegionTransition rt, final HRegionInfo regionInfo, final int expectedVersion) throws KeeperException { EventType et = rt.getEventType(); // Get ServerName. Could not be null. final ServerName sn = rt.getServerName(); final byte[] regionName = rt.getRegionName(); final String encodedName = HRegionInfo.encodeRegionName(regionName); final String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName); LOG.info("Processing " + prettyPrintedRegionName + " in state: " + et); if (regionStates.isRegionInTransition(encodedName) && (regionInfo.isMetaRegion() || !useZKForAssignment)) { LOG.info("Processed region " + prettyPrintedRegionName + " in state: " + et + ", does nothing since the region is already in transition " + regionStates.getRegionTransitionState(encodedName)); // Just return return true; } if (!serverManager.isServerOnline(sn)) { // It was transitioning on a dead server, so it's closed now. // Force to OFFLINE and put it in transition, but not assign it // since log splitting for the dead server is not done yet. LOG.debug("RIT " + encodedName + " in state=" + rt.getEventType() + " was on deadserver; forcing offline"); if (regionStates.isRegionOnline(regionInfo)) { // Meta could still show the region is assigned to the previous // server. 
If that server is online, when we reload the meta, the // region is put back to online, we need to offline it. regionStates.regionOffline(regionInfo); sendRegionClosedNotification(regionInfo); } // Put it back in transition so that SSH can re-assign it regionStates.updateRegionState(regionInfo, State.OFFLINE, sn); if (regionInfo.isMetaRegion()) { // If it's meta region, reset the meta location. // So that master knows the right meta region server. MetaRegionTracker.setMetaLocation(watcher, sn); } else { // No matter the previous server is online or offline, // we need to reset the last region server of the region. regionStates.setLastRegionServerOfRegion(sn, encodedName); // Make sure we know the server is dead. if (!serverManager.isServerDead(sn)) { serverManager.expireServer(sn); } } return false; } switch (et) { case M_ZK_REGION_CLOSING: // Insert into RIT & resend the query to the region server: may be the previous master // died before sending the query the first time. final RegionState rsClosing = regionStates.updateRegionState(rt, State.CLOSING); this.executorService.submit( new EventHandler(server, EventType.M_MASTER_RECOVERY) { @Override public void process() throws IOException { ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName()); try { unassign(regionInfo, rsClosing, expectedVersion, null, useZKForAssignment, null); if (regionStates.isRegionOffline(regionInfo)) { assign(regionInfo, true); } } finally { lock.unlock(); } } }); break; case RS_ZK_REGION_CLOSED: case RS_ZK_REGION_FAILED_OPEN: // Region is closed, insert into RIT and handle it regionStates.updateRegionState(regionInfo, State.CLOSED, sn); invokeAssign(regionInfo); break; case M_ZK_REGION_OFFLINE: // Insert in RIT and resend to the regionserver regionStates.updateRegionState(rt, State.PENDING_OPEN); final RegionState rsOffline = regionStates.getRegionState(regionInfo); this.executorService.submit( new EventHandler(server, EventType.M_MASTER_RECOVERY) { @Override public void 
process() throws IOException { ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName()); try { RegionPlan plan = new RegionPlan(regionInfo, null, sn); addPlan(encodedName, plan); assign(rsOffline, false, false); } finally { lock.unlock(); } } }); break; case RS_ZK_REGION_OPENING: regionStates.updateRegionState(rt, State.OPENING); break; case RS_ZK_REGION_OPENED: // Region is opened, insert into RIT and handle it // This could be done asynchronously, we would need then to acquire the lock in the // handler. regionStates.updateRegionState(rt, State.OPEN); new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process(); break; case RS_ZK_REQUEST_REGION_SPLIT: case RS_ZK_REGION_SPLITTING: case RS_ZK_REGION_SPLIT: // Splitting region should be online. We could have skipped it during // user region rebuilding since we may consider the split is completed. // Put it in SPLITTING state to avoid complications. regionStates.regionOnline(regionInfo, sn); regionStates.updateRegionState(rt, State.SPLITTING); if (!handleRegionSplitting( rt, encodedName, prettyPrintedRegionName, sn)) { deleteSplittingNode(encodedName, sn); } break; case RS_ZK_REQUEST_REGION_MERGE: case RS_ZK_REGION_MERGING: case RS_ZK_REGION_MERGED: if (!handleRegionMerging( rt, encodedName, prettyPrintedRegionName, sn)) { deleteMergingNode(encodedName, sn); } break; default: throw new IllegalStateException("Received region in state:" + et + " is not valid."); } LOG.info("Processed region " + prettyPrintedRegionName + " in state " + et + ", on " + (serverManager.isServerOnline(sn) ? 
"" : "dead ") + "server: " + sn); return true; } /** * When a region is closed, it should be removed from the regionsToReopen * @param hri HRegionInfo of the region which was closed */ public void removeClosedRegion(HRegionInfo hri) { if (regionsToReopen.remove(hri.getEncodedName()) != null) { LOG.debug("Removed region from reopening regions because it was closed"); } } /** * Handles various states an unassigned node can be in. * <p> * Method is called when a state change is suspected for an unassigned node. * <p> * This deals with skipped transitions (we got a CLOSED but didn't see CLOSING * yet). * @param rt * @param expectedVersion */ void handleRegion(final RegionTransition rt, int expectedVersion) { if (rt == null) { LOG.warn("Unexpected NULL input for RegionTransition rt"); return; } final ServerName sn = rt.getServerName(); // Check if this is a special HBCK transition if (sn.equals(HBCK_CODE_SERVERNAME)) { handleHBCK(rt); return; } final long createTime = rt.getCreateTime(); final byte[] regionName = rt.getRegionName(); String encodedName = HRegionInfo.encodeRegionName(regionName); String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName); // Verify this is a known server if (!serverManager.isServerOnline(sn) && !ignoreStatesRSOffline.contains(rt.getEventType())) { LOG.warn("Attempted to handle region transition for server but " + "it is not online: " + prettyPrintedRegionName + ", " + rt); return; } RegionState regionState = regionStates.getRegionState(encodedName); long startTime = System.currentTimeMillis(); if (LOG.isDebugEnabled()) { boolean lateEvent = createTime < (startTime - 15000); LOG.debug("Handling " + rt.getEventType() + ", server=" + sn + ", region=" + (prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) + (lateEvent ? 
", which is more than 15 seconds late" : "") + ", current_state=" + regionState); } // We don't do anything for this event, // so separate it out, no need to lock/unlock anything if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) { return; } // We need a lock on the region as we could update it Lock lock = locker.acquireLock(encodedName); try { RegionState latestState = regionStates.getRegionState(encodedName); if ((regionState == null && latestState != null) || (regionState != null && latestState == null) || (regionState != null && latestState != null && latestState.getState() != regionState.getState())) { LOG.warn("Region state changed from " + regionState + " to " + latestState + ", while acquiring lock"); } long waitedTime = System.currentTimeMillis() - startTime; if (waitedTime > 5000) { LOG.warn("Took " + waitedTime + "ms to acquire the lock"); } regionState = latestState; switch (rt.getEventType()) { case RS_ZK_REQUEST_REGION_SPLIT: case RS_ZK_REGION_SPLITTING: case RS_ZK_REGION_SPLIT: if (!handleRegionSplitting( rt, encodedName, prettyPrintedRegionName, sn)) { deleteSplittingNode(encodedName, sn); } break; case RS_ZK_REQUEST_REGION_MERGE: case RS_ZK_REGION_MERGING: case RS_ZK_REGION_MERGED: // Merged region is a new region, we can't find it in the region states now. // However, the two merging regions are not new. They should be in state for merging. 
if (!handleRegionMerging( rt, encodedName, prettyPrintedRegionName, sn)) { deleteMergingNode(encodedName, sn); } break; case M_ZK_REGION_CLOSING: // Should see CLOSING after we have asked it to CLOSE or additional // times after already being in state of CLOSING if (regionState == null || !regionState.isPendingCloseOrClosingOnServer(sn)) { LOG.warn("Received CLOSING for " + prettyPrintedRegionName + " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: " + regionStates.getRegionState(encodedName)); return; } // Transition to CLOSING (or update stamp if already CLOSING) regionStates.updateRegionState(rt, State.CLOSING); break; case RS_ZK_REGION_CLOSED: // Should see CLOSED after CLOSING but possible after PENDING_CLOSE if (regionState == null || !regionState.isPendingCloseOrClosingOnServer(sn)) { LOG.warn("Received CLOSED for " + prettyPrintedRegionName + " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: " + regionStates.getRegionState(encodedName)); return; } // Handle CLOSED by assigning elsewhere or stopping if a disable // If we got here all is good. Need to update RegionState -- else // what follows will fail because not in expected state. 
new ClosedRegionHandler(server, this, regionState.getRegion()).process(); updateClosedRegionHandlerTracker(regionState.getRegion()); break; case RS_ZK_REGION_FAILED_OPEN: if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) { LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName + " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: " + regionStates.getRegionState(encodedName)); return; } AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName); if (failedOpenCount == null) { failedOpenCount = new AtomicInteger(); // No need to use putIfAbsent, or extra synchronization since // this whole handleRegion block is locked on the encoded region // name, and failedOpenTracker is updated only in this block failedOpenTracker.put(encodedName, failedOpenCount); } if (failedOpenCount.incrementAndGet() >= maximumAttempts) { regionStates.updateRegionState(rt, State.FAILED_OPEN); // remove the tracking info to save memory, also reset // the count for next open initiative failedOpenTracker.remove(encodedName); } else { // Handle this the same as if it were opened and then closed. regionState = regionStates.updateRegionState(rt, State.CLOSED); if (regionState != null) { // When there are more than one region server a new RS is selected as the // destination and the same is updated in the regionplan. 
(HBASE-5546) try { getRegionPlan(regionState.getRegion(), sn, true); new ClosedRegionHandler(server, this, regionState.getRegion()).process(); } catch (HBaseIOException e) { LOG.warn("Failed to get region plan", e); } } } break; case RS_ZK_REGION_OPENING: // Should see OPENING after we have asked it to OPEN or additional // times after already being in state of OPENING if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) { LOG.warn("Received OPENING for " + prettyPrintedRegionName + " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: " + regionStates.getRegionState(encodedName)); return; } // Transition to OPENING (or update stamp if already OPENING) regionStates.updateRegionState(rt, State.OPENING); break; case RS_ZK_REGION_OPENED: // Should see OPENED after OPENING but possible after PENDING_OPEN. if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) { LOG.warn("Received OPENED for " + prettyPrintedRegionName + " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: " + regionStates.getRegionState(encodedName)); if (regionState != null) { // Close it without updating the internal region states, // so as not to create double assignments in unlucky scenarios // mentioned in OpenRegionHandler#process unassign(regionState.getRegion(), null, -1, null, false, sn); } return; } // Handle OPENED by removing from transition and deleted zk node regionState = regionStates.updateRegionState(rt, State.OPEN); if (regionState != null) { failedOpenTracker.remove(encodedName); // reset the count, if any new OpenedRegionHandler( server, this, regionState.getRegion(), sn, expectedVersion).process(); updateOpenedRegionHandlerTracker(regionState.getRegion()); } break; default: throw new IllegalStateException("Received event is not valid."); } } finally { lock.unlock(); } } //For unit tests only boolean wasClosedHandlerCalled(HRegionInfo hri) { AtomicBoolean b = closedRegionHandlerCalled.get(hri); //compareAndSet to 
be sure that unit tests don't see stale values. Means, //we will return true exactly once unless the handler code resets to true //this value. return b == null ? false : b.compareAndSet(true, false); } //For unit tests only boolean wasOpenedHandlerCalled(HRegionInfo hri) { AtomicBoolean b = openedRegionHandlerCalled.get(hri); //compareAndSet to be sure that unit tests don't see stale values. Means, //we will return true exactly once unless the handler code resets to true //this value. return b == null ? false : b.compareAndSet(true, false); } //For unit tests only void initializeHandlerTrackers() { closedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>(); openedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>(); } void updateClosedRegionHandlerTracker(HRegionInfo hri) { if (closedRegionHandlerCalled != null) { //only for unit tests this is true closedRegionHandlerCalled.put(hri, new AtomicBoolean(true)); } } void updateOpenedRegionHandlerTracker(HRegionInfo hri) { if (openedRegionHandlerCalled != null) { //only for unit tests this is true openedRegionHandlerCalled.put(hri, new AtomicBoolean(true)); } } // TODO: processFavoredNodes might throw an exception, for e.g., if the // meta could not be contacted/updated. We need to see how seriously to treat // this problem as. Should we fail the current assignment. We should be able // to recover from this problem eventually (if the meta couldn't be updated // things should work normally and eventually get fixed up). 
/**
 * Publishes favored-node assignments for the given regions into hbase:meta.
 * No-op unless the favored-node load balancer is enabled.
 *
 * @param regions regions whose favored nodes should be recorded
 * @throws IOException if updating hbase:meta fails
 */
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
  if (!shouldAssignRegionsWithFavoredNodes) return;
  // The AM gets the favored nodes info for each region and updates the meta
  // table with that info
  Map<HRegionInfo, List<ServerName>> regionToFavoredNodes =
    new HashMap<HRegionInfo, List<ServerName>>();
  for (HRegionInfo region : regions) {
    regionToFavoredNodes.put(region,
      ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
  }
  FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes,
    catalogTracker);
}

/**
 * Handle a ZK unassigned node transition triggered by HBCK repair tool.
 * <p>
 * This is handled in a separate code path because it breaks the normal rules.
 * Only M_ZK_REGION_OFFLINE is acted on (it re-triggers assignment); any other
 * event type is logged and ignored.
 * @param rt the transition read from the repair znode
 */
private void handleHBCK(RegionTransition rt) {
  String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
  LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
    ", server=" + rt.getServerName() + ", region=" +
    HRegionInfo.prettyPrint(encodedName));
  RegionState regionState = regionStates.getRegionTransitionState(encodedName);
  switch (rt.getEventType()) {
    case M_ZK_REGION_OFFLINE:
      HRegionInfo regionInfo;
      if (regionState != null) {
        regionInfo = regionState.getRegion();
      } else {
        // Region not in transition here; recover its descriptor from hbase:meta.
        try {
          byte [] name = rt.getRegionName();
          Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
          regionInfo = p.getFirst();
        } catch (IOException e) {
          LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
          return;
        }
      }
      LOG.info("HBCK repair is triggering assignment of region=" +
        regionInfo.getRegionNameAsString());
      // trigger assign, node is already in OFFLINE so don't need to update ZK
      assign(regionInfo, false);
      break;
    default:
      LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
      break;
  }
}

// ZooKeeper events

/**
 * New unassigned node has been created.
 *
 * <p>This happens when an RS begins the OPENING or CLOSING of a region by
 * creating an unassigned node.
*
* <p>When this happens we must:
* <ol>
*   <li>Watch the node for further events</li>
*   <li>Read and handle the state in the node</li>
* </ol>
*/
@Override
public void nodeCreated(String path) {
  handleAssignmentEvent(path);
}

/**
 * Existing unassigned node has had data changed.
 *
 * <p>This happens when an RS transitions from OFFLINE to OPENING, or between
 * OPENING/OPENED and CLOSING/CLOSED.
 *
 * <p>When this happens we must:
 * <ol>
 *   <li>Watch the node for further events</li>
 *   <li>Read and handle the state in the node</li>
 * </ol>
 */
@Override
public void nodeDataChanged(String path) {
  handleAssignmentEvent(path);
}

// We don't want to have two events on the same region managed simultaneously.
// For this reason, we need to wait if an event on the same region is currently in progress.
// So we track the region names of the events in progress, and we keep a waiting list.
private final Set<String> regionsInProgress = new HashSet<String>();
// In a LinkedHashMultimap, the put order is kept when we retrieve the collection back. We need
// this as we want the events to be managed in the same order as we received them.
private final LinkedHashMultimap <String, RegionRunnable>
  zkEventWorkerWaitingList = LinkedHashMultimap.create();

/**
 * A specific runnable that works only on a region.
 */
private interface RegionRunnable extends Runnable{
  /**
   * @return - the name of the region it works on.
   */
  String getRegionName();
}

/**
 * Submit a task, ensuring that there is only one task at a time that working on a given region.
 * Order is respected.
 * <p>
 * Lock ordering: regionsInProgress is always taken before
 * zkEventWorkerWaitingList (both here and in the completion callback below) —
 * keep it that way to avoid deadlock.
 */
protected void zkEventWorkersSubmit(final RegionRunnable regRunnable) {
  synchronized (regionsInProgress) {
    // If we're there is already a task with this region, we add it to the
    // waiting list and return.
    if (regionsInProgress.contains(regRunnable.getRegionName())) {
      synchronized (zkEventWorkerWaitingList){
        zkEventWorkerWaitingList.put(regRunnable.getRegionName(), regRunnable);
      }
      return;
    }

    // No event in progress on this region => we can submit a new task immediately.
    regionsInProgress.add(regRunnable.getRegionName());
    zkEventWorkers.submit(new Runnable() {
      @Override
      public void run() {
        try {
          regRunnable.run();
        } finally {
          // now that we have finished, let's see if there is an event for the same region in the
          // waiting list. If it's the case, we can now submit it to the pool.
          synchronized (regionsInProgress) {
            regionsInProgress.remove(regRunnable.getRegionName());
            synchronized (zkEventWorkerWaitingList) {
              java.util.Set<RegionRunnable> waiting = zkEventWorkerWaitingList.get(
                regRunnable.getRegionName());
              if (!waiting.isEmpty()) {
                // We want the first object only. The only way to get it is through an iterator.
                RegionRunnable toSubmit = waiting.iterator().next();
                zkEventWorkerWaitingList.remove(toSubmit.getRegionName(), toSubmit);
                zkEventWorkersSubmit(toSubmit);
              }
            }
          }
        }
      }
    });
  }
}

// Znode deleted: either a normal end-of-transition cleanup, or the signal
// that a merged region's znode went away (MergingNew handling below).
@Override
public void nodeDeleted(final String path) {
  if (path.startsWith(watcher.assignmentZNode)) {
    final String regionName = ZKAssign.getRegionName(watcher, path);
    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }

      @Override
      public void run() {
        // Serialize with other operations on the same region.
        Lock lock = locker.acquireLock(regionName);
        try {
          RegionState rs = regionStates.getRegionTransitionState(regionName);
          if (rs == null) {
            rs = regionStates.getRegionState(regionName);
            if (rs == null || !rs.isMergingNew()) {
              // MergingNew is an offline state
              return;
            }
          }

          HRegionInfo regionInfo = rs.getRegion();
          String regionNameStr = regionInfo.getRegionNameAsString();
          LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
          boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable());
          ServerName serverName = rs.getServerName();
          if
(serverManager.isServerOnline(serverName)) { if (rs.isOnServer(serverName) && (rs.isOpened() || rs.isSplitting())) { regionOnline(regionInfo, serverName); if (disabled) { // if server is offline, no hurt to unassign again LOG.info("Opened " + regionNameStr + "but this table is disabled, triggering close of region"); unassign(regionInfo); } } else if (rs.isMergingNew()) { synchronized (regionStates) { String p = regionInfo.getEncodedName(); PairOfSameType<HRegionInfo> regions = mergingRegions.get(p); if (regions != null) { onlineMergingRegion(disabled, regions.getFirst(), serverName); onlineMergingRegion(disabled, regions.getSecond(), serverName); } } } } } finally { lock.unlock(); } } private void onlineMergingRegion(boolean disabled, final HRegionInfo hri, final ServerName serverName) { RegionState regionState = regionStates.getRegionState(hri); if (regionState != null && regionState.isMerging() && regionState.isOnServer(serverName)) { regionOnline(regionState.getRegion(), serverName); if (disabled) { unassign(hri); } } } }); } } /** * New unassigned node has been created. * * <p>This happens when an RS begins the OPENING, SPLITTING or CLOSING of a * region by creating a znode. * * <p>When this happens we must: * <ol> * <li>Watch the node for further children changed events</li> * <li>Watch all new children for changed events</li> * </ol> */ @Override public void nodeChildrenChanged(String path) { if (path.equals(watcher.assignmentZNode)) { zkEventWorkers.submit(new Runnable() { @Override public void run() { try { // Just make sure we see the changes for the new znodes List<String> children = ZKUtil.listChildrenAndWatchForNewChildren( watcher, watcher.assignmentZNode); if (children != null) { Stat stat = new Stat(); for (String child : children) { // if region is in transition, we already have a watch // on it, so no need to watch it again. So, as I know for now, // this is needed to watch splitting nodes only. 
if (!regionStates.isRegionInTransition(child)) { ZKAssign.getDataAndWatch(watcher, child, stat); } } } } catch (KeeperException e) { server.abort("Unexpected ZK exception reading unassigned children", e); } } }); } } /** * Marks the region as online. Removes it from regions in transition and * updates the in-memory assignment information. * <p> * Used when a region has been successfully opened on a region server. * @param regionInfo * @param sn */ void regionOnline(HRegionInfo regionInfo, ServerName sn) { regionOnline(regionInfo, sn, HConstants.NO_SEQNUM); } void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) { numRegionsOpened.incrementAndGet(); regionStates.regionOnline(regionInfo, sn, openSeqNum); // Remove plan if one. clearRegionPlan(regionInfo); // Add the server to serversInUpdatingTimer addToServersInUpdatingTimer(sn); balancer.regionOnline(regionInfo, sn); // Tell our listeners that a region was opened sendRegionOpenedNotification(regionInfo, sn); } /** * Pass the assignment event to a worker for processing. * Each worker is a single thread executor service. The reason * for just one thread is to make sure all events for a given * region are processed in order. 
*
* @param path znode path the event fired on
*/
private void handleAssignmentEvent(final String path) {
  if (path.startsWith(watcher.assignmentZNode)) {
    final String regionName = ZKAssign.getRegionName(watcher, path);
    // Submit through zkEventWorkersSubmit so at most one event per region is
    // processed at a time, in arrival order.
    zkEventWorkersSubmit(new RegionRunnable() {
      @Override
      public String getRegionName() {
        return regionName;
      }

      @Override
      public void run() {
        try {
          Stat stat = new Stat();
          byte [] data = ZKAssign.getDataAndWatch(watcher, path, stat);
          if (data == null) return;

          RegionTransition rt = RegionTransition.parseFrom(data);
          handleRegion(rt, stat.getVersion());
        } catch (KeeperException e) {
          server.abort("Unexpected ZK exception reading unassigned node data", e);
        } catch (DeserializationException e) {
          server.abort("Unexpected exception deserializing node data", e);
        }
      }
    });
  }
}

/**
 * Add the server to the set serversInUpdatingTimer, then {@link TimerUpdater}
 * will update timers for this server in background.
 * No-op unless the timeout monitor (tomActivated) is enabled.
 * @param sn server that just checked in
 */
private void addToServersInUpdatingTimer(final ServerName sn) {
  if (tomActivated){
    this.serversInUpdatingTimer.add(sn);
  }
}

/**
 * Touch timers for all regions in transition that have the passed
 * <code>sn</code> in common.
 * Call this method whenever a server checks in. Doing so helps the case where
 * a new regionserver has joined the cluster and its been given 1k regions to
 * open. If this method is tickled every time the region reports in a
 * successful open then the 1k-th region won't be timed out just because its
 * sitting behind the open of 999 other regions. This method is NOT used
 * as part of bulk assign -- there we have a different mechanism for extending
 * the regions in transition timer (we turn it off temporarily -- because
 * there is no regionplan involved when bulk assigning.
 * @param sn destination server whose pending regions should be refreshed
 */
private void updateTimers(final ServerName sn) {
  Preconditions.checkState(tomActivated);
  if (sn == null) return;

  // This loop could be expensive.
  // First make a copy of current regionPlan rather than hold sync while
  // looping because holding sync can cause deadlock.  Its ok in this loop
  // if the Map we're going against is a little stale
  List<Map.Entry<String, RegionPlan>> rps;
  synchronized(this.regionPlans) {
    rps = new ArrayList<Map.Entry<String, RegionPlan>>(regionPlans.entrySet());
  }

  for (Map.Entry<String, RegionPlan> e : rps) {
    if (e.getValue() != null && e.getKey() != null
        && sn.equals(e.getValue().getDestination())) {
      RegionState regionState = regionStates.getRegionTransitionState(e.getKey());
      if (regionState != null) {
        regionState.updateTimestampToNow();
      }
    }
  }
}

/**
 * Marks the region as offline.  Removes it from regions in transition and
 * removes in-memory assignment information.
 * <p>
 * Used when a region has been closed and should remain closed.
 * @param regionInfo region to take offline
 */
public void regionOffline(final HRegionInfo regionInfo) {
  regionOffline(regionInfo, null);
}

// Takes a region of a disabling/disabled table offline: when ZK is used for
// assignment, first deletes the CLOSED/OFFLINE znode so the region is not
// picked up again, then marks the region offline in memory.
public void offlineDisabledRegion(HRegionInfo regionInfo) {
  if (useZKForAssignment) {
    // Disabling so should not be reassigned, just delete the CLOSED node
    LOG.debug("Table being disabled so deleting ZK node and removing from " +
      "regions in transition, skipping assignment of region " +
        regionInfo.getRegionNameAsString());
    String encodedName = regionInfo.getEncodedName();
    deleteNodeInStates(encodedName, "closed", null,
      EventType.RS_ZK_REGION_CLOSED, EventType.M_ZK_REGION_OFFLINE);
  }
  regionOffline(regionInfo);
}

// Assignment methods

/**
 * Assigns the specified region.
 * <p>
 * If a RegionPlan is available with a valid destination then it will be used
 * to determine what server region is assigned to.  If no RegionPlan is
 * available, region will be assigned to a random available server.
 * <p>
 * Updates the RegionState and sends the OPEN RPC.
 * <p>
 * This will only succeed if the region is in transition and in a CLOSED or
 * OFFLINE state or not in transition (in-memory not zk), and of course, the
 * chosen server is up and running (It may have just crashed!). If the
 * in-memory checks pass, the zk node is forced to OFFLINE before assigning.
*
* @param region server to be assigned
* @param setOfflineInZK whether ZK node should be created/transitioned to an
*   OFFLINE state before assigning the region
*/
public void assign(HRegionInfo region, boolean setOfflineInZK) {
  assign(region, setOfflineInZK, false);
}

/**
 * Use care with forceNewPlan. It could cause double assignment.
 */
public void assign(HRegionInfo region,
    boolean setOfflineInZK, boolean forceNewPlan) {
  // Skip regions of disabling/disabled tables that are in transition.
  if (isDisabledorDisablingRegionInRIT(region)) {
    return;
  }
  if (this.serverManager.isClusterShutdown()) {
    LOG.info("Cluster shutdown is set; skipping assign of " +
      region.getRegionNameAsString());
    return;
  }
  String encodedName = region.getEncodedName();
  // Serialize against other operations on this region.
  Lock lock = locker.acquireLock(encodedName);
  try {
    RegionState state = forceRegionStateToOffline(region, forceNewPlan);
    if (state != null) {
      if (regionStates.wasRegionOnDeadServer(encodedName)) {
        // Leave the region for server-shutdown handling to pick up.
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
          + " is dead but not processed yet");
        return;
      }
      assign(state, setOfflineInZK && useZKForAssignment, forceNewPlan);
    }
  } finally {
    lock.unlock();
  }
}

/**
 * Bulk assign regions to <code>destination</code>.
 * @param destination
 * @param regions Regions to assign.
* @return true if successful
*/
boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
  long startTime = EnvironmentEdgeManager.currentTimeMillis();
  try {
    int regionCount = regions.size();
    if (regionCount == 0) {
      return true;
    }
    LOG.debug("Assigning " + regionCount + " region(s) to " + destination.toString());
    Set<String> encodedNames = new HashSet<String>(regionCount);
    for (HRegionInfo region : regions) {
      encodedNames.add(region.getEncodedName());
    }

    List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
    // Take all region locks up front; individual locks are released early as
    // regions drop out of the bulk path, the remainder in the finally below.
    Map<String, Lock> locks = locker.acquireLocks(encodedNames);
    try {
      AtomicInteger counter = new AtomicInteger(0);
      Map<String, Integer> offlineNodesVersions = new ConcurrentHashMap<String, Integer>();
      OfflineCallback cb = new OfflineCallback(
        watcher, destination, counter, offlineNodesVersions);
      Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regions.size());
      List<RegionState> states = new ArrayList<RegionState>(regions.size());
      for (HRegionInfo region : regions) {
        String encodedName = region.getEncodedName();
        if (!isDisabledorDisablingRegionInRIT(region)) {
          RegionState state = forceRegionStateToOffline(region, false);
          boolean onDeadServer = false;
          if (state != null) {
            if (regionStates.wasRegionOnDeadServer(encodedName)) {
              LOG.info("Skip assigning " + region.getRegionNameAsString()
                + ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
                + " is dead but not processed yet");
              onDeadServer = true;
            } else if (!useZKForAssignment
                || asyncSetOfflineInZooKeeper(state, cb, destination)) {
              RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
              plans.put(encodedName, plan);
              states.add(state);
              // Region stays in the bulk path; keep its lock held.
              continue;
            }
          }
          // Reassign if the region wasn't on a dead server
          if (!onDeadServer) {
            LOG.info("failed to force region state to offline or "
              + "failed to set it offline in ZK, will reassign later: " + region);
            failedToOpenRegions.add(region); // assign individually later
          }
        }
        // Release the lock, this region is excluded from bulk assign because
        // we can't update its state, or set its znode to offline.
        Lock lock = locks.remove(encodedName);
        lock.unlock();
      }

      if (useZKForAssignment) {
        // Wait until all unassigned nodes have been put up and watchers set.
        int total = states.size();
        for (int oldCounter = 0; !server.isStopped();) {
          int count = counter.get();
          if (oldCounter != count) {
            LOG.info(destination.toString() + " unassigned znodes=" + count +
              " of total=" + total);
            oldCounter = count;
          }
          if (count >= total) break;
          Threads.sleep(5);
        }
      }

      if (server.isStopped()) {
        return false;
      }

      // Add region plans, so we can updateTimers when one region is opened so
      // that unnecessary timeout on RIT is reduced.
      this.addPlans(plans);

      // Build the per-region (info, znode version, favored nodes) triples for
      // the single bulk OPEN RPC below.
      List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
        new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
      for (RegionState state: states) {
        HRegionInfo region = state.getRegion();
        String encodedRegionName = region.getEncodedName();
        Integer nodeVersion = offlineNodesVersions.get(encodedRegionName);
        if (useZKForAssignment && (nodeVersion == null || nodeVersion == -1)) {
          LOG.warn("failed to offline in zookeeper: " + region);
          failedToOpenRegions.add(region); // assign individually later
          Lock lock = locks.remove(encodedRegionName);
          lock.unlock();
        } else {
          regionStates.updateRegionState(
            region, State.PENDING_OPEN, destination);
          List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
          if (this.shouldAssignRegionsWithFavoredNodes) {
            favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
          }
          regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
            region, nodeVersion, favoredNodes));
        }
      }

      // Move on to open regions.
      try {
        // Send OPEN RPC. If it fails on a IOE or RemoteException,
        // regions will be assigned individually.
        long maxWaitTime = System.currentTimeMillis() +
          this.server.getConfiguration().
            getLong("hbase.regionserver.rpc.startup.waittime", 60000);
        for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
          try {
            // regionOpenInfos is empty if all regions are in failedToOpenRegions list
            if (regionOpenInfos.isEmpty()) {
              break;
            }
            List<RegionOpeningState> regionOpeningStateList = serverManager
              .sendRegionOpen(destination, regionOpenInfos);
            if (regionOpeningStateList == null) {
              // Failed getting RPC connection to this server
              return false;
            }
            for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
              RegionOpeningState openingState = regionOpeningStateList.get(k);
              if (openingState != RegionOpeningState.OPENED) {
                HRegionInfo region = regionOpenInfos.get(k).getFirst();
                if (openingState == RegionOpeningState.ALREADY_OPENED) {
                  processAlreadyOpenedRegion(region, destination);
                } else if (openingState == RegionOpeningState.FAILED_OPENING) {
                  // Failed opening this region, reassign it later
                  failedToOpenRegions.add(region);
                } else {
                  LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state "
                    + openingState + " in assigning region " + region);
                }
              }
            }
            break;
          } catch (IOException e) {
            if (e instanceof RemoteException) {
              e = ((RemoteException)e).unwrapRemoteException();
            }
            if (e instanceof RegionServerStoppedException) {
              LOG.warn("The region server was shut down, ", e);
              // No need to retry, the region server is a goner.
              return false;
            } else if (e instanceof ServerNotRunningYetException) {
              long now = System.currentTimeMillis();
              if (now < maxWaitTime) {
                LOG.debug("Server is not yet up; waiting up to " +
                  (maxWaitTime - now) + "ms", e);
                Thread.sleep(100);
                i--; // reset the try count
                continue;
              }
            } else if (e instanceof java.net.SocketTimeoutException
                && this.serverManager.isServerOnline(destination)) {
              // In case socket is timed out and the region server is still online,
              // the openRegion RPC could have been accepted by the server and
              // just the response didn't go through. So we will retry to
              // open the region on the same server.
              if (LOG.isDebugEnabled()) {
                LOG.debug("Bulk assigner openRegion() to " + destination
                  + " has timed out, but the regions might"
                  + " already be opened on it.", e);
              }
              // wait and reset the re-try count, server might be just busy.
              Thread.sleep(100);
              i--;
              continue;
            }
            throw e;
          }
        }
      } catch (IOException e) {
        // Can be a socket timeout, EOF, NoRouteToHost, etc
        LOG.info("Unable to communicate with " + destination
          + " in order to assign regions, ", e);
        return false;
      } catch (InterruptedException e) {
        throw new RuntimeException(e);
      }
    } finally {
      for (Lock lock : locks.values()) {
        lock.unlock();
      }
    }

    // Anything we could not open in bulk is retried one region at a time.
    if (!failedToOpenRegions.isEmpty()) {
      for (HRegionInfo region : failedToOpenRegions) {
        if (!regionStates.isRegionOnline(region)) {
          invokeAssign(region);
        }
      }
    }
    LOG.debug("Bulk assigning done for " + destination);
    return true;
  } finally {
    metricsAssignmentManager.updateBulkAssignTime(
      EnvironmentEdgeManager.currentTimeMillis() - startTime);
  }
}

/**
 * Send CLOSE RPC if the server is online, otherwise, offline the region.
 *
 * The RPC will be sent only to the region sever found in the region state
 * if it is passed in, otherwise, to the src server specified. If region
 * state is not specified, we don't update region state at all, instead
 * we just send the RPC call.
* This is useful for some cleanup without
* messing around the region states (see handleRegion, on region opened
* on an unexpected server scenario, for an example)
*/
private void unassign(final HRegionInfo region,
    final RegionState state, final int versionOfClosingNode,
    final ServerName dest, final boolean transitionInZK,
    final ServerName src) {
  // Prefer the server recorded in the region state over the supplied src.
  ServerName server = src;
  if (state != null) {
    server = state.getServerName();
  }
  long maxWaitTime = -1;
  for (int i = 1; i <= this.maximumAttempts; i++) {
    if (this.server.isStopped() || this.server.isAborted()) {
      LOG.debug("Server stopped/aborted; skipping unassign of " + region);
      return;
    }
    // ClosedRegionhandler can remove the server from this.regions
    if (!serverManager.isServerOnline(server)) {
      LOG.debug("Offline " + region.getRegionNameAsString()
        + ", no need to unassign since it's on a dead server: " + server);
      if (transitionInZK) {
        // delete the node. if no node exists need not bother.
        deleteClosingOrClosedNode(region, server);
      }
      if (state != null) {
        regionOffline(region);
      }
      return;
    }
    try {
      // Send CLOSE RPC
      if (serverManager.sendRegionClose(server, region,
        versionOfClosingNode, dest, transitionInZK)) {
        LOG.debug("Sent CLOSE to " + server + " for region " +
          region.getRegionNameAsString());
        if (useZKForAssignment && !transitionInZK && state != null) {
          // Retry to make sure the region is
          // closed so as to avoid double assignment.
          unassign(region, state, versionOfClosingNode,
            dest, transitionInZK, src);
        }
        return;
      }
      // This never happens. Currently regionserver close always return true.
      // Todo; this can now happen (0.96) if there is an exception in a coprocessor
      LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
        region.getRegionNameAsString());
    } catch (Throwable t) {
      if (t instanceof RemoteException) {
        t = ((RemoteException)t).unwrapRemoteException();
      }
      boolean logRetries = true;
      if (t instanceof NotServingRegionException
          || t instanceof RegionServerStoppedException
          || t instanceof ServerNotRunningYetException) {
        // Region is no longer (or never was) on that server: treat as closed.
        LOG.debug("Offline " + region.getRegionNameAsString()
          + ", it's not any more on " + server, t);
        if (transitionInZK) {
          deleteClosingOrClosedNode(region, server);
        }
        if (state != null) {
          regionOffline(region);
        }
        return;
      } else if ((t instanceof FailedServerException) || (state != null &&
          t instanceof RegionAlreadyInTransitionException)) {
        long sleepTime = 0;
        Configuration conf = this.server.getConfiguration();
        if(t instanceof FailedServerException) {
          // Back off until the failed-server cache entry expires.
          sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
            RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
        } else {
          // RS is already processing this region, only need to update the timestamp
          LOG.debug("update " + state + " the timestamp.");
          state.updateTimestampToNow();
          if (maxWaitTime < 0) {
            maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
              + conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
                DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
          }
          long now = EnvironmentEdgeManager.currentTimeMillis();
          if (now < maxWaitTime) {
            LOG.debug("Region is already in transition; "
              + "waiting up to " + (maxWaitTime - now) + "ms", t);
            sleepTime = 100;
            i--; // reset the try count
            logRetries = false;
          }
        }
        try {
          if (sleepTime > 0) {
            Thread.sleep(sleepTime);
          }
        } catch (InterruptedException ie) {
          LOG.warn("Failed to unassign "
            + region.getRegionNameAsString() + " since interrupted", ie);
          Thread.currentThread().interrupt();
          if (!tomActivated && state != null) {
            regionStates.updateRegionState(region, State.FAILED_CLOSE);
          }
          return;
        }
      }
      if (logRetries) {
        LOG.info("Server " + server + " returned " + t + " for "
          + region.getRegionNameAsString() + ", try=" + i
          + " of " + this.maximumAttempts, t);
        // Presume retry or server will expire.
      }
    }
  }
  // Run out of attempts
  if (!tomActivated && state != null) {
    regionStates.updateRegionState(region, State.FAILED_CLOSE);
  }
}

/**
 * Set region to OFFLINE unless it is opening and forceNewPlan is false.
 * Returns null when the region should not be assigned right now.
 * Note the switch below relies on case fall-through (no break between the
 * groups of labels) — TODO confirm intent before restructuring.
 */
private RegionState forceRegionStateToOffline(
    final HRegionInfo region, final boolean forceNewPlan) {
  RegionState state = regionStates.getRegionState(region);
  if (state == null) {
    LOG.warn("Assigning a region not in region states: " + region);
    state = regionStates.createRegionState(region);
  }

  ServerName sn = state.getServerName();
  if (forceNewPlan && LOG.isDebugEnabled()) {
    LOG.debug("Force region state offline " + state);
  }

  switch (state.getState()) {
  case OPEN:
  case OPENING:
  case PENDING_OPEN:
  case CLOSING:
  case PENDING_CLOSE:
    if (!forceNewPlan) {
      LOG.debug("Skip assigning " +
        region + ", it is already " + state);
      return null;
    }
    // falls through to force a close when forceNewPlan is set
  case FAILED_CLOSE:
  case FAILED_OPEN:
    unassign(region, state, -1, null, false, null);
    state = regionStates.getRegionState(region);
    if (state.isFailedClose()) {
      // If we can't close the region, we can't re-assign
      // it so as to avoid possible double assignment/data loss.
      LOG.info("Skip assigning " +
        region + ", we couldn't close it: " + state);
      return null;
    }
    // falls through
  case OFFLINE:
    // This region could have been open on this server
    // for a while. If the server is dead and not processed
    // yet, we can move on only if the meta shows the
    // region is not on this server actually, or on a server
    // not dead, or dead and processed already.
    // In case not using ZK, we don't need this check because
    // we have the latest info in memory, and the caller
    // will do another round checking any way.
    if (useZKForAssignment
        && regionStates.isServerDeadAndNotProcessed(sn)
        && wasRegionOnDeadServerByMeta(region, sn)) {
      if (!regionStates.isRegionInTransition(region)) {
        LOG.info("Updating the state to " + State.OFFLINE
          + " to allow to be reassigned by SSH");
        regionStates.updateRegionState(region, State.OFFLINE);
      }
      LOG.info("Skip assigning " + region.getRegionNameAsString()
        + ", it is on a dead but not processed yet server: " + sn);
      return null;
    }
    // falls through
  case CLOSED:
    break;
  default:
    LOG.error("Trying to assign region " + region
      + ", which is " + state);
    return null;
  }
  return state;
}

// Consults hbase:meta to decide whether the region's current hosting server
// is dead and not yet processed by server-shutdown handling. Retries meta
// reads until the master stops; on interrupt or stop, falls back to checking
// the passed-in server name.
private boolean wasRegionOnDeadServerByMeta(
    final HRegionInfo region, final ServerName sn) {
  try {
    if (region.isMetaRegion()) {
      ServerName server = catalogTracker.getMetaLocation();
      return regionStates.isServerDeadAndNotProcessed(server);
    }
    while (!server.isStopped()) {
      try {
        catalogTracker.waitForMeta();
        Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
        if (r == null || r.isEmpty()) return false;
        ServerName server = HRegionInfo.getServerName(r);
        return regionStates.isServerDeadAndNotProcessed(server);
      } catch (IOException ioe) {
        LOG.info("Received exception accessing hbase:meta during force assign "
          + region.getRegionNameAsString() + ", retrying", ioe);
      }
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    LOG.info("Interrupted accessing hbase:meta", e);
  }
  // Call is interrupted or server is stopped.
  return regionStates.isServerDeadAndNotProcessed(sn);
}

/**
 * Caller must hold lock on the passed <code>state</code> object.
* @param state region state; caller must hold the region lock
* @param setOfflineInZK whether to force the znode to OFFLINE before sending OPEN
* @param forceNewPlan whether to discard any cached region plan
*/
private void assign(RegionState state,
    final boolean setOfflineInZK, final boolean forceNewPlan) {
  long startTime = EnvironmentEdgeManager.currentTimeMillis();
  try {
    Configuration conf = server.getConfiguration();
    RegionState currentState = state;
    int versionOfOfflineNode = -1;
    RegionPlan plan = null;
    long maxWaitTime = -1;
    HRegionInfo region = state.getRegion();
    RegionOpeningState regionOpenState;
    Throwable previousException = null;
    for (int i = 1; i <= maximumAttempts; i++) {
      if (server.isStopped() || server.isAborted()) {
        LOG.info("Skip assigning " + region.getRegionNameAsString()
          + ", the server is stopped/aborted");
        return;
      }
      if (plan == null) { // Get a server for the region at first
        try {
          plan = getRegionPlan(region, forceNewPlan);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
      }
      if (plan == null) {
        LOG.warn("Unable to determine a plan to assign " + region);
        if (tomActivated){
          this.timeoutMonitor.setAllRegionServersOffline(true);
        } else {
          if (region.isMetaRegion()) {
            // Never give up on meta: sleep and restart the attempt counter.
            try {
              Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment);
              if (i == maximumAttempts) i = 1;
              continue;
            } catch (InterruptedException e) {
              LOG.error("Got exception while waiting for hbase:meta assignment");
              Thread.currentThread().interrupt();
            }
          }
          regionStates.updateRegionState(region, State.FAILED_OPEN);
        }
        return;
      }
      if (setOfflineInZK && versionOfOfflineNode == -1) {
        // get the version of the znode after setting it to OFFLINE.
        // versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
        versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination());
        if (versionOfOfflineNode != -1) {
          if (isDisabledorDisablingRegionInRIT(region)) {
            return;
          }
          // In case of assignment from EnableTableHandler table state is ENABLING. Any how
          // EnableTableHandler will set ENABLED after assigning all the table regions. If we
          // try to set to ENABLED directly then client API may think table is enabled.
          // When we have a case such as all the regions are added directly into hbase:meta and we call
          // assignRegion then we need to make the table ENABLED. Hence in such case the table
          // will not be in ENABLING or ENABLED state.
          TableName tableName = region.getTable();
          if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
            LOG.debug("Setting table " + tableName + " to ENABLED state.");
            setEnabledTable(tableName);
          }
        }
      }
      if (setOfflineInZK && versionOfOfflineNode == -1) {
        LOG.info("Unable to set offline in ZooKeeper to assign " + region);
        // Setting offline in ZK must have been failed due to ZK racing or some
        // exception which may make the server to abort. If it is ZK racing,
        // we should retry since we already reset the region state,
        // existing (re)assignment will fail anyway.
        if (!server.isAborted()) {
          continue;
        }
      }
      LOG.info("Assigning " + region.getRegionNameAsString() +
        " to " + plan.getDestination().toString());
      // Transition RegionState to PENDING_OPEN
      currentState = regionStates.updateRegionState(region,
        State.PENDING_OPEN, plan.getDestination());

      boolean needNewPlan;
      final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
          " to " + plan.getDestination();
      try {
        List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
        if (this.shouldAssignRegionsWithFavoredNodes) {
          favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
        }
        regionOpenState = serverManager.sendRegionOpen(
            plan.getDestination(), region, versionOfOfflineNode, favoredNodes);

        if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
          // Failed opening this region, looping again on a new server.
          needNewPlan = true;
          LOG.warn(assignMsg + ", regionserver says 'FAILED_OPENING', " +
              " trying to assign elsewhere instead; " +
              "try=" + i + " of " + this.maximumAttempts);
        } else {
          // we're done
          if (regionOpenState == RegionOpeningState.ALREADY_OPENED) {
            processAlreadyOpenedRegion(region, plan.getDestination());
          }
          return;
        }

      } catch (Throwable t) {
        if (t instanceof RemoteException) {
          t = ((RemoteException) t).unwrapRemoteException();
        }
        previousException = t;

        // Should we wait a little before retrying? If the server is starting it's yes.
        // If the region is already in transition, it's yes as well: we want to be sure that
        // the region will get opened but we don't want a double assignment.
        boolean hold = (t instanceof RegionAlreadyInTransitionException ||
            t instanceof ServerNotRunningYetException);

        // In case socket is timed out and the region server is still online,
        // the openRegion RPC could have been accepted by the server and
        // just the response didn't go through. So we will retry to
        // open the region on the same server to avoid possible
        // double assignment.
        boolean retry = !hold && (t instanceof java.net.SocketTimeoutException
            && this.serverManager.isServerOnline(plan.getDestination()));

        if (hold) {
          LOG.warn(assignMsg + ", waiting a little before trying on the same region server " +
            "try=" + i + " of " + this.maximumAttempts, t);

          if (maxWaitTime < 0) {
            if (t instanceof RegionAlreadyInTransitionException) {
              maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                + this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
                  DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
            } else {
              maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
                + this.server.getConfiguration().getLong(
                  "hbase.regionserver.rpc.startup.waittime", 60000);
            }
          }
          try {
            needNewPlan = false;
            long now = EnvironmentEdgeManager.currentTimeMillis();
            if (now < maxWaitTime) {
              LOG.debug("Server is not yet up or region is already in transition; "
                + "waiting up to " + (maxWaitTime - now) + "ms", t);
              Thread.sleep(100);
              i--; // reset the try count
            } else if (!(t instanceof RegionAlreadyInTransitionException)) {
              LOG.debug("Server is not up for a while; try a new one", t);
              needNewPlan = true;
            }
          } catch (InterruptedException ie) {
            LOG.warn("Failed to assign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated) {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            return;
          }
        } else if (retry) {
          needNewPlan = false;
          i--; // we want to retry as many times as needed as long as the RS is not dead.
          LOG.warn(assignMsg + ", trying to assign to the same region server due ", t);
        } else {
          needNewPlan = true;
          LOG.warn(assignMsg + ", trying to assign elsewhere instead;" +
              " try=" + i + " of " + this.maximumAttempts, t);
        }
      }

      if (i == this.maximumAttempts) {
        // Don't reset the region state or get a new plan any more.
        // This is the last try.
        continue;
      }

      // If region opened on destination of present plan, reassigning to new
      // RS may cause double assignments. In case of RegionAlreadyInTransitionException
      // reassigning to same RS.
      if (needNewPlan) {
        // Force a new plan and reassign. Will return null if no servers.
        // The new plan could be the same as the existing plan since we don't
        // exclude the server of the original plan, which should not be
        // excluded since it could be the only server up now.
        RegionPlan newPlan = null;
        try {
          newPlan = getRegionPlan(region, true);
        } catch (HBaseIOException e) {
          LOG.warn("Failed to get region plan", e);
        }
        if (newPlan == null) {
          if (tomActivated) {
            this.timeoutMonitor.setAllRegionServersOffline(true);
          } else {
            regionStates.updateRegionState(region, State.FAILED_OPEN);
          }
          LOG.warn("Unable to find a viable location to assign region " +
              region.getRegionNameAsString());
          return;
        }

        if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
          // Clean out plan we failed execute and one that doesn't look like it'll
          // succeed anyways; we need a new plan!
          // Transition back to OFFLINE
          currentState = regionStates.updateRegionState(region, State.OFFLINE);
          versionOfOfflineNode = -1;
          plan = newPlan;
        } else if(plan.getDestination().equals(newPlan.getDestination()) &&
            previousException instanceof FailedServerException) {
          // Same destination and the failure was the failed-server cache:
          // wait out the cache expiry, then retry the same server.
          try {
            LOG.info("Trying to re-assign " + region.getRegionNameAsString() +
              " to the same failed server.");
            Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
              RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
          } catch (InterruptedException ie) {
            LOG.warn("Failed to assign "
              + region.getRegionNameAsString() + " since interrupted", ie);
            Thread.currentThread().interrupt();
            if (!tomActivated) {
              regionStates.updateRegionState(region, State.FAILED_OPEN);
            }
            return;
          }
        }
      }
    }
    // Run out of attempts
    if (!tomActivated) {
      regionStates.updateRegionState(region, State.FAILED_OPEN);
    }
  } finally {
    metricsAssignmentManager.updateAssignmentTime(
      EnvironmentEdgeManager.currentTimeMillis() - startTime);
  }
}

private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn)
{
    // Remove region from in-memory transition and unassigned node from ZK
    // While trying to enable the table the regions of the table were
    // already enabled.
    LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString() + " to " + sn);
    String encodedName = region.getEncodedName();
    // Clean up the stale OFFLINE znode left over from the assignment attempt.
    deleteNodeInStates(encodedName, "offline", sn, EventType.M_ZK_REGION_OFFLINE);
    // Record the region as online on the server that reported ALREADY_OPENED.
    regionStates.regionOnline(region, sn);
  }

  /**
   * If the region's table is DISABLED or DISABLING, offline the region
   * instead of assigning it.
   *
   * @param region region being considered for assignment
   * @return true if the region was offlined here (table disabled/disabling)
   *         and the caller should skip the assign; false otherwise.
   */
  private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
    TableName tableName = region.getTable();
    boolean disabled = this.zkTable.isDisabledTable(tableName);
    if (disabled || this.zkTable.isDisablingTable(tableName)) {
      LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;")
        + " skipping assign of " + region.getRegionNameAsString());
      offlineDisabledRegion(region);
      return true;
    }
    return false;
  }

  /**
   * Set region as OFFLINED up in zookeeper
   *
   * @param state current in-memory state of the region; must be CLOSED or
   *        OFFLINE, otherwise the master aborts.
   * @param destination server we intend to assign the region to; written into
   *        the OFFLINE znode payload.
   * @return the version of the offline node if setting of the OFFLINE node was
   *         successful, -1 otherwise.
   */
  private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
    // Only a CLOSED or OFFLINE region may be transitioned to OFFLINE;
    // anything else indicates a bookkeeping bug, so abort the master.
    if (!state.isClosed() && !state.isOffline()) {
      String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
      this.server.abort(msg, new IllegalStateException(msg));
      return -1;
    }
    regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
    int versionOfOfflineNode;
    try {
      // get the version after setting the znode to OFFLINE
      versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(watcher,
        state.getRegion(), destination);
      if (versionOfOfflineNode == -1) {
        LOG.warn("Attempted to create/force node into OFFLINE state before "
          + "completing assignment but failed to do so for " + state);
        return -1;
      }
    } catch (KeeperException e) {
      server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
      return -1;
    }
    return versionOfOfflineNode;
  }

  /**
   * Convenience overload of {@link #getRegionPlan(HRegionInfo, ServerName, boolean)}
   * with no server excluded.
   *
   * @param region the region to assign
   * @return Plan for passed <code>region</code> (If none currently, it creates one or
   *         if no servers to assign, it returns null).
   */
  private RegionPlan getRegionPlan(final HRegionInfo region,
      final boolean forceNewPlan) throws HBaseIOException {
    return getRegionPlan(region, null, forceNewPlan);
  }

  /**
   * @param region the region to assign
   * @param serverToExclude Server to exclude (we know its bad). Pass null if
   *        all servers are thought to be assignable.
   * @param forceNewPlan If true, then if an existing plan exists, a new plan
   *        will be generated.
   * @return Plan for passed <code>region</code> (If none currently, it creates one or
   *         if no servers to assign, it returns null).
*/ private RegionPlan getRegionPlan(final HRegionInfo region, final ServerName serverToExclude, final boolean forceNewPlan) throws HBaseIOException { // Pickup existing plan or make a new one final String encodedName = region.getEncodedName(); final List<ServerName> destServers = serverManager.createDestinationServersList(serverToExclude); if (destServers.isEmpty()){ LOG.warn("Can't move " + encodedName + ", there is no destination server available."); return null; } RegionPlan randomPlan = null; boolean newPlan = false; RegionPlan existingPlan; synchronized (this.regionPlans) { existingPlan = this.regionPlans.get(encodedName); if (existingPlan != null && existingPlan.getDestination() != null) { LOG.debug("Found an existing plan for " + region.getRegionNameAsString() + " destination server is " + existingPlan.getDestination() + " accepted as a dest server = " + destServers.contains(existingPlan.getDestination())); } if (forceNewPlan || existingPlan == null || existingPlan.getDestination() == null || !destServers.contains(existingPlan.getDestination())) { newPlan = true; randomPlan = new RegionPlan(region, null, balancer.randomAssignment(region, destServers)); if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) { List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1); regions.add(region); try { processFavoredNodes(regions); } catch (IOException ie) { LOG.warn("Ignoring exception in processFavoredNodes " + ie); } } this.regionPlans.put(encodedName, randomPlan); } } if (newPlan) { if (randomPlan.getDestination() == null) { LOG.warn("Can't find a destination for " + encodedName); return null; } LOG.debug("No previous transition plan found (or ignoring " + "an existing plan) for " + region.getRegionNameAsString() + "; generated random plan=" + randomPlan + "; " + serverManager.countOfRegionServers() + " (online=" + serverManager.getOnlineServers().size() + ", available=" + destServers.size() + ") available servers" + ", forceNewPlan=" + forceNewPlan); 
return randomPlan; } LOG.debug("Using pre-existing plan for " + region.getRegionNameAsString() + "; plan=" + existingPlan); return existingPlan; } /** * Unassigns the specified region. * <p> * Updates the RegionState and sends the CLOSE RPC unless region is being * split by regionserver; then the unassign fails (silently) because we * presume the region being unassigned no longer exists (its been split out * of existence). TODO: What to do if split fails and is rolled back and * parent is revivified? * <p> * If a RegionPlan is already set, it will remain. * * @param region server to be unassigned */ public void unassign(HRegionInfo region) { unassign(region, false); } /** * Unassigns the specified region. * <p> * Updates the RegionState and sends the CLOSE RPC unless region is being * split by regionserver; then the unassign fails (silently) because we * presume the region being unassigned no longer exists (its been split out * of existence). TODO: What to do if split fails and is rolled back and * parent is revivified? * <p> * If a RegionPlan is already set, it will remain. * * @param region server to be unassigned * @param force if region should be closed even if already closing */ public void unassign(HRegionInfo region, boolean force, ServerName dest) { // TODO: Method needs refactoring. Ugly buried returns throughout. Beware! LOG.debug("Starting unassign of " + region.getRegionNameAsString() + " (offlining), current state: " + regionStates.getRegionState(region)); String encodedName = region.getEncodedName(); // Grab the state of this region and synchronize on it int versionOfClosingNode = -1; // We need a lock here as we're going to do a put later and we don't want multiple states // creation ReentrantLock lock = locker.acquireLock(encodedName); RegionState state = regionStates.getRegionTransitionState(encodedName); boolean reassign = true; try { if (state == null) { // Region is not in transition. // We can unassign it only if it's not SPLIT/MERGED. 
state = regionStates.getRegionState(encodedName); if (state != null && state.isUnassignable()) { LOG.info("Attempting to unassign " + state + ", ignored"); // Offline region will be reassigned below return; } // Create the znode in CLOSING state try { if (state == null || state.getServerName() == null) { // We don't know where the region is, offline it. // No need to send CLOSE RPC LOG.warn("Attempting to unassign a region not in RegionStates" + region.getRegionNameAsString() + ", offlined"); regionOffline(region); return; } if (useZKForAssignment) { versionOfClosingNode = ZKAssign.createNodeClosing( watcher, region, state.getServerName()); if (versionOfClosingNode == -1) { LOG.info("Attempting to unassign " + region.getRegionNameAsString() + " but ZK closing node " + "can't be created."); reassign = false; // not unassigned at all return; } } } catch (KeeperException e) { if (e instanceof NodeExistsException) { // Handle race between master initiated close and regionserver // orchestrated splitting. See if existing node is in a // SPLITTING or SPLIT state. If so, the regionserver started // an op on node before we could get our CLOSING in. Deal. NodeExistsException nee = (NodeExistsException)e; String path = nee.getPath(); try { if (isSplitOrSplittingOrMergedOrMerging(path)) { LOG.debug(path + " is SPLIT or SPLITTING or MERGED or MERGING; " + "skipping unassign because region no longer exists -- its split or merge"); reassign = false; // no need to reassign for split/merged region return; } } catch (KeeperException.NoNodeException ke) { LOG.warn("Failed getData on SPLITTING/SPLIT at " + path + "; presuming split and that the region to unassign, " + encodedName + ", no longer exists -- confirm", ke); return; } catch (KeeperException ke) { LOG.error("Unexpected zk state", ke); } catch (DeserializationException de) { LOG.error("Failed parse", de); } } // If we get here, don't understand whats going on -- abort. 
server.abort("Unexpected ZK exception creating node CLOSING", e); reassign = false; // heading out already return; } state = regionStates.updateRegionState(region, State.PENDING_CLOSE); } else if (state.isFailedOpen()) { // The region is not open yet regionOffline(region); return; } else if (force && state.isPendingCloseOrClosing()) { LOG.debug("Attempting to unassign " + region.getRegionNameAsString() + " which is already " + state.getState() + " but forcing to send a CLOSE RPC again "); if (state.isFailedClose()) { state = regionStates.updateRegionState(region, State.PENDING_CLOSE); } state.updateTimestampToNow(); } else { LOG.debug("Attempting to unassign " + region.getRegionNameAsString() + " but it is " + "already in transition (" + state.getState() + ", force=" + force + ")"); return; } unassign(region, state, versionOfClosingNode, dest, useZKForAssignment, null); } finally { lock.unlock(); // Region is expected to be reassigned afterwards if (reassign && regionStates.isRegionOffline(region)) { assign(region, true); } } } public void unassign(HRegionInfo region, boolean force){ unassign(region, force, null); } /** * @param region regioninfo of znode to be deleted. */ public void deleteClosingOrClosedNode(HRegionInfo region, ServerName sn) { String encodedName = region.getEncodedName(); deleteNodeInStates(encodedName, "closing", sn, EventType.M_ZK_REGION_CLOSING, EventType.RS_ZK_REGION_CLOSED); } /** * @param path * @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state. * @throws KeeperException Can happen if the znode went away in meantime. * @throws DeserializationException */ private boolean isSplitOrSplittingOrMergedOrMerging(final String path) throws KeeperException, DeserializationException { boolean result = false; // This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets // cleaned up before we can get data from it. 
byte [] data = ZKAssign.getData(watcher, path); if (data == null) { LOG.info("Node " + path + " is gone"); return false; } RegionTransition rt = RegionTransition.parseFrom(data); switch (rt.getEventType()) { case RS_ZK_REQUEST_REGION_SPLIT: case RS_ZK_REGION_SPLIT: case RS_ZK_REGION_SPLITTING: case RS_ZK_REQUEST_REGION_MERGE: case RS_ZK_REGION_MERGED: case RS_ZK_REGION_MERGING: result = true; break; default: LOG.info("Node " + path + " is in " + rt.getEventType()); break; } return result; } /** * Used by unit tests. Return the number of regions opened so far in the life * of the master. Increases by one every time the master opens a region * @return the counter value of the number of regions opened so far */ public int getNumRegionsOpened() { return numRegionsOpened.get(); } /** * Waits until the specified region has completed assignment. * <p> * If the region is already assigned, returns immediately. Otherwise, method * blocks until the region is assigned. * @param regionInfo region to wait on assignment for * @throws InterruptedException */ public boolean waitForAssignment(HRegionInfo regionInfo) throws InterruptedException { while (!regionStates.isRegionOnline(regionInfo)) { if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN) || this.server.isStopped()) { return false; } // We should receive a notification, but it's // better to have a timeout to recheck the condition here: // it lowers the impact of a race condition if any regionStates.waitForUpdate(100); } return true; } /** * Assigns the hbase:meta region. * <p> * Assumes that hbase:meta is currently closed and is not being actively served by * any RegionServer. * <p> * Forcibly unsets the current meta region location in ZooKeeper and assigns * hbase:meta to a random RegionServer. 
 * @throws KeeperException
   */
  public void assignMeta() throws KeeperException {
    MetaRegionTracker.deleteMetaLocation(this.watcher);
    assign(HRegionInfo.FIRST_META_REGIONINFO, true);
  }

  /**
   * Assigns specified regions retaining assignments, if any.
   * <p>
   * This is a synchronous call and will return once every region has been
   * assigned. If anything fails, an exception is thrown
   * @param regions map of region to the server it was last assigned to
   * @throws InterruptedException
   * @throws IOException
   */
  public void assign(Map<HRegionInfo, ServerName> regions)
      throws IOException, InterruptedException {
    if (regions == null || regions.isEmpty()) {
      return;
    }
    List<ServerName> servers = serverManager.createDestinationServersList();
    if (servers == null || servers.isEmpty()) {
      throw new IOException("Found no destination server to assign region(s)");
    }
    // Reuse existing assignment info
    Map<ServerName, List<HRegionInfo>> bulkPlan =
      balancer.retainAssignment(regions, servers);
    assign(regions.size(), servers.size(), "retainAssignment=true", bulkPlan);
  }

  /**
   * Assigns specified regions round robin, if any.
   * <p>
   * This is a synchronous call and will return once every region has been
   * assigned. If anything fails, an exception is thrown
   * @param regions regions to assign
   * @throws InterruptedException
   * @throws IOException
   */
  public void assign(List<HRegionInfo> regions)
      throws IOException, InterruptedException {
    if (regions == null || regions.isEmpty()) {
      return;
    }
    List<ServerName> servers = serverManager.createDestinationServersList();
    if (servers == null || servers.isEmpty()) {
      throw new IOException("Found no destination server to assign region(s)");
    }
    // Generate a round-robin bulk assignment plan
    Map<ServerName, List<HRegionInfo>> bulkPlan =
      balancer.roundRobinAssignment(regions, servers);
    processFavoredNodes(regions);
    assign(regions.size(), servers.size(), "round-robin=true", bulkPlan);
  }

  /**
   * Executes a bulk assignment plan, either serially (small plans) or via the
   * bulk assigner thread pool (large plans).
   *
   * @param regions total number of regions in the plan (for logging/threshold)
   * @param totalServers total number of candidate servers (for logging)
   * @param message description of the plan type, logged in bulk mode
   * @param bulkPlan server-to-regions assignment plan to execute
   */
  private void assign(int regions, int totalServers, String message,
      Map<ServerName, List<HRegionInfo>> bulkPlan)
      throws InterruptedException, IOException {
    int servers = bulkPlan.size();
    if (servers == 1 || (regions < bulkAssignThresholdRegions
        && servers < bulkAssignThresholdServers)) {
      // Not use bulk assignment. This could be more efficient in small
      // cluster, especially mini cluster for testing, so that tests won't time out
      if (LOG.isTraceEnabled()) {
        LOG.trace("Not using bulk assignment since we are assigning only "
          + regions + " region(s) to " + servers + " server(s)");
      }
      for (Map.Entry<ServerName, List<HRegionInfo>> plan: bulkPlan.entrySet()) {
        if (!assign(plan.getKey(), plan.getValue())) {
          // Per-server assign failed; retry each still-offline region singly.
          for (HRegionInfo region: plan.getValue()) {
            if (!regionStates.isRegionOnline(region)) {
              invokeAssign(region);
            }
          }
        }
      }
    } else {
      LOG.info("Bulk assigning " + regions + " region(s) across "
        + totalServers + " server(s), " + message);
      // Use fixed count thread pool assigning.
      BulkAssigner ba = new GeneralBulkAssigner(
        this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned);
      ba.bulkAssign();
      LOG.info("Bulk assigning done");
    }
  }

  /**
   * Assigns all user regions, if any exist. Used during cluster startup.
   * <p>
   * This is a synchronous call and will return once every region has been
   * assigned. If anything fails, an exception is thrown and the cluster
   * should be shutdown.
   * @param disabledOrDisablingOrEnabling tables whose regions are skipped
   *        when scanning hbase:meta
   * @throws InterruptedException
   * @throws IOException
   * @throws KeeperException
   */
  private void assignAllUserRegions(Set<TableName> disabledOrDisablingOrEnabling)
      throws IOException, InterruptedException, KeeperException {
    // Skip assignment for regions of tables in DISABLING state because during clean cluster startup
    // no RS is alive and regions map also doesn't have any information about the regions.
    // See HBASE-6281.
    // Scan hbase:meta for all user regions, skipping any disabled tables
    Map<HRegionInfo, ServerName> allRegions;
    SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
      new SnapshotOfRegionAssignmentFromMeta(catalogTracker,
        disabledOrDisablingOrEnabling, true);
    snapshotOfRegionAssignment.initialize();
    allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap();
    if (allRegions == null || allRegions.isEmpty()) {
      return;
    }
    // Determine what type of assignment to do on startup
    boolean retainAssignment = server.getConfiguration().
      getBoolean("hbase.master.startup.retainassign", true);
    if (retainAssignment) {
      assign(allRegions);
    } else {
      List<HRegionInfo> regions = new ArrayList<HRegionInfo>(allRegions.keySet());
      assign(regions);
    }
    // Mark the involved tables ENABLED in ZK if not already.
    for (HRegionInfo hri : allRegions.keySet()) {
      TableName tableName = hri.getTable();
      if (!zkTable.isEnabledTable(tableName)) {
        setEnabledTable(tableName);
      }
    }
  }

  /**
   * Wait until no regions in transition.
   * @param timeout How long to wait.
   * @return True if nothing in regions in transition.
   * @throws InterruptedException
   */
  boolean waitUntilNoRegionsInTransition(final long timeout)
      throws InterruptedException {
    // Blocks until there are no regions in transition. It is possible that
    // there
    // are regions in transition immediately after this returns but guarantees
    // that if it returns without an exception that there was a period of time
    // with no regions in transition from the point-of-view of the in-memory
    // state of the Master.
final long endTime = System.currentTimeMillis() + timeout; while (!this.server.isStopped() && regionStates.isRegionsInTransition() && endTime > System.currentTimeMillis()) { regionStates.waitForUpdate(100); } return !regionStates.isRegionsInTransition(); } /** * Rebuild the list of user regions and assignment information. * <p> * Returns a map of servers that are not found to be online and the regions * they were hosting. * @return map of servers not online to their assigned regions, as stored * in META * @throws IOException */ Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException { Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher); Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher); disabledOrEnablingTables.addAll(enablingTables); Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher); disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables); // Region assignment from META List<Result> results = MetaReader.fullScan(this.catalogTracker); // Get any new but slow to checkin region server that joined the cluster Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet(); // Map of offline servers and their regions to be returned Map<ServerName, List<HRegionInfo>> offlineServers = new TreeMap<ServerName, List<HRegionInfo>>(); // Iterate regions in META for (Result result : results) { HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result); if (regionInfo == null) continue; State state = RegionStateStore.getRegionState(result); ServerName regionLocation = RegionStateStore.getRegionServer(result); regionStates.createRegionState(regionInfo, state, regionLocation); if (!regionStates.isRegionInState(regionInfo, State.OPEN)) { // Region is not open (either offline or in transition), skip continue; } TableName tableName = regionInfo.getTable(); if (!onlineServers.contains(regionLocation)) { // Region is located on a server that isn't online 
        List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation);
        if (offlineRegions == null) {
          offlineRegions = new ArrayList<HRegionInfo>(1);
          offlineServers.put(regionLocation, offlineRegions);
        }
        if (useZKForAssignment) {
          regionStates.regionOffline(regionInfo);
        }
        offlineRegions.add(regionInfo);
      } else if (!disabledOrEnablingTables.contains(tableName)) {
        // Region is being served and on an active server
        // add only if region not in disabled or enabling table
        regionStates.updateRegionState(regionInfo, State.OPEN, regionLocation);
        regionStates.regionOnline(regionInfo, regionLocation);
        balancer.regionOnline(regionInfo, regionLocation);
      } else if (useZKForAssignment) {
        regionStates.regionOffline(regionInfo);
      }
      // need to enable the table if not disabled or disabling or enabling
      // this will be used in rolling restarts
      if (!disabledOrDisablingOrEnabling.contains(tableName)
          && !getZKTable().isEnabledTable(tableName)) {
        setEnabledTable(tableName);
      }
    }
    return offlineServers;
  }

  /**
   * Recover the tables that were not fully moved to DISABLED state. These
   * tables are in DISABLING state when the master restarted/switched.
   *
   * @throws KeeperException
   * @throws TableNotFoundException
   * @throws IOException
   */
  private void recoverTableInDisablingState()
      throws KeeperException, TableNotFoundException, IOException {
    Set<TableName> disablingTables = ZKTable.getDisablingTables(watcher);
    if (disablingTables.size() != 0) {
      for (TableName tableName : disablingTables) {
        // Recover by calling DisableTableHandler
        LOG.info("The table " + tableName
          + " is in DISABLING state. Hence recovering by moving the table"
          + " to DISABLED state.");
        new DisableTableHandler(this.server, tableName, catalogTracker,
          this, tableLockManager, true).prepare().process();
      }
    }
  }

  /**
   * Recover the tables that are not fully moved to ENABLED state. These tables
   * are in ENABLING state when the master restarted/switched
   *
   * @throws KeeperException
   * @throws org.apache.hadoop.hbase.TableNotFoundException
   * @throws IOException
   */
  private void recoverTableInEnablingState()
      throws KeeperException, TableNotFoundException, IOException {
    Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
    if (enablingTables.size() != 0) {
      for (TableName tableName : enablingTables) {
        // Recover by calling EnableTableHandler
        LOG.info("The table " + tableName
          + " is in ENABLING state. Hence recovering by moving the table"
          + " to ENABLED state.");
        // enableTable in sync way during master startup,
        // no need to invoke coprocessor
        EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
          catalogTracker, this, tableLockManager, true);
        try {
          eth.prepare();
        } catch (TableNotFoundException e) {
          // Table was deleted while in ENABLING; nothing to recover.
          LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
          continue;
        }
        eth.process();
      }
    }
  }

  /**
   * Processes list of dead servers from result of hbase:meta scan and regions in RIT
   * <p>
   * This is used for failover to recover the lost regions that belonged to
   * RegionServers which failed while there was no active master or regions
   * that were in RIT.
   * <p>
   *
   * @param deadServers
   *          The list of dead servers which failed while there was no active
   *          master. Can be null.
   * @throws IOException
   * @throws KeeperException
   */
  private void processDeadServersAndRecoverLostRegions(
      Map<ServerName, List<HRegionInfo>> deadServers)
      throws IOException, KeeperException {
    if (deadServers != null) {
      for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) {
        ServerName serverName = server.getKey();
        // We need to keep such info even if the server is known dead
        regionStates.setLastRegionServerOfRegions(serverName, server.getValue());
        if (!serverManager.isServerDead(serverName)) {
          serverManager.expireServer(serverName); // Let SSH do region re-assign
        }
      }
    }
    List<String> nodes = useZKForAssignment ?
      ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.assignmentZNode)
      : ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
    if (nodes != null && !nodes.isEmpty()) {
      for (String encodedRegionName : nodes) {
        processRegionInTransition(encodedRegionName, null);
      }
    } else if (!useZKForAssignment) {
      // We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions
      // in case the RPC call is not sent out yet before the master was shut down
      // since we update the state before we send the RPC call. We can't update
      // the state after the RPC call. Otherwise, we don't know what's happened
      // to the region if the master dies right after the RPC call is out.
      Map<String, RegionState> rits = regionStates.getRegionsInTransition();
      for (RegionState regionState: rits.values()) {
        if (!serverManager.isServerOnline(regionState.getServerName())) {
          continue; // SSH will handle it
        }
        State state = regionState.getState();
        LOG.info("Processing " + regionState);
        switch (state) {
        case CLOSED:
          invokeAssign(regionState.getRegion());
          break;
        case PENDING_OPEN:
          retrySendRegionOpen(regionState);
          break;
        case PENDING_CLOSE:
          retrySendRegionClose(regionState);
          break;
        default:
          // No process for other states
        }
      }
    }
  }

  /**
   * At master failover, for pending_open region, make sure
   * sendRegionOpen RPC call is sent to the target regionserver
   */
  private void retrySendRegionOpen(final RegionState regionState) {
    this.executorService.submit(
      new EventHandler(server, EventType.M_MASTER_RECOVERY) {
        @Override
        public void process() throws IOException {
          HRegionInfo hri = regionState.getRegion();
          ServerName serverName = regionState.getServerName();
          // Serialize with other transitions on the same region.
          ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
          try {
            while (serverManager.isServerOnline(serverName)
                && !server.isStopped() && !server.isAborted()) {
              try {
                List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
                if (shouldAssignRegionsWithFavoredNodes) {
                  favoredNodes = ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(hri);
                }
RegionOpeningState regionOpenState = serverManager.sendRegionOpen( serverName, hri, -1, favoredNodes); if (regionOpenState == RegionOpeningState.FAILED_OPENING) { // Failed opening this region, this means the target server didn't get // the original region open RPC, so re-assign it with a new plan LOG.debug("Got failed_opening in retry sendRegionOpen for " + regionState + ", re-assign it"); invokeAssign(hri, true); } return; // Done. } catch (Throwable t) { if (t instanceof RemoteException) { t = ((RemoteException) t).unwrapRemoteException(); } // In case SocketTimeoutException/FailedServerException, we will retry if (t instanceof java.net.SocketTimeoutException || t instanceof FailedServerException) { Threads.sleep(100); continue; } // For other exceptions, re-assign it LOG.debug("Got exception in retry sendRegionOpen for " + regionState + ", re-assign it", t); invokeAssign(hri); return; // Done. } } } finally { lock.unlock(); } } }); } /** * At master failover, for pending_close region, make sure * sendRegionClose RPC call is sent to the target regionserver */ private void retrySendRegionClose(final RegionState regionState) { this.executorService.submit( new EventHandler(server, EventType.M_MASTER_RECOVERY) { @Override public void process() throws IOException { HRegionInfo hri = regionState.getRegion(); ServerName serverName = regionState.getServerName(); ReentrantLock lock = locker.acquireLock(hri.getEncodedName()); try { while (serverManager.isServerOnline(serverName) && !server.isStopped() && !server.isAborted()) { try { if (!serverManager.sendRegionClose(serverName, hri, -1, null, false)) { // This means the region is still on the target server LOG.debug("Got false in retry sendRegionClose for " + regionState + ", re-close it"); invokeUnAssign(hri); } return; // Done. 
} catch (Throwable t) { if (t instanceof RemoteException) { t = ((RemoteException) t).unwrapRemoteException(); } // In case SocketTimeoutException/FailedServerException, we will retry if (t instanceof java.net.SocketTimeoutException || t instanceof FailedServerException) { Threads.sleep(100); continue; } if (!(t instanceof NotServingRegionException || t instanceof RegionAlreadyInTransitionException)) { // NotServingRegionException/RegionAlreadyInTransitionException // means the target server got the original region close request. // For other exceptions, re-close it LOG.debug("Got exception in retry sendRegionClose for " + regionState + ", re-close it", t); invokeUnAssign(hri); } return; // Done. } } } finally { lock.unlock(); } } }); } /** * Set Regions in transitions metrics. * This takes an iterator on the RegionInTransition map (CLSM), and is not synchronized. * This iterator is not fail fast, which may lead to stale read; but that's better than * creating a copy of the map for metrics computation, as this method will be invoked * on a frequent interval. */ public void updateRegionsInTransitionMetrics() { long currentTime = System.currentTimeMillis(); int totalRITs = 0; int totalRITsOverThreshold = 0; long oldestRITTime = 0; int ritThreshold = this.server.getConfiguration(). getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000); for (RegionState state: regionStates.getRegionsInTransition().values()) { totalRITs++; long ritTime = currentTime - state.getStamp(); if (ritTime > ritThreshold) { // more than the threshold totalRITsOverThreshold++; } if (oldestRITTime < ritTime) { oldestRITTime = ritTime; } } if (this.metricsAssignmentManager != null) { this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime); this.metricsAssignmentManager.updateRITCount(totalRITs); this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold); } } /** * @param region Region whose plan we are to clear. 
*/ void clearRegionPlan(final HRegionInfo region) { synchronized (this.regionPlans) { this.regionPlans.remove(region.getEncodedName()); } } /** * Wait on region to clear regions-in-transition. * @param hri Region to wait on. * @throws IOException */ public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri) throws IOException, InterruptedException { waitOnRegionToClearRegionsInTransition(hri, -1L); } /** * Wait on region to clear regions-in-transition or time out * @param hri * @param timeOut Milliseconds to wait for current region to be out of transition state. * @return True when a region clears regions-in-transition before timeout otherwise false * @throws InterruptedException */ public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut) throws InterruptedException { if (!regionStates.isRegionInTransition(hri)) return true; long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis() + timeOut; // There is already a timeout monitor on regions in transition so I // should not have to have one here too? LOG.info("Waiting for " + hri.getEncodedName() + " to leave regions-in-transition, timeOut=" + timeOut + " ms."); while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) { regionStates.waitForUpdate(100); if (EnvironmentEdgeManager.currentTimeMillis() > end) { LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned."); return false; } } if (this.server.isStopped()) { LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set"); return false; } return true; } /** * Update timers for all regions in transition going against the server in the * serversInUpdatingTimer. 
 */
public class TimerUpdater extends Chore {

  public TimerUpdater(final int period, final Stoppable stopper) {
    super("AssignmentTimerUpdater", period, stopper);
  }

  @Override
  protected void chore() {
    // Timer updating only applies when the timeout monitor is activated.
    Preconditions.checkState(tomActivated);
    ServerName serverToUpdateTimer = null;
    while (!serversInUpdatingTimer.isEmpty() && !stopper.isStopped()) {
      // Walk the sorted server set in order: first() to start, then higher()
      // to advance past the server just processed (remove() below makes the
      // set shrink concurrently-safely between iterations).
      if (serverToUpdateTimer == null) {
        serverToUpdateTimer = serversInUpdatingTimer.first();
      } else {
        serverToUpdateTimer = serversInUpdatingTimer
            .higher(serverToUpdateTimer);
      }
      if (serverToUpdateTimer == null) {
        break;
      }
      updateTimers(serverToUpdateTimer);
      serversInUpdatingTimer.remove(serverToUpdateTimer);
    }
  }
}

/**
 * Monitor to check for time outs on region transition operations
 */
public class TimeoutMonitor extends Chore {
  // Remembers whether the previous chore() run saw no live destination
  // servers; used to trigger immediate reassignment when servers return.
  private boolean allRegionServersOffline = false;

  private ServerManager serverManager;
  private final int timeout;

  /**
   * Creates a periodic monitor to check for time outs on region transition
   * operations. This will deal with retries if for some reason something
   * doesn't happen within the specified timeout.
   * @param period
   * @param stopper When {@link Stoppable#isStopped()} is true, this thread will
   * cleanup and exit cleanly.
 * @param timeout
 */
public TimeoutMonitor(final int period, final Stoppable stopper,
    ServerManager serverManager, final int timeout) {
  super("AssignmentTimeoutMonitor", period, stopper);
  this.timeout = timeout;
  this.serverManager = serverManager;
}

private synchronized void setAllRegionServersOffline(
    boolean allRegionServersOffline) {
  this.allRegionServersOffline = allRegionServersOffline;
}

@Override
protected void chore() {
  Preconditions.checkState(tomActivated);
  boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();

  // Iterate all regions in transition checking for time outs
  long now = System.currentTimeMillis();
  // no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
  // a copy while another thread is adding/removing items
  for (String regionName : regionStates.getRegionsInTransition().keySet()) {
    RegionState regionState = regionStates.getRegionTransitionState(regionName);
    if (regionState == null) continue;

    if (regionState.getStamp() + timeout <= now) {
      // decide on action upon timeout
      actOnTimeOut(regionState);
    } else if (this.allRegionServersOffline && !noRSAvailable) {
      RegionPlan existingPlan = regionPlans.get(regionName);
      if (existingPlan == null
          || !this.serverManager.isServerOnline(existingPlan
              .getDestination())) {
        // if some RSs just came back online, we can start the assignment
        // right away
        actOnTimeOut(regionState);
      }
    }
  }
  setAllRegionServersOffline(noRSAvailable);
}

// Retry/repair action for a region stuck in the given transition state
// longer than the configured timeout.
private void actOnTimeOut(RegionState regionState) {
  HRegionInfo regionInfo = regionState.getRegion();
  LOG.info("Regions in transition timed out: " + regionState);
  // Expired! Do a retry.
  switch (regionState.getState()) {
    case CLOSED:
      LOG.info("Region " + regionInfo.getEncodedName()
        + " has been CLOSED for too long, waiting on queued "
        + "ClosedRegionHandler to run or server shutdown");
      // Update our timestamp.
      regionState.updateTimestampToNow();
      break;
    case OFFLINE:
      LOG.info("Region has been OFFLINE for too long, " + "reassigning "
        + regionInfo.getRegionNameAsString() + " to a random server");
      invokeAssign(regionInfo);
      break;
    case PENDING_OPEN:
      LOG.info("Region has been PENDING_OPEN for too "
        + "long, reassigning region=" + regionInfo.getRegionNameAsString());
      invokeAssign(regionInfo);
      break;
    case OPENING:
      processOpeningState(regionInfo);
      break;
    case OPEN:
      LOG.error("Region has been OPEN for too long, " +
        "we don't know where region was opened so can't do anything");
      regionState.updateTimestampToNow();
      break;
    case PENDING_CLOSE:
      LOG.info("Region has been PENDING_CLOSE for too "
        + "long, running forced unassign again on region="
        + regionInfo.getRegionNameAsString());
      invokeUnassign(regionInfo);
      break;
    case CLOSING:
      LOG.info("Region has been CLOSING for too " +
        "long, this should eventually complete or the server will " +
        "expire, send RPC again");
      invokeUnassign(regionInfo);
      break;
    case SPLIT:
    case SPLITTING:
    case FAILED_OPEN:
    case FAILED_CLOSE:
    case MERGING:
      // Terminal/handled-elsewhere states: nothing to retry here.
      break;
    default:
      throw new IllegalStateException("Received event is not valid.");
  }
}
}

// Decide what to do with a region stuck in OPENING: consult its ZK node
// and either let the open complete, bail out, or reassign.
private void processOpeningState(HRegionInfo regionInfo) {
  LOG.info("Region has been OPENING for too long, reassigning region="
    + regionInfo.getRegionNameAsString());
  // Should have a ZK node in OPENING state
  try {
    String node = ZKAssign.getNodeName(watcher, regionInfo.getEncodedName());
    Stat stat = new Stat();
    byte [] data = ZKAssign.getDataNoWatch(watcher, node, stat);
    if (data == null) {
      LOG.warn("Data is null, node " + node + " no longer exists");
      return;
    }
    RegionTransition rt = RegionTransition.parseFrom(data);
    EventType et = rt.getEventType();
    if (et == EventType.RS_ZK_REGION_OPENED) {
      LOG.debug("Region has transitioned to OPENED, allowing "
        + "watched event handlers to process");
      return;
    } else if (et != EventType.RS_ZK_REGION_OPENING && et != EventType.RS_ZK_REGION_FAILED_OPEN ) {
      LOG.warn("While timing out a region, found ZK node in unexpected state: " + et);
      return;
    }
    invokeAssign(regionInfo);
  } catch (KeeperException ke) {
    LOG.error("Unexpected ZK exception timing out CLOSING region", ke);
  } catch (DeserializationException e) {
    LOG.error("Unexpected exception parsing CLOSING region", e);
  }
}

void invokeAssign(HRegionInfo regionInfo) {
  invokeAssign(regionInfo, true);
}

void invokeAssign(HRegionInfo regionInfo, boolean newPlan) {
  threadPoolExecutorService.submit(new AssignCallable(this, regionInfo, newPlan));
}

void invokeUnAssign(HRegionInfo regionInfo) {
  threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}

// NOTE(review): duplicate of invokeUnAssign above (same body, different
// visibility/casing); kept for source compatibility with existing callers.
private void invokeUnassign(HRegionInfo regionInfo) {
  threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}

public boolean isCarryingMeta(ServerName serverName) {
  return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
}

/**
 * Check if the shutdown server carries the specific region.
 * We have a bunch of places that store region location
 * Those values aren't consistent. There is a delay of notification.
 * The location from zookeeper unassigned node has the most recent data;
 * but the node could be deleted after the region is opened by AM.
 * The AM's info could be old when OpenedRegionHandler
 * processing hasn't finished yet when server shutdown occurs.
 * @return whether the serverName currently hosts the region
 */
private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
  RegionTransition rt = null;
  try {
    byte [] data = ZKAssign.getData(watcher, hri.getEncodedName());
    // This call can legitimately come by null
    rt = data == null? null: RegionTransition.parseFrom(data);
  } catch (KeeperException e) {
    server.abort("Exception reading unassigned node for region="
      + hri.getEncodedName(), e);
  } catch (DeserializationException e) {
    server.abort("Exception parsing unassigned node for region="
      + hri.getEncodedName(), e);
  }

  // Prefer the ZK unassigned node's server over the AM's in-memory view.
  ServerName addressFromZK = rt != null? rt.getServerName(): null;
  if (addressFromZK != null) {
    // if we get something from ZK, we will use the data
    boolean matchZK = addressFromZK.equals(serverName);
    LOG.debug("Checking region=" + hri.getRegionNameAsString() + ", zk server="
      + addressFromZK + " current=" + serverName + ", matches=" + matchZK);
    return matchZK;
  }

  ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
  boolean matchAM = (addressFromAM != null &&
    addressFromAM.equals(serverName));
  LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
    " is on server=" + (addressFromAM != null ? addressFromAM : "null") +
    " server being checked: " + serverName);

  return matchAM;
}

/**
 * Process shutdown server removing any assignments.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
  // Clean out any existing assignment plans for this server
  synchronized (this.regionPlans) {
    for (Iterator <Map.Entry<String, RegionPlan>> i =
        this.regionPlans.entrySet().iterator(); i.hasNext();) {
      Map.Entry<String, RegionPlan> e = i.next();
      ServerName otherSn = e.getValue().getDestination();
      // The name will be null if the region is planned for a random assign.
      if (otherSn != null && otherSn.equals(sn)) {
        // Use iterator's remove else we'll get CME
        i.remove();
      }
    }
  }
  List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
  for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
    HRegionInfo hri = it.next();
    String encodedName = hri.getEncodedName();

    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
      RegionState regionState =
        regionStates.getRegionTransitionState(encodedName);
      if (regionState == null
          || (regionState.getServerName() != null && !regionState.isOnServer(sn))
          || !(regionState.isFailedClose() || regionState.isOffline()
            || regionState.isPendingOpenOrOpening())) {
        LOG.info("Skip " + regionState + " since it is not opening/failed_close"
          + " on the dead server any more: " + sn);
        it.remove();
      } else {
        try {
          // Delete the ZNode if exists
          ZKAssign.deleteNodeFailSilent(watcher, hri);
        } catch (KeeperException ke) {
          server.abort("Unexpected ZK exception deleting node " + hri, ke);
        }
        if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
          // Disabled/disabling table: just take the region offline, no re-assign.
          regionStates.regionOffline(hri);
          it.remove();
          continue;
        }
        // Mark the region offline and assign it again by SSH
        regionStates.updateRegionState(hri, State.OFFLINE);
      }
    } finally {
      lock.unlock();
    }
  }
  return regions;
}

/**
 * @param plan Plan to execute.
 */
public void balance(final RegionPlan plan) {
  HRegionInfo hri = plan.getRegionInfo();
  TableName tableName = hri.getTable();
  if (zkTable.isDisablingOrDisabledTable(tableName)) {
    LOG.info("Ignored moving region of disabling/disabled table "
      + tableName);
    return;
  }

  // Move the region only if it's assigned
  String encodedName = hri.getEncodedName();
  ReentrantLock lock = locker.acquireLock(encodedName);
  try {
    if (!regionStates.isRegionOnline(hri)) {
      RegionState state = regionStates.getRegionState(encodedName);
      LOG.info("Ignored moving region not assigned: " + hri + ", "
        + (state == null ? "not in region states" : state));
      return;
    }
    synchronized (this.regionPlans) {
      this.regionPlans.put(plan.getRegionName(), plan);
    }
    unassign(hri, false, plan.getDestination());
  } finally {
    lock.unlock();
  }
}

public void stop() {
  shutdown(); // Stop executor service, etc
  if (tomActivated){
    this.timeoutMonitor.interrupt();
    this.timerUpdater.interrupt();
  }
}

/**
 * Shutdown the threadpool executor service
 */
public void shutdown() {
  // It's an immediate shutdown, so we're clearing the remaining tasks.
  synchronized (zkEventWorkerWaitingList){
    zkEventWorkerWaitingList.clear();
  }
  threadPoolExecutorService.shutdownNow();
  zkEventWorkers.shutdownNow();
  regionStateStore.stop();
}

protected void setEnabledTable(TableName tableName) {
  try {
    this.zkTable.setEnabledTable(tableName);
  } catch (KeeperException e) {
    // here we can abort as it is the start up flow
    String errorMsg = "Unable to ensure that the table " + tableName
      + " will be" + " enabled because of a ZooKeeper issue";
    LOG.error(errorMsg);
    this.server.abort(errorMsg, e);
  }
}

/**
 * Set region as OFFLINED up in zookeeper asynchronously.
 * @param state
 * @return True if we succeeded, false otherwise (State was incorrect or failed
 * updating zk).
*/ private boolean asyncSetOfflineInZooKeeper(final RegionState state, final AsyncCallback.StringCallback cb, final ServerName destination) { if (!state.isClosed() && !state.isOffline()) { this.server.abort("Unexpected state trying to OFFLINE; " + state, new IllegalStateException()); return false; } regionStates.updateRegionState(state.getRegion(), State.OFFLINE); try { ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(), destination, cb, state); } catch (KeeperException e) { if (e instanceof NodeExistsException) { LOG.warn("Node for " + state.getRegion() + " already exists"); } else { server.abort("Unexpected ZK exception creating/setting node OFFLINE", e); } return false; } return true; } private boolean deleteNodeInStates(String encodedName, String desc, ServerName sn, EventType... types) { try { for (EventType et: types) { if (ZKAssign.deleteNode(watcher, encodedName, et, sn)) { return true; } } LOG.info("Failed to delete the " + desc + " node for " + encodedName + ". The node type may not match"); } catch (NoNodeException e) { if (LOG.isDebugEnabled()) { LOG.debug("The " + desc + " node for " + encodedName + " already deleted"); } } catch (KeeperException ke) { server.abort("Unexpected ZK exception deleting " + desc + " node for the region " + encodedName, ke); } return false; } private void deleteMergingNode(String encodedName, ServerName sn) { deleteNodeInStates(encodedName, "merging", sn, EventType.RS_ZK_REGION_MERGING, EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGED); } private void deleteSplittingNode(String encodedName, ServerName sn) { deleteNodeInStates(encodedName, "splitting", sn, EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT); } private void onRegionFailedOpen( final HRegionInfo hri, final ServerName sn) { String encodedName = hri.getEncodedName(); AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName); if (failedOpenCount == null) { failedOpenCount = new 
AtomicInteger(); // No need to use putIfAbsent, or extra synchronization since // this whole handleRegion block is locked on the encoded region // name, and failedOpenTracker is updated only in this block failedOpenTracker.put(encodedName, failedOpenCount); } if (failedOpenCount.incrementAndGet() >= maximumAttempts) { regionStates.updateRegionState(hri, State.FAILED_OPEN); // remove the tracking info to save memory, also reset // the count for next open initiative failedOpenTracker.remove(encodedName); } else { // Handle this the same as if it were opened and then closed. RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED); if (regionState != null) { // When there are more than one region server a new RS is selected as the // destination and the same is updated in the region plan. (HBASE-5546) Set<TableName> disablingOrDisabled = null; try { disablingOrDisabled = ZKTable.getDisablingTables(watcher); disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher)); } catch (KeeperException e) { server.abort("Cannot retrieve info about disabling or disabled tables ", e); } if (disablingOrDisabled.contains(hri.getTable())) { offlineDisabledRegion(hri); return; } // ZK Node is in CLOSED state, assign it. 
regionStates.updateRegionState(hri, RegionState.State.CLOSED); // This below has to do w/ online enable/disable of a table removeClosedRegion(hri); try { getRegionPlan(hri, sn, true); } catch (HBaseIOException e) { LOG.warn("Failed to get region plan", e); } invokeAssign(hri, false); } } } private void onRegionOpen( final HRegionInfo hri, final ServerName sn, long openSeqNum) { regionOnline(hri, sn, openSeqNum); if (useZKForAssignment) { try { // Delete the ZNode if exists ZKAssign.deleteNodeFailSilent(watcher, hri); } catch (KeeperException ke) { server.abort("Unexpected ZK exception deleting node " + hri, ke); } } // reset the count, if any failedOpenTracker.remove(hri.getEncodedName()); if (isTableDisabledOrDisabling(hri.getTable())) { invokeUnAssign(hri); } } private void onRegionClosed(final HRegionInfo hri) { if (isTableDisabledOrDisabling(hri.getTable())) { offlineDisabledRegion(hri); return; } regionStates.updateRegionState(hri, RegionState.State.CLOSED); // This below has to do w/ online enable/disable of a table removeClosedRegion(hri); invokeAssign(hri, false); } private String onRegionSplit(ServerName sn, TransitionCode code, HRegionInfo p, HRegionInfo a, HRegionInfo b) { RegionState rs_p = regionStates.getRegionState(p); RegionState rs_a = regionStates.getRegionState(a); RegionState rs_b = regionStates.getRegionState(b); if (!(rs_p.isOpenOrSplittingOnServer(sn) && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn)) && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) { return "Not in state good for split"; } regionStates.updateRegionState(a, State.SPLITTING_NEW, sn); regionStates.updateRegionState(b, State.SPLITTING_NEW, sn); regionStates.updateRegionState(p, State.SPLITTING); if (code == TransitionCode.SPLIT) { if (TEST_SKIP_SPLIT_HANDLING) { return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set"; } regionOffline(p, State.SPLIT); regionOnline(a, sn, 1); regionOnline(b, sn, 1); // User could disable the table before master knows 
the new region. if (isTableDisabledOrDisabling(p.getTable())) { invokeUnAssign(a); invokeUnAssign(b); } } else if (code == TransitionCode.SPLIT_PONR) { try { regionStateStore.splitRegion(p, a, b, sn); } catch (IOException ioe) { LOG.info("Failed to record split region " + p.getShortNameToLog()); return "Failed to record the splitting in meta"; } } else if (code == TransitionCode.SPLIT_REVERTED) { regionOnline(p, sn); regionOffline(a); regionOffline(b); if (isTableDisabledOrDisabling(p.getTable())) { invokeUnAssign(p); } } return null; } private boolean isTableDisabledOrDisabling(TableName t) { Set<TableName> disablingOrDisabled = null; try { disablingOrDisabled = ZKTable.getDisablingTables(watcher); disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher)); } catch (KeeperException e) { server.abort("Cannot retrieve info about disabling or disabled tables ", e); } return disablingOrDisabled.contains(t) ? true : false; } private String onRegionMerge(ServerName sn, TransitionCode code, HRegionInfo p, HRegionInfo a, HRegionInfo b) { RegionState rs_p = regionStates.getRegionState(p); RegionState rs_a = regionStates.getRegionState(a); RegionState rs_b = regionStates.getRegionState(b); if (!(rs_a.isOpenOrMergingOnServer(sn) && rs_b.isOpenOrMergingOnServer(sn) && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) { return "Not in state good for merge"; } regionStates.updateRegionState(a, State.MERGING); regionStates.updateRegionState(b, State.MERGING); regionStates.updateRegionState(p, State.MERGING_NEW, sn); String encodedName = p.getEncodedName(); if (code == TransitionCode.READY_TO_MERGE) { mergingRegions.put(encodedName, new PairOfSameType<HRegionInfo>(a, b)); } else if (code == TransitionCode.MERGED) { mergingRegions.remove(encodedName); regionOffline(a, State.MERGED); regionOffline(b, State.MERGED); regionOnline(p, sn, 1); // User could disable the table before master knows the new region. 
if (isTableDisabledOrDisabling(p.getTable())) { invokeUnAssign(p); } } else if (code == TransitionCode.MERGE_PONR) { try { regionStateStore.mergeRegions(p, a, b, sn); } catch (IOException ioe) { LOG.info("Failed to record merged region " + p.getShortNameToLog()); return "Failed to record the merging in meta"; } } else { mergingRegions.remove(encodedName); regionOnline(a, sn); regionOnline(b, sn); regionOffline(p); if (isTableDisabledOrDisabling(p.getTable())) { invokeUnAssign(a); invokeUnAssign(b); } } return null; } /** * A helper to handle region merging transition event. * It transitions merging regions to MERGING state. */ private boolean handleRegionMerging(final RegionTransition rt, final String encodedName, final String prettyPrintedRegionName, final ServerName sn) { if (!serverManager.isServerOnline(sn)) { LOG.warn("Dropped merging! ServerName=" + sn + " unknown."); return false; } byte [] payloadOfMerging = rt.getPayload(); List<HRegionInfo> mergingRegions; try { mergingRegions = HRegionInfo.parseDelimitedFrom( payloadOfMerging, 0, payloadOfMerging.length); } catch (IOException e) { LOG.error("Dropped merging! Failed reading " + rt.getEventType() + " payload for " + prettyPrintedRegionName); return false; } assert mergingRegions.size() == 3; HRegionInfo p = mergingRegions.get(0); HRegionInfo hri_a = mergingRegions.get(1); HRegionInfo hri_b = mergingRegions.get(2); RegionState rs_p = regionStates.getRegionState(p); RegionState rs_a = regionStates.getRegionState(hri_a); RegionState rs_b = regionStates.getRegionState(hri_b); if (!((rs_a == null || rs_a.isOpenOrMergingOnServer(sn)) && (rs_b == null || rs_b.isOpenOrMergingOnServer(sn)) && (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) { LOG.warn("Dropped merging! 
Not in state good for MERGING; rs_p=" + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b); return false; } EventType et = rt.getEventType(); if (et == EventType.RS_ZK_REQUEST_REGION_MERGE) { try { if (RegionMergeTransaction.transitionMergingNode(watcher, p, hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGING) == -1) { byte[] data = ZKAssign.getData(watcher, encodedName); EventType currentType = null; if (data != null) { RegionTransition newRt = RegionTransition.parseFrom(data); currentType = newRt.getEventType(); } if (currentType == null || (currentType != EventType.RS_ZK_REGION_MERGED && currentType != EventType.RS_ZK_REGION_MERGING)) { LOG.warn("Failed to transition pending_merge node " + encodedName + " to merging, it's now " + currentType); return false; } } } catch (Exception e) { LOG.warn("Failed to transition pending_merge node " + encodedName + " to merging", e); return false; } } synchronized (regionStates) { regionStates.updateRegionState(hri_a, State.MERGING); regionStates.updateRegionState(hri_b, State.MERGING); regionStates.updateRegionState(p, State.MERGING_NEW, sn); if (et != EventType.RS_ZK_REGION_MERGED) { this.mergingRegions.put(encodedName, new PairOfSameType<HRegionInfo>(hri_a, hri_b)); } else { this.mergingRegions.remove(encodedName); regionOffline(hri_a, State.MERGED); regionOffline(hri_b, State.MERGED); regionOnline(p, sn); } } if (et == EventType.RS_ZK_REGION_MERGED) { LOG.debug("Handling MERGED event for " + encodedName + "; deleting node"); // Remove region from ZK try { boolean successful = false; while (!successful) { // It's possible that the RS tickles in between the reading of the // znode and the deleting, so it's safe to retry. 
successful = ZKAssign.deleteNode(watcher, encodedName, EventType.RS_ZK_REGION_MERGED, sn); } } catch (KeeperException e) { if (e instanceof NoNodeException) { String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName); LOG.debug("The znode " + znodePath + " does not exist. May be deleted already."); } else { server.abort("Error deleting MERGED node " + encodedName, e); } } LOG.info("Handled MERGED event; merged=" + p.getRegionNameAsString() + ", region_a=" + hri_a.getRegionNameAsString() + ", region_b=" + hri_b.getRegionNameAsString() + ", on " + sn); // User could disable the table before master knows the new region. if (zkTable.isDisablingOrDisabledTable(p.getTable())) { unassign(p); } } return true; } /** * A helper to handle region splitting transition event. */ private boolean handleRegionSplitting(final RegionTransition rt, final String encodedName, final String prettyPrintedRegionName, final ServerName sn) { if (!serverManager.isServerOnline(sn)) { LOG.warn("Dropped splitting! ServerName=" + sn + " unknown."); return false; } byte [] payloadOfSplitting = rt.getPayload(); List<HRegionInfo> splittingRegions; try { splittingRegions = HRegionInfo.parseDelimitedFrom( payloadOfSplitting, 0, payloadOfSplitting.length); } catch (IOException e) { LOG.error("Dropped splitting! Failed reading " + rt.getEventType() + " payload for " + prettyPrintedRegionName); return false; } assert splittingRegions.size() == 2; HRegionInfo hri_a = splittingRegions.get(0); HRegionInfo hri_b = splittingRegions.get(1); RegionState rs_p = regionStates.getRegionState(encodedName); RegionState rs_a = regionStates.getRegionState(hri_a); RegionState rs_b = regionStates.getRegionState(hri_b); if (!((rs_p == null || rs_p.isOpenOrSplittingOnServer(sn)) && (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn)) && (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) { LOG.warn("Dropped splitting! 
Not in state good for SPLITTING; rs_p=" + rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b); return false; } if (rs_p == null) { // Splitting region should be online rs_p = regionStates.updateRegionState(rt, State.OPEN); if (rs_p == null) { LOG.warn("Received splitting for region " + prettyPrintedRegionName + " from server " + sn + " but it doesn't exist anymore," + " probably already processed its split"); return false; } regionStates.regionOnline(rs_p.getRegion(), sn); } HRegionInfo p = rs_p.getRegion(); EventType et = rt.getEventType(); if (et == EventType.RS_ZK_REQUEST_REGION_SPLIT) { try { if (SplitTransaction.transitionSplittingNode(watcher, p, hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLITTING) == -1) { byte[] data = ZKAssign.getData(watcher, encodedName); EventType currentType = null; if (data != null) { RegionTransition newRt = RegionTransition.parseFrom(data); currentType = newRt.getEventType(); } if (currentType == null || (currentType != EventType.RS_ZK_REGION_SPLIT && currentType != EventType.RS_ZK_REGION_SPLITTING)) { LOG.warn("Failed to transition pending_split node " + encodedName + " to splitting, it's now " + currentType); return false; } } } catch (Exception e) { LOG.warn("Failed to transition pending_split node " + encodedName + " to splitting", e); return false; } } synchronized (regionStates) { regionStates.updateRegionState(hri_a, State.SPLITTING_NEW, sn); regionStates.updateRegionState(hri_b, State.SPLITTING_NEW, sn); regionStates.updateRegionState(rt, State.SPLITTING); // The below is for testing ONLY! We can't do fault injection easily, so // resort to this kinda uglyness -- St.Ack 02/25/2011. 
if (TEST_SKIP_SPLIT_HANDLING) { LOG.warn("Skipping split message, TEST_SKIP_SPLIT_HANDLING is set"); return true; // return true so that the splitting node stays } if (et == EventType.RS_ZK_REGION_SPLIT) { regionOffline(p, State.SPLIT); regionOnline(hri_a, sn); regionOnline(hri_b, sn); } } if (et == EventType.RS_ZK_REGION_SPLIT) { LOG.debug("Handling SPLIT event for " + encodedName + "; deleting node"); // Remove region from ZK try { boolean successful = false; while (!successful) { // It's possible that the RS tickles in between the reading of the // znode and the deleting, so it's safe to retry. successful = ZKAssign.deleteNode(watcher, encodedName, EventType.RS_ZK_REGION_SPLIT, sn); } } catch (KeeperException e) { if (e instanceof NoNodeException) { String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName); LOG.debug("The znode " + znodePath + " does not exist. May be deleted already."); } else { server.abort("Error deleting SPLIT node " + encodedName, e); } } LOG.info("Handled SPLIT event; parent=" + p.getRegionNameAsString() + ", daughter a=" + hri_a.getRegionNameAsString() + ", daughter b=" + hri_b.getRegionNameAsString() + ", on " + sn); // User could disable the table before master knows the new region. if (zkTable.isDisablingOrDisabledTable(p.getTable())) { unassign(hri_a); unassign(hri_b); } } return true; } /** * A region is offline. The new state should be the specified one, * if not null. If the specified state is null, the new state is Offline. * The specified state can be Split/Merged/Offline/null only. */ private void regionOffline(final HRegionInfo regionInfo, final State state) { regionStates.regionOffline(regionInfo, state); removeClosedRegion(regionInfo); // remove the region plan as well just in case. 
clearRegionPlan(regionInfo); balancer.regionOffline(regionInfo); // Tell our listeners that a region was closed sendRegionClosedNotification(regionInfo); } private void sendRegionOpenedNotification(final HRegionInfo regionInfo, final ServerName serverName) { if (!this.listeners.isEmpty()) { for (AssignmentListener listener : this.listeners) { listener.regionOpened(regionInfo, serverName); } } } private void sendRegionClosedNotification(final HRegionInfo regionInfo) { if (!this.listeners.isEmpty()) { for (AssignmentListener listener : this.listeners) { listener.regionClosed(regionInfo); } } } /** * Try to update some region states. If the state machine prevents * such update, an error message is returned to explain the reason. * * It's expected that in each transition there should have just one * region for opening/closing, 3 regions for splitting/merging. * These regions should be on the server that requested the change. * * Region state machine. Only these transitions * are expected to be triggered by a region server. * * On the state transition: * (1) Open/Close should be initiated by master * (a) Master sets the region to pending_open/pending_close * in memory and hbase:meta after sending the request * to the region server * (b) Region server reports back to the master * after open/close is done (either success/failure) * (c) If region server has problem to report the status * to master, it must be because the master is down or some * temporary network issue. Otherwise, the region server should * abort since it must be a bug. If the master is not accessible, * the region server should keep trying until the server is * stopped or till the status is reported to the (new) master * (d) If region server dies in the middle of opening/closing * a region, SSH picks it up and finishes it * (e) If master dies in the middle, the new master recovers * the state during initialization from hbase:meta. 
Region server
 *        can report any transition that has not been reported to
 *        the previous active master yet
 *  (2) Split/merge is initiated by region servers
 *      (a) To split a region, a region server sends a request
 *        to master to try to set a region to splitting, together with
 *        two daughters (to be created) to splitting new. If approved
 *        by the master, the splitting can then move ahead
 *      (b) To merge two regions, a region server sends a request to
 *        master to try to set the new merged region (to be created) to
 *        merging_new, together with two regions (to be merged) to merging.
 *        If it is ok with the master, the merge can then move ahead
 *      (c) Once the splitting/merging is done, the region server
 *        reports the status back to the master either success/failure.
 *      (d) Other scenarios should be handled similarly as for
 *        region open/close
 */
protected String onRegionTransition(final ServerName serverName,
    final RegionStateTransition transition) {
  TransitionCode code = transition.getTransitionCode();
  HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
  RegionState current = regionStates.getRegionState(hri);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got transition " + code + " for "
      + (current != null ? current.toString() : hri.getShortNameToLog())
      + " from " + serverName);
  }
  String errorMsg = null;
  switch (code) {
  case OPENED:
    if (current != null && current.isOpened() && current.isOnServer(serverName)) {
      LOG.info("Region " + hri.getShortNameToLog() + " is already " + current.getState() + " on "
          + serverName);
      break;
    }
    // Intentional fall-through: an OPENED report that is not already
    // recorded shares the pending-open validation below with FAILED_OPEN.
  case FAILED_OPEN:
    if (current == null
        || !current.isPendingOpenOrOpeningOnServer(serverName)) {
      errorMsg = hri.getShortNameToLog()
        + " is not pending open on " + serverName;
    } else if (code == TransitionCode.FAILED_OPEN) {
      onRegionFailedOpen(hri, serverName);
    } else {
      long openSeqNum = HConstants.NO_SEQNUM;
      if (transition.hasOpenSeqNum()) {
        openSeqNum = transition.getOpenSeqNum();
      }
      if (openSeqNum < 0) {
        errorMsg = "Newly opened region has invalid open seq num " + openSeqNum;
      } else {
        onRegionOpen(hri, serverName, openSeqNum);
      }
    }
    break;

  case CLOSED:
    if (current == null
        || !current.isPendingCloseOrClosingOnServer(serverName)) {
      errorMsg = hri.getShortNameToLog()
        + " is not pending close on " + serverName;
    } else {
      onRegionClosed(hri);
    }
    break;

  case READY_TO_SPLIT:
  case SPLIT_PONR:
  case SPLIT:
  case SPLIT_REVERTED:
    errorMsg = onRegionSplit(serverName, code, hri,
      HRegionInfo.convert(transition.getRegionInfo(1)),
      HRegionInfo.convert(transition.getRegionInfo(2)));
    break;

  case READY_TO_MERGE:
  case MERGE_PONR:
  case MERGED:
  case MERGE_REVERTED:
    errorMsg = onRegionMerge(serverName, code, hri,
      HRegionInfo.convert(transition.getRegionInfo(1)),
      HRegionInfo.convert(transition.getRegionInfo(2)));
    break;

  default:
    errorMsg = "Unexpected transition code " + code;
  }
  if (errorMsg != null) {
    // FIX(review): corrected log-message typo "transtion" -> "transition".
    LOG.error("Failed to transition region from " + current + " to "
      + code + " by " + serverName + ": " + errorMsg);
  }
  return errorMsg;
}

/**
 * @return Instance of load balancer
 */
public LoadBalancer getBalancer() {
  return this.balancer;
}
}
Java
package io.dropwizard.jetty;

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jetty9.InstrumentedConnectionFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.dropwizard.configuration.ResourceConfigurationSourceProvider;
import io.dropwizard.configuration.YamlConfigurationFactory;
import io.dropwizard.jackson.DiscoverableSubtypeResolver;
import io.dropwizard.jackson.Jackson;
import io.dropwizard.logging.ConsoleAppenderFactory;
import io.dropwizard.logging.FileAppenderFactory;
import io.dropwizard.logging.SyslogAppenderFactory;
import io.dropwizard.util.DataSize;
import io.dropwizard.util.Duration;
import io.dropwizard.validation.BaseValidator;
import org.assertj.core.api.InstanceOfAssertFactories;
import org.eclipse.jetty.http.CookieCompliance;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.server.ForwardedRequestCustomizer;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.ProxyConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.ThreadPool;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import javax.validation.Validator;
import java.util.Optional;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

/**
 * Tests for {@code HttpConnectorFactory}: covers subtype discovery, YAML
 * parsing of minimal and full connector configurations, and construction of
 * the actual Jetty {@code ServerConnector} (buffer pools, thread counts,
 * proxy-protocol support, and the accept-queue default).
 */
class HttpConnectorFactoryTest {
    private final ObjectMapper objectMapper = Jackson.newObjectMapper();
    private final Validator validator = BaseValidator.newValidator();

    @BeforeEach
    void setUp() {
        // Register the polymorphic config subtypes referenced by the YAML fixtures.
        objectMapper.getSubtypeResolver().registerSubtypes(ConsoleAppenderFactory.class,
                FileAppenderFactory.class, SyslogAppenderFactory.class, HttpConnectorFactory.class);
    }

    @Test
    void isDiscoverable() {
        // The factory must be listed in META-INF/services for subtype discovery.
        assertThat(new DiscoverableSubtypeResolver().getDiscoveredSubtypes())
                .contains(HttpConnectorFactory.class);
    }

    @Test
    void testParseMinimalConfiguration() throws Exception {
        // A minimal YAML (only the connector type) must yield all documented defaults.
        HttpConnectorFactory http =
                new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
                        .build(new ResourceConfigurationSourceProvider(), "yaml/http-connector-minimal.yml");

        assertThat(http.getPort()).isEqualTo(8080);
        assertThat(http.getBindHost()).isNull();
        assertThat(http.isInheritChannel()).isFalse();
        assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(512));
        assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(32));
        assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(8));
        assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(8));
        assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(8));
        assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(30));
        assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(64));
        assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(1024));
        assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(64));
        assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(0));
        assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(0));
        assertThat(http.getAcceptorThreads()).isEmpty();
        assertThat(http.getSelectorThreads()).isEmpty();
        assertThat(http.getAcceptQueueSize()).isNull();
        assertThat(http.isReuseAddress()).isTrue();
        assertThat(http.isUseServerHeader()).isFalse();
        assertThat(http.isUseDateHeader()).isTrue();
        assertThat(http.isUseForwardedHeaders()).isFalse();
        assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC7230);
        assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
        assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
    }

    @Test
    void testParseFullConfiguration() throws Exception {
        // A fully-specified YAML must override every default.
        HttpConnectorFactory http =
                new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
                        .build(new ResourceConfigurationSourceProvider(), "yaml/http-connector.yml");

        assertThat(http.getPort()).isEqualTo(9090);
        assertThat(http.getBindHost()).isEqualTo("127.0.0.1");
        assertThat(http.isInheritChannel()).isTrue();
        assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(256));
        assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(128));
        assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(4));
        assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(4));
        assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(4));
        assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(10));
        assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(128));
        assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(500));
        assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(32));
        assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(42));
        assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(200));
        assertThat(http.getAcceptorThreads()).contains(1);
        assertThat(http.getSelectorThreads()).contains(4);
        assertThat(http.getAcceptQueueSize()).isEqualTo(1024);
        assertThat(http.isReuseAddress()).isFalse();
        assertThat(http.isUseServerHeader()).isTrue();
        assertThat(http.isUseDateHeader()).isFalse();
        assertThat(http.isUseForwardedHeaders()).isTrue();

        // useForwardedHeaders must install the ForwardedRequestCustomizer.
        HttpConfiguration httpConfiguration = http.buildHttpConfiguration();
        assertThat(httpConfiguration.getCustomizers()).hasAtLeastOneElementOfType(ForwardedRequestCustomizer.class);

        assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC2616);
        assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC2965);
        assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
    }

    @Test
    void testBuildConnector() throws Exception {
        // Spy so we can verify the buffer-pool construction arguments below.
        HttpConnectorFactory http = spy(new HttpConnectorFactory());
        http.setBindHost("127.0.0.1");
        http.setAcceptorThreads(Optional.of(1));
        http.setSelectorThreads(Optional.of(2));
        http.setAcceptQueueSize(1024);
        http.setMinResponseDataPerSecond(DataSize.bytes(200));
        http.setMinRequestDataPerSecond(DataSize.bytes(42));
        http.setRequestCookieCompliance(CookieCompliance.RFC6265);
        http.setResponseCookieCompliance(CookieCompliance.RFC6265);

        MetricRegistry metrics = new MetricRegistry();
        ThreadPool threadPool = new QueuedThreadPool();

        Server server = null;
        ServerConnector connector = null;
        try {
            server = new Server();
            connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);

            // Connector-level wiring.
            assertThat(connector.getPort()).isEqualTo(8080);
            assertThat(connector.getHost()).isEqualTo("127.0.0.1");
            assertThat(connector.getAcceptQueueSize()).isEqualTo(1024);
            assertThat(connector.getReuseAddress()).isTrue();
            assertThat(connector.getIdleTimeout()).isEqualTo(30000);
            assertThat(connector.getName()).isEqualTo("test-http-connector");

            assertThat(connector.getServer()).isSameAs(server);
            assertThat(connector.getScheduler()).isInstanceOf(ScheduledExecutorScheduler.class);
            assertThat(connector.getExecutor()).isSameAs(threadPool);

            // Buffer pool built with min/increment/max from the defaults.
            verify(http).buildBufferPool(64, 1024, 64 * 1024);

            assertThat(connector.getAcceptors()).isEqualTo(1);
            assertThat(connector.getSelectorManager().getSelectorCount()).isEqualTo(2);

            // The HTTP/1.1 factory must be wrapped for metrics instrumentation.
            InstrumentedConnectionFactory connectionFactory =
                    (InstrumentedConnectionFactory) connector.getConnectionFactory("http/1.1");
            assertThat(connectionFactory).isInstanceOf(InstrumentedConnectionFactory.class);
            assertThat(connectionFactory)
                    .extracting("connectionFactory")
                    .asInstanceOf(InstanceOfAssertFactories.type(HttpConnectionFactory.class))
                    .satisfies(factory -> {
                        assertThat(factory.getInputBufferSize()).isEqualTo(8192);
                        assertThat(factory.getHttpCompliance()).isEqualByComparingTo(HttpCompliance.RFC7230);
                    })
                    .extracting(HttpConnectionFactory::getHttpConfiguration)
                    .satisfies(config -> {
                        assertThat(config.getHeaderCacheSize()).isEqualTo(512);
                        assertThat(config.getOutputBufferSize()).isEqualTo(32768);
                        assertThat(config.getRequestHeaderSize()).isEqualTo(8192);
                        assertThat(config.getResponseHeaderSize()).isEqualTo(8192);
                        assertThat(config.getSendDateHeader()).isTrue();
                        assertThat(config.getSendServerVersion()).isFalse();
                        // Forwarded headers are off by default.
                        assertThat(config.getCustomizers()).noneMatch(customizer ->
                                customizer.getClass().equals(ForwardedRequestCustomizer.class));
                        assertThat(config.getMinRequestDataRate()).isEqualTo(42);
                        assertThat(config.getMinResponseDataRate()).isEqualTo(200);
                        assertThat(config.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
                        assertThat(config.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
                    });
        } finally {
            if (connector != null) {
                connector.stop();
            }
            if (server != null) {
                server.stop();
            }
        }
    }

    @Test
    void testBuildConnectorWithProxyProtocol() throws Exception {
        HttpConnectorFactory http = new HttpConnectorFactory();
        http.setBindHost("127.0.0.1");
        http.setUseProxyProtocol(true);

        MetricRegistry metrics = new MetricRegistry();
        ThreadPool threadPool = new QueuedThreadPool();

        Server server = null;
        ServerConnector connector = null;
        try {
            server = new Server();
            connector = (ServerConnector) http.build(server, metrics, "test-http-connector-with-proxy-protocol", threadPool);
            // With proxy protocol enabled, the PROXY factory must come first in the chain.
            assertThat(connector.getConnectionFactories().toArray()[0]).isInstanceOf(ProxyConnectionFactory.class);
        } finally {
            if (connector != null) {
                connector.stop();
            }
            if (server != null) {
                server.stop();
            }
        }
    }

    @Test
    void testDefaultAcceptQueueSize() throws Exception {
        HttpConnectorFactory http = new HttpConnectorFactory();
        http.setBindHost("127.0.0.1");
        http.setAcceptorThreads(Optional.of(1));
        http.setSelectorThreads(Optional.of(2));

        MetricRegistry metrics = new MetricRegistry();
        ThreadPool threadPool = new QueuedThreadPool();

        Server server = null;
        ServerConnector connector = null;
        try {
            server = new Server();
            connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
            // When unset, the accept queue size falls back to the OS TCP backlog.
            assertThat(connector.getAcceptQueueSize()).isEqualTo(NetUtil.getTcpBacklog());
        } finally {
            if (connector != null) {
                connector.stop();
            }
            if (server != null) {
                server.stop();
            }
        }
    }
}
Java
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* lib/krb5/krb/pac.c */ /* * Copyright 2008 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. */ #include "k5-int.h" #include "authdata.h" /* draft-brezak-win2k-krb-authz-00 */ /* * Add a buffer to the provided PAC and update header. 
 */

krb5_error_code
k5_pac_add_buffer(krb5_context context, krb5_pac pac, krb5_ui_4 type,
                  const krb5_data *data, krb5_boolean zerofill,
                  krb5_data *out_data)
{
    PACTYPE *header;
    size_t header_len, i, pad = 0;
    char *pac_data;

    /* A zero-filled buffer is requested by passing data->data == NULL. */
    assert((data->data == NULL) == zerofill);

    /* Check there isn't already a buffer of this type */
    if (k5_pac_locate_buffer(context, pac, type, NULL) == 0) {
        return EEXIST;
    }

    /* Grow the header by one PAC_INFO_BUFFER; PACTYPE itself already
     * contains room for one entry, hence cBuffers (not cBuffers + 1). */
    header = (PACTYPE *)realloc(pac->pac,
                                sizeof(PACTYPE) +
                                (pac->pac->cBuffers * sizeof(PAC_INFO_BUFFER)));
    if (header == NULL) {
        return ENOMEM;
    }
    pac->pac = header;

    header_len = PACTYPE_LENGTH + (pac->pac->cBuffers * PAC_INFO_BUFFER_LENGTH);

    /* Buffer payloads are padded to PAC_ALIGNMENT within the encoding. */
    if (data->length % PAC_ALIGNMENT)
        pad = PAC_ALIGNMENT - (data->length % PAC_ALIGNMENT);

    pac_data = realloc(pac->data.data,
                       pac->data.length + PAC_INFO_BUFFER_LENGTH +
                       data->length + pad);
    if (pac_data == NULL) {
        return ENOMEM;
    }
    pac->data.data = pac_data;

    /* Update offsets of existing buffers */
    for (i = 0; i < pac->pac->cBuffers; i++)
        pac->pac->Buffers[i].Offset += PAC_INFO_BUFFER_LENGTH;

    /* Make room for new PAC_INFO_BUFFER */
    memmove(pac->data.data + header_len + PAC_INFO_BUFFER_LENGTH,
            pac->data.data + header_len,
            pac->data.length - header_len);
    memset(pac->data.data + header_len, 0, PAC_INFO_BUFFER_LENGTH);

    /* Initialise new PAC_INFO_BUFFER (i == old cBuffers after the loop) */
    pac->pac->Buffers[i].ulType = type;
    pac->pac->Buffers[i].cbBufferSize = data->length;
    pac->pac->Buffers[i].Offset = pac->data.length + PAC_INFO_BUFFER_LENGTH;
    assert((pac->pac->Buffers[i].Offset % PAC_ALIGNMENT) == 0);

    /* Copy in new PAC data and zero padding bytes */
    if (zerofill)
        memset(pac->data.data + pac->pac->Buffers[i].Offset, 0, data->length);
    else
        memcpy(pac->data.data + pac->pac->Buffers[i].Offset, data->data,
               data->length);
    memset(pac->data.data + pac->pac->Buffers[i].Offset + data->length, 0,
           pad);

    pac->pac->cBuffers++;
    pac->data.length += PAC_INFO_BUFFER_LENGTH + data->length + pad;

    if (out_data != NULL) {
        /* Alias into pac->data; valid until the PAC is next modified. */
        out_data->data = pac->data.data + pac->pac->Buffers[i].Offset;
        out_data->length = data->length;
    }

    /* Any mutation invalidates previously verified signatures. */
    pac->verified = FALSE;

    return 0;
}

krb5_error_code KRB5_CALLCONV
krb5_pac_add_buffer(krb5_context context, krb5_pac pac, krb5_ui_4 type,
                    const krb5_data *data)
{
    return k5_pac_add_buffer(context, pac, type, data, FALSE, NULL);
}

/*
 * Free a PAC
 */
void KRB5_CALLCONV
krb5_pac_free(krb5_context context, krb5_pac pac)
{
    if (pac != NULL) {
        /* zapfree scrubs the memory before releasing it. */
        zapfree(pac->data.data, pac->data.length);
        free(pac->pac);
        zapfree(pac, sizeof(*pac));
    }
}

/* Locate the unique buffer of the given type; if data is non-NULL, alias it
 * (no copy) into the PAC's backing storage. */
krb5_error_code
k5_pac_locate_buffer(krb5_context context, const krb5_pac pac, krb5_ui_4 type,
                     krb5_data *data)
{
    PAC_INFO_BUFFER *buffer = NULL;
    size_t i;

    if (pac == NULL)
        return EINVAL;

    for (i = 0; i < pac->pac->cBuffers; i++) {
        if (pac->pac->Buffers[i].ulType == type) {
            if (buffer == NULL)
                buffer = &pac->pac->Buffers[i];
            else
                return EINVAL;  /* duplicate buffers of one type are invalid */
        }
    }

    if (buffer == NULL)
        return ENOENT;

    assert(buffer->Offset + buffer->cbBufferSize <= pac->data.length);

    if (data != NULL) {
        data->length = buffer->cbBufferSize;
        data->data = pac->data.data + buffer->Offset;
    }

    return 0;
}

/*
 * Find a buffer and copy data into output
 */
krb5_error_code KRB5_CALLCONV
krb5_pac_get_buffer(krb5_context context, krb5_pac pac, krb5_ui_4 type,
                    krb5_data *data)
{
    krb5_data d;
    krb5_error_code ret;

    ret = k5_pac_locate_buffer(context, pac, type, &d);
    if (ret != 0)
        return ret;

    /* Caller owns the duplicated memory. */
    data->data = k5memdup(d.data, d.length, &ret);
    if (data->data == NULL)
        return ret;
    data->length = d.length;

    return 0;
}

/*
 * Return an array of the types of data in the PAC
 */
krb5_error_code KRB5_CALLCONV
krb5_pac_get_types(krb5_context context, krb5_pac pac, size_t *len,
                   krb5_ui_4 **types)
{
    size_t i;

    *types = (krb5_ui_4 *)malloc(pac->pac->cBuffers * sizeof(krb5_ui_4));
    if (*types == NULL)
        return ENOMEM;

    *len = pac->pac->cBuffers;

    for (i = 0; i < pac->pac->cBuffers; i++)
        (*types)[i] = pac->pac->Buffers[i].ulType;

    return 0;
}

/*
 * Initialize PAC
 */
krb5_error_code KRB5_CALLCONV
krb5_pac_init(krb5_context context, krb5_pac *ppac)
{
    krb5_pac pac;

    pac = (krb5_pac)malloc(sizeof(*pac));
    if (pac == NULL)
        return ENOMEM;

    pac->pac = (PACTYPE *)malloc(sizeof(PACTYPE));
    if (pac->pac == NULL) {
        free(pac);
        return ENOMEM;
    }

    pac->pac->cBuffers = 0;
    pac->pac->Version = 0;

    /* Start with just the fixed-length header, zero-filled. */
    pac->data.length = PACTYPE_LENGTH;
    pac->data.data = calloc(1, pac->data.length);
    if (pac->data.data == NULL) {
        krb5_pac_free(context, pac);
        return ENOMEM;
    }

    pac->verified = FALSE;

    *ppac = pac;

    return 0;
}

/* Deep-copy src into a freshly allocated PAC in *dst. */
static krb5_error_code
k5_pac_copy(krb5_context context, krb5_pac src, krb5_pac *dst)
{
    size_t header_len;
    krb5_ui_4 cbuffers;
    krb5_error_code code;
    krb5_pac pac;

    /* PACTYPE already includes one PAC_INFO_BUFFER entry. */
    cbuffers = src->pac->cBuffers;
    if (cbuffers != 0)
        cbuffers--;

    header_len = sizeof(PACTYPE) + cbuffers * sizeof(PAC_INFO_BUFFER);

    pac = (krb5_pac)malloc(sizeof(*pac));
    if (pac == NULL)
        return ENOMEM;

    pac->pac = k5memdup(src->pac, header_len, &code);
    if (pac->pac == NULL) {
        free(pac);
        return code;
    }

    code = krb5int_copy_data_contents(context, &src->data, &pac->data);
    if (code != 0) {
        free(pac->pac);
        free(pac);
        /* NOTE(review): returns ENOMEM rather than code — presumably the
         * only failure here is allocation; confirm before changing. */
        return ENOMEM;
    }

    pac->verified = src->verified;
    *dst = pac;

    return 0;
}

/*
 * Parse the supplied data into the PAC allocated by this function
 */
krb5_error_code KRB5_CALLCONV
krb5_pac_parse(krb5_context context, const void *ptr, size_t len,
               krb5_pac *ppac)
{
    krb5_error_code ret;
    size_t i;
    const unsigned char *p = (const unsigned char *)ptr;
    krb5_pac pac;
    size_t header_len;
    krb5_ui_4 cbuffers, version;

    *ppac = NULL;

    if (len < PACTYPE_LENGTH)
        return ERANGE;

    /* Fixed header: buffer count and version, both little-endian. */
    cbuffers = load_32_le(p);
    p += 4;
    version = load_32_le(p);
    p += 4;

    if (version != 0)
        return EINVAL;

    header_len = PACTYPE_LENGTH + (cbuffers * PAC_INFO_BUFFER_LENGTH);
    if (len < header_len)
        return ERANGE;

    ret = krb5_pac_init(context, &pac);
    if (ret != 0)
        return ret;

    pac->pac = (PACTYPE *)realloc(pac->pac,
                                  sizeof(PACTYPE) +
                                  ((cbuffers - 1) * sizeof(PAC_INFO_BUFFER)));
    if (pac->pac == NULL) {
        krb5_pac_free(context, pac);
        return ENOMEM;
    }

    pac->pac->cBuffers = cbuffers;
    pac->pac->Version = version;

    for (i = 0; i < pac->pac->cBuffers; i++) {
        PAC_INFO_BUFFER *buffer = &pac->pac->Buffers[i];

        buffer->ulType = load_32_le(p);
        p += 4;
        buffer->cbBufferSize = load_32_le(p);
        p += 4;
        buffer->Offset = load_64_le(p);
        p += 8;

        /* Reject misaligned or out-of-range buffer descriptors. */
        if (buffer->Offset % PAC_ALIGNMENT) {
            krb5_pac_free(context, pac);
            return EINVAL;
        }
        if (buffer->Offset < header_len ||
            buffer->Offset + buffer->cbBufferSize > len) {
            krb5_pac_free(context, pac);
            return ERANGE;
        }
    }

    /* Keep a verbatim copy of the encoded PAC for signature checks. */
    pac->data.data = realloc(pac->data.data, len);
    if (pac->data.data == NULL) {
        krb5_pac_free(context, pac);
        return ENOMEM;
    }
    memcpy(pac->data.data, ptr, len);
    pac->data.length = len;

    *ppac = pac;

    return 0;
}

/* Convert an NT filetime (100ns units since 1601) to POSIX seconds. */
static krb5_error_code
k5_time_to_seconds_since_1970(int64_t ntTime, krb5_timestamp *elapsedSeconds)
{
    uint64_t abstime;

    ntTime /= 10000000;

    abstime = ntTime > 0 ? ntTime - NT_TIME_EPOCH : -ntTime;

    if (abstime > UINT32_MAX)
        return ERANGE;

    *elapsedSeconds = abstime;

    return 0;
}

/* Convert POSIX seconds to an NT filetime. */
krb5_error_code
k5_seconds_since_1970_to_time(krb5_timestamp elapsedSeconds, uint64_t *ntTime)
{
    *ntTime = elapsedSeconds;

    if (elapsedSeconds > 0)
        *ntTime += NT_TIME_EPOCH;

    *ntTime *= 10000000;

    return 0;
}

/* Check the PAC client-info buffer against the ticket's authtime and
 * client principal (realm ignored). */
krb5_error_code
k5_pac_validate_client(krb5_context context, const krb5_pac pac,
                       krb5_timestamp authtime, krb5_const_principal principal)
{
    krb5_error_code ret;
    krb5_data client_info;
    char *pac_princname;
    unsigned char *p;
    krb5_timestamp pac_authtime;
    krb5_ui_2 pac_princname_length;
    int64_t pac_nt_authtime;
    krb5_principal pac_principal;

    ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_CLIENT_INFO,
                               &client_info);
    if (ret != 0)
        return ret;

    if (client_info.length < PAC_CLIENT_INFO_LENGTH)
        return ERANGE;

    p = (unsigned char *)client_info.data;
    pac_nt_authtime = load_64_le(p);
    p += 8;
    pac_princname_length = load_16_le(p);
    p += 2;

    ret = k5_time_to_seconds_since_1970(pac_nt_authtime, &pac_authtime);
    if (ret != 0)
        return ret;

    /* Name is UTF-16LE, so its byte length must be even and fit. */
    if (client_info.length < PAC_CLIENT_INFO_LENGTH + pac_princname_length ||
        pac_princname_length % 2)
        return ERANGE;

    ret = k5_utf16le_to_utf8(p, pac_princname_length, &pac_princname);
    if (ret != 0)
        return ret;

    ret = krb5_parse_name_flags(context, pac_princname,
                                KRB5_PRINCIPAL_PARSE_NO_REALM, &pac_principal);
    if (ret != 0) {
        free(pac_princname);
        return ret;
    }

    free(pac_princname);

    if (pac_authtime != authtime ||
        !krb5_principal_compare_flags(context, pac_principal, principal,
                                      KRB5_PRINCIPAL_COMPARE_IGNORE_REALM))
        ret = KRB5KRB_AP_WRONG_PRINC;

    krb5_free_principal(context, pac_principal);

    return ret;
}

/* Zero the signature bytes of the named checksum buffer inside data, a
 * scratch copy of the encoded PAC, so checksums can be recomputed. */
static krb5_error_code
k5_pac_zero_signature(krb5_context context, const krb5_pac pac,
                      krb5_ui_4 type, krb5_data *data)
{
    PAC_INFO_BUFFER *buffer = NULL;
    size_t i;

    assert(type == KRB5_PAC_SERVER_CHECKSUM ||
           type == KRB5_PAC_PRIVSVR_CHECKSUM);
    assert(data->length >= pac->data.length);

    for (i = 0; i < pac->pac->cBuffers; i++) {
        if (pac->pac->Buffers[i].ulType == type) {
            buffer = &pac->pac->Buffers[i];
            break;
        }
    }

    if (buffer == NULL)
        return ENOENT;

    if (buffer->Offset + buffer->cbBufferSize > pac->data.length)
        return ERANGE;
    if (buffer->cbBufferSize < PAC_SIGNATURE_DATA_LENGTH)
        return KRB5_BAD_MSIZE;

    /* Zero out the data portion of the checksum only */
    memset(data->data + buffer->Offset + PAC_SIGNATURE_DATA_LENGTH, 0,
           buffer->cbBufferSize - PAC_SIGNATURE_DATA_LENGTH);

    return 0;
}

/* Verify the server checksum over the PAC with both signature buffers
 * zeroed, using the service key. */
static krb5_error_code
k5_pac_verify_server_checksum(krb5_context context, const krb5_pac pac,
                              const krb5_keyblock *server)
{
    krb5_error_code ret;
    krb5_data pac_data; /* PAC with zeroed checksums */
    krb5_checksum checksum;
    krb5_data checksum_data;
    krb5_boolean valid;
    krb5_octet *p;

    ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_SERVER_CHECKSUM,
                               &checksum_data);
    if (ret != 0)
        return ret;

    if (checksum_data.length < PAC_SIGNATURE_DATA_LENGTH)
        return KRB5_BAD_MSIZE;

    /* Signature buffer layout: 4-byte checksum type, then contents. */
    p = (krb5_octet *)checksum_data.data;
    checksum.checksum_type = load_32_le(p);
    checksum.length = checksum_data.length - PAC_SIGNATURE_DATA_LENGTH;
    checksum.contents = p + PAC_SIGNATURE_DATA_LENGTH;

    /* Unkeyed checksums would be forgeable; reject them. */
    if (!krb5_c_is_keyed_cksum(checksum.checksum_type))
        return KRB5KRB_AP_ERR_INAPP_CKSUM;

    pac_data.length = pac->data.length;
    pac_data.data = k5memdup(pac->data.data, pac->data.length, &ret);
    if (pac_data.data == NULL)
        return ret;

    /* Zero out both checksum buffers */
    ret = k5_pac_zero_signature(context, pac, KRB5_PAC_SERVER_CHECKSUM,
                                &pac_data);
    if (ret != 0) {
        free(pac_data.data);
        return ret;
    }

    ret = k5_pac_zero_signature(context, pac, KRB5_PAC_PRIVSVR_CHECKSUM,
                                &pac_data);
    if (ret != 0) {
        free(pac_data.data);
        return ret;
    }

    ret = krb5_c_verify_checksum(context, server,
                                 KRB5_KEYUSAGE_APP_DATA_CKSUM, &pac_data,
                                 &checksum, &valid);

    free(pac_data.data);

    if (ret != 0) {
        return ret;
    }

    if (valid == FALSE)
        ret = KRB5KRB_AP_ERR_BAD_INTEGRITY;

    return ret;
}

/* Verify the KDC (privsvr) checksum, which signs the server checksum's
 * signature bytes, using the KDC key. */
static krb5_error_code
k5_pac_verify_kdc_checksum(krb5_context context, const krb5_pac pac,
                           const krb5_keyblock *privsvr)
{
    krb5_error_code ret;
    krb5_data server_checksum, privsvr_checksum;
    krb5_checksum checksum;
    krb5_boolean valid;
    krb5_octet *p;

    ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_PRIVSVR_CHECKSUM,
                               &privsvr_checksum);
    if (ret != 0)
        return ret;

    if (privsvr_checksum.length < PAC_SIGNATURE_DATA_LENGTH)
        return KRB5_BAD_MSIZE;

    ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_SERVER_CHECKSUM,
                               &server_checksum);
    if (ret != 0)
        return ret;

    if (server_checksum.length < PAC_SIGNATURE_DATA_LENGTH)
        return KRB5_BAD_MSIZE;

    p = (krb5_octet *)privsvr_checksum.data;
    checksum.checksum_type = load_32_le(p);
    checksum.length = privsvr_checksum.length - PAC_SIGNATURE_DATA_LENGTH;
    checksum.contents = p + PAC_SIGNATURE_DATA_LENGTH;

    if (!krb5_c_is_keyed_cksum(checksum.checksum_type))
        return KRB5KRB_AP_ERR_INAPP_CKSUM;

    /* The KDC checksum covers only the server checksum's signature bytes. */
    server_checksum.data += PAC_SIGNATURE_DATA_LENGTH;
    server_checksum.length -= PAC_SIGNATURE_DATA_LENGTH;

    ret = krb5_c_verify_checksum(context, privsvr,
                                 KRB5_KEYUSAGE_APP_DATA_CKSUM,
                                 &server_checksum, &checksum, &valid);
    if (ret != 0)
        return ret;

    if (valid == FALSE)
        ret = KRB5KRB_AP_ERR_BAD_INTEGRITY;

    return ret;
}

/* Verify whichever of the server/KDC checksums and client binding the
 * caller supplied keys/principal for; mark the PAC verified on success. */
krb5_error_code KRB5_CALLCONV
krb5_pac_verify(krb5_context context, const krb5_pac pac,
                krb5_timestamp authtime, krb5_const_principal principal,
                const krb5_keyblock *server, const krb5_keyblock *privsvr)
{
    krb5_error_code ret;

    if (server != NULL) {
        ret = k5_pac_verify_server_checksum(context, pac, server);
        if (ret != 0)
            return ret;
    }

    if (privsvr != NULL) {
        ret = k5_pac_verify_kdc_checksum(context, pac, privsvr);
        if (ret != 0)
            return ret;
    }

    if (principal != NULL) {
        ret = k5_pac_validate_client(context, pac, authtime, principal);
        if (ret != 0)
            return ret;
    }

    pac->verified = TRUE;

    return 0;
}

/*
 * PAC auth data attribute backend
 */
struct mspac_context {
    krb5_pac pac;
};

static krb5_error_code
mspac_init(krb5_context kcontext, void **plugin_context)
{
    *plugin_context = NULL;
    return 0;
}

static void
mspac_flags(krb5_context kcontext, void *plugin_context,
            krb5_authdatatype ad_type, krb5_flags *flags)
{
    *flags = AD_USAGE_TGS_REQ;
}

static void
mspac_fini(krb5_context kcontext, void *plugin_context)
{
    return;
}

static krb5_error_code
mspac_request_init(krb5_context kcontext, krb5_authdata_context context,
                   void *plugin_context, void **request_context)
{
    struct mspac_context *pacctx;

    pacctx = (struct mspac_context *)malloc(sizeof(*pacctx));
    if (pacctx == NULL)
        return ENOMEM;

    pacctx->pac = NULL;

    *request_context = pacctx;

    return 0;
}

static krb5_error_code
mspac_import_authdata(krb5_context kcontext, krb5_authdata_context context,
                      void *plugin_context, void *request_context,
                      krb5_authdata **authdata, krb5_boolean kdc_issued,
                      krb5_const_principal kdc_issuer)
{
    krb5_error_code code;
    struct mspac_context *pacctx = (struct mspac_context *)request_context;

    if (kdc_issued)
        return EINVAL;

    /* Replace any previously imported PAC. */
    if (pacctx->pac != NULL) {
        krb5_pac_free(kcontext, pacctx->pac);
        pacctx->pac = NULL;
    }

    assert(authdata[0] != NULL);
    assert((authdata[0]->ad_type & AD_TYPE_FIELD_TYPE_MASK) ==
           KRB5_AUTHDATA_WIN2K_PAC);

    code = krb5_pac_parse(kcontext, authdata[0]->contents,
                          authdata[0]->length, &pacctx->pac);

    return code;
}

static krb5_error_code
mspac_export_authdata(krb5_context kcontext, krb5_authdata_context context,
                      void *plugin_context, void *request_context,
                      krb5_flags usage, krb5_authdata ***out_authdata)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    krb5_error_code code;
    krb5_authdata **authdata;
    krb5_data data;

    if (pacctx->pac == NULL)
        return 0;

    /* NULL-terminated array of one authdata element. */
    authdata = calloc(2, sizeof(krb5_authdata *));
    if (authdata == NULL)
        return ENOMEM;

    authdata[0] = calloc(1, sizeof(krb5_authdata));
    if (authdata[0] == NULL) {
        free(authdata);
        return ENOMEM;
    }
    authdata[1] = NULL;

    code = krb5int_copy_data_contents(kcontext, &pacctx->pac->data, &data);
    if (code != 0) {
        krb5_free_authdata(kcontext, authdata);
        return code;
    }

    authdata[0]->magic = KV5M_AUTHDATA;
    authdata[0]->ad_type = KRB5_AUTHDATA_WIN2K_PAC;
    authdata[0]->length = data.length;
    authdata[0]->contents = (krb5_octet *)data.data;

    authdata[1] = NULL;

    *out_authdata = authdata;

    return 0;
}

static krb5_error_code
mspac_verify(krb5_context kcontext, krb5_authdata_context context,
             void *plugin_context, void *request_context,
             const krb5_auth_context *auth_context,
             const krb5_keyblock *key, const krb5_ap_req *req)
{
    krb5_error_code code;
    struct mspac_context *pacctx = (struct mspac_context *)request_context;

    if (pacctx->pac == NULL)
        return EINVAL;

    code = krb5_pac_verify(kcontext, pacctx->pac,
                           req->ticket->enc_part2->times.authtime,
                           req->ticket->enc_part2->client, key, NULL);
    if (code != 0)
        TRACE_MSPAC_VERIFY_FAIL(kcontext, code);

    /*
     * If the above verification failed, don't fail the whole authentication,
     * just don't mark the PAC as verified. A checksum mismatch can occur if
     * the PAC was copied from a cross-realm TGT by an ignorant KDC, and Apple
     * macOS Server Open Directory (as of 10.6) generates PACs with no server
     * checksum at all.
     */
    return 0;
}

static void
mspac_request_fini(krb5_context kcontext, krb5_authdata_context context,
                   void *plugin_context, void *request_context)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;

    if (pacctx != NULL) {
        if (pacctx->pac != NULL)
            krb5_pac_free(kcontext, pacctx->pac);

        free(pacctx);
    }
}

#define STRLENOF(x) (sizeof((x)) - 1)

/* Mapping from PAC buffer types to their attribute URN names. */
static struct {
    krb5_ui_4 type;
    krb5_data attribute;
} mspac_attribute_types[] = {
    { (krb5_ui_4)-1,
      { KV5M_DATA, STRLENOF("urn:mspac:"), "urn:mspac:" } },
    { KRB5_PAC_LOGON_INFO,
      { KV5M_DATA, STRLENOF("urn:mspac:logon-info"),
        "urn:mspac:logon-info" } },
    { KRB5_PAC_CREDENTIALS_INFO,
      { KV5M_DATA, STRLENOF("urn:mspac:credentials-info"),
        "urn:mspac:credentials-info" } },
    { KRB5_PAC_SERVER_CHECKSUM,
      { KV5M_DATA, STRLENOF("urn:mspac:server-checksum"),
        "urn:mspac:server-checksum" } },
    { KRB5_PAC_PRIVSVR_CHECKSUM,
      { KV5M_DATA, STRLENOF("urn:mspac:privsvr-checksum"),
        "urn:mspac:privsvr-checksum" } },
    { KRB5_PAC_CLIENT_INFO,
      { KV5M_DATA, STRLENOF("urn:mspac:client-info"),
        "urn:mspac:client-info" } },
    { KRB5_PAC_DELEGATION_INFO,
      { KV5M_DATA, STRLENOF("urn:mspac:delegation-info"),
        "urn:mspac:delegation-info" } },
    { KRB5_PAC_UPN_DNS_INFO,
      { KV5M_DATA, STRLENOF("urn:mspac:upn-dns-info"),
        "urn:mspac:upn-dns-info" } },
};

#define MSPAC_ATTRIBUTE_COUNT   (sizeof(mspac_attribute_types)/sizeof(mspac_attribute_types[0]))

static krb5_error_code
mspac_type2attr(krb5_ui_4 type, krb5_data *attr)
{
    unsigned int i;

    for (i = 0; i < MSPAC_ATTRIBUTE_COUNT; i++) {
        if (mspac_attribute_types[i].type == type) {
            *attr = mspac_attribute_types[i].attribute;
            return 0;
        }
    }

    return ENOENT;
}

static krb5_error_code
mspac_attr2type(const krb5_data *attr, krb5_ui_4 *type)
{
    unsigned int i;

    for (i = 0; i < MSPAC_ATTRIBUTE_COUNT; i++) {
        if (attr->length == mspac_attribute_types[i].attribute.length &&
            strncasecmp(attr->data, mspac_attribute_types[i].attribute.data,
                        attr->length) == 0) {
            *type = mspac_attribute_types[i].type;
            return 0;
        }
    }

    /* Fall back to a numeric suffix, e.g. "urn:mspac:17". */
    if (attr->length > STRLENOF("urn:mspac:") &&
        strncasecmp(attr->data, "urn:mspac:", STRLENOF("urn:mspac:")) == 0) {
        char *p = &attr->data[STRLENOF("urn:mspac:")];
        char *endptr;

        *type = strtoul(p, &endptr, 10);
        if (*type != 0 && *endptr == '\0')
            return 0;
    }

    return ENOENT;
}

static krb5_error_code
mspac_get_attribute_types(krb5_context kcontext,
                          krb5_authdata_context context,
                          void *plugin_context, void *request_context,
                          krb5_data **out_attrs)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    unsigned int i, j;
    krb5_data *attrs;
    krb5_error_code code;

    if (pacctx->pac == NULL)
        return ENOENT;

    /* One slot for the whole-PAC entry, one per buffer, one terminator. */
    attrs = calloc(1 + pacctx->pac->pac->cBuffers + 1, sizeof(krb5_data));
    if (attrs == NULL)
        return ENOMEM;

    j = 0;

    /* The entire PAC */
    code = krb5int_copy_data_contents(kcontext,
                                      &mspac_attribute_types[0].attribute,
                                      &attrs[j++]);
    if (code != 0) {
        free(attrs);
        return code;
    }

    /* PAC buffers */
    for (i = 0; i < pacctx->pac->pac->cBuffers; i++) {
        krb5_data attr;

        code = mspac_type2attr(pacctx->pac->pac->Buffers[i].ulType, &attr);
        if (code == 0) {
            code = krb5int_copy_data_contents(kcontext, &attr, &attrs[j++]);
            if (code != 0) {
                krb5int_free_data_list(kcontext, attrs);
                return code;
            }
        } else {
            /* Unknown buffer type: synthesize a numeric URN. */
            int length;

            length = asprintf(&attrs[j].data, "urn:mspac:%d",
                              pacctx->pac->pac->Buffers[i].ulType);
            if (length < 0) {
                krb5int_free_data_list(kcontext, attrs);
                return ENOMEM;
            }
            attrs[j++].length = length;
        }
    }
    attrs[j].data = NULL;
    attrs[j].length = 0;

    *out_attrs = attrs;

    return 0;
}

static krb5_error_code
mspac_get_attribute(krb5_context kcontext, krb5_authdata_context context,
                    void *plugin_context, void *request_context,
                    const krb5_data *attribute, krb5_boolean *authenticated,
                    krb5_boolean *complete, krb5_data *value,
                    krb5_data *display_value, int *more)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    krb5_error_code code;
    krb5_ui_4 type;

    if (display_value != NULL) {
        display_value->data = NULL;
        display_value->length = 0;
    }

    if (*more != -1 || pacctx->pac == NULL)
        return ENOENT;

    /* If it didn't verify, pretend it didn't exist. */
    if (!pacctx->pac->verified) {
        TRACE_MSPAC_DISCARD_UNVERF(kcontext);
        return ENOENT;
    }

    code = mspac_attr2type(attribute, &type);
    if (code != 0)
        return code;

    /* -1 is a magic type that refers to the entire PAC */
    if (type == (krb5_ui_4)-1) {
        if (value != NULL)
            code = krb5int_copy_data_contents(kcontext, &pacctx->pac->data,
                                              value);
        else
            code = 0;
    } else {
        if (value != NULL)
            code = krb5_pac_get_buffer(kcontext, pacctx->pac, type, value);
        else
            code = k5_pac_locate_buffer(kcontext, pacctx->pac, type, NULL);
    }
    if (code == 0) {
        *authenticated = pacctx->pac->verified;
        *complete = TRUE;
    }

    *more = 0;

    return code;
}

static krb5_error_code
mspac_set_attribute(krb5_context kcontext, krb5_authdata_context context,
                    void *plugin_context, void *request_context,
                    krb5_boolean complete, const krb5_data *attribute,
                    const krb5_data *value)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    krb5_error_code code;
    krb5_ui_4 type;

    if (pacctx->pac == NULL)
        return ENOENT;

    code = mspac_attr2type(attribute, &type);
    if (code != 0)
        return code;

    /* -1 is a magic type that refers to the entire PAC */
    if (type == (krb5_ui_4)-1) {
        krb5_pac newpac;

        code = krb5_pac_parse(kcontext, value->data, value->length, &newpac);
        if (code != 0)
            return code;

        krb5_pac_free(kcontext, pacctx->pac);
        pacctx->pac = newpac;
    } else {
        code = krb5_pac_add_buffer(kcontext, pacctx->pac, type, value);
    }

    return code;
}

static krb5_error_code
mspac_export_internal(krb5_context kcontext, krb5_authdata_context context,
                      void *plugin_context, void *request_context,
                      krb5_boolean restrict_authenticated, void **ptr)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    krb5_error_code code;
    krb5_pac pac;

    *ptr = NULL;

    if (pacctx->pac == NULL)
        return ENOENT;
    if (restrict_authenticated && (pacctx->pac->verified) == FALSE)
        return ENOENT;

    /* Re-parse the encoded PAC to hand the caller an independent copy. */
    code = krb5_pac_parse(kcontext, pacctx->pac->data.data,
                          pacctx->pac->data.length, &pac);
    if (code == 0) {
        pac->verified = pacctx->pac->verified;
        *ptr = pac;
    }

    return code;
}

static void
mspac_free_internal(krb5_context kcontext, krb5_authdata_context context,
                    void *plugin_context, void *request_context, void *ptr)
{
    if (ptr != NULL)
        krb5_pac_free(kcontext, (krb5_pac)ptr);

    return;
}

static krb5_error_code
mspac_size(krb5_context kcontext, krb5_authdata_context context,
           void *plugin_context, void *request_context, size_t *sizep)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;

    /* length word + PAC bytes (if any) + verified flag word. */
    *sizep += sizeof(krb5_int32);

    if (pacctx->pac != NULL)
        *sizep += pacctx->pac->data.length;

    *sizep += sizeof(krb5_int32);

    return 0;
}

static krb5_error_code
mspac_externalize(krb5_context kcontext, krb5_authdata_context context,
                  void *plugin_context, void *request_context,
                  krb5_octet **buffer, size_t *lenremain)
{
    krb5_error_code code = 0;
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    size_t required = 0;
    krb5_octet *bp;
    size_t remain;

    bp = *buffer;
    remain = *lenremain;

    if (pacctx->pac != NULL) {
        mspac_size(kcontext, context, plugin_context,
                   request_context, &required);

        if (required <= remain) {
            krb5_ser_pack_int32((krb5_int32)pacctx->pac->data.length,
                                &bp, &remain);
            krb5_ser_pack_bytes((krb5_octet *)pacctx->pac->data.data,
                                (size_t)pacctx->pac->data.length,
                                &bp, &remain);
            krb5_ser_pack_int32((krb5_int32)pacctx->pac->verified,
                                &bp, &remain);
        } else {
            code = ENOMEM;
        }
    } else {
        krb5_ser_pack_int32(0, &bp, &remain); /* length */
        krb5_ser_pack_int32(0, &bp, &remain); /* verified */
    }

    *buffer = bp;
    *lenremain = remain;

    return code;
}

static krb5_error_code
mspac_internalize(krb5_context kcontext, krb5_authdata_context context,
                  void *plugin_context, void *request_context,
                  krb5_octet **buffer, size_t *lenremain)
{
    struct mspac_context *pacctx = (struct mspac_context *)request_context;
    krb5_error_code code;
    krb5_int32 ibuf;
    krb5_octet *bp;
    size_t remain;
    krb5_pac pac = NULL;

    bp = *buffer;
    remain = *lenremain;

    /* length */
    code = krb5_ser_unpack_int32(&ibuf,
&bp, &remain); if (code != 0) return code; if (ibuf != 0) { code = krb5_pac_parse(kcontext, bp, ibuf, &pac); if (code != 0) return code; bp += ibuf; remain -= ibuf; } /* verified */ code = krb5_ser_unpack_int32(&ibuf, &bp, &remain); if (code != 0) { krb5_pac_free(kcontext, pac); return code; } if (pac != NULL) { pac->verified = (ibuf != 0); } if (pacctx->pac != NULL) { krb5_pac_free(kcontext, pacctx->pac); } pacctx->pac = pac; *buffer = bp; *lenremain = remain; return 0; } static krb5_error_code mspac_copy(krb5_context kcontext, krb5_authdata_context context, void *plugin_context, void *request_context, void *dst_plugin_context, void *dst_request_context) { struct mspac_context *srcctx = (struct mspac_context *)request_context; struct mspac_context *dstctx = (struct mspac_context *)dst_request_context; krb5_error_code code = 0; assert(dstctx != NULL); assert(dstctx->pac == NULL); if (srcctx->pac != NULL) code = k5_pac_copy(kcontext, srcctx->pac, &dstctx->pac); return code; } static krb5_authdatatype mspac_ad_types[] = { KRB5_AUTHDATA_WIN2K_PAC, 0 }; krb5plugin_authdata_client_ftable_v0 k5_mspac_ad_client_ftable = { "mspac", mspac_ad_types, mspac_init, mspac_fini, mspac_flags, mspac_request_init, mspac_request_fini, mspac_get_attribute_types, mspac_get_attribute, mspac_set_attribute, NULL, /* delete_attribute_proc */ mspac_export_authdata, mspac_import_authdata, mspac_export_internal, mspac_free_internal, mspac_verify, mspac_size, mspac_externalize, mspac_internalize, mspac_copy };
Java
package com.p.service;

import java.util.Collection;
import java.util.Optional;
import java.util.Random;
import java.util.UUID;

import javax.annotation.Resource;

import org.apache.log4j.Logger;
import org.hibernate.SessionFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;

import com.p.model.Notificacion;
import com.p.model.Role;
import com.p.model.User;
import com.p.model.modelAux.RegisterUser;
import com.p.model.repositories.UserRepository;

/**
 * Service layer for {@link User} entities: CRUD, registration side effects
 * (welcome notification + email), password regeneration, and resolution of
 * the currently authenticated principal from Spring Security.
 */
@Service("usersService")
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public class UsersService {

	protected static Logger logger = Logger.getLogger("service");

	@Resource(name = "sessionFactory")
	private SessionFactory sessionFactory;

	@Autowired
	private UserRepository repository;

	@Autowired
	private NotificacionService notificacionService;

	@Autowired
	private EmailManager emailManager;

	@Autowired
	private PasswordEncoder passwordEncoder;

	/**
	 * Deletes a user by id.
	 *
	 * (Original note: a web user's id starts with 1, a keyring user's id
	 * starts with 0 — the id > 0 precondition below only admits web users.)
	 *
	 * @param id
	 *            the id of an existing user; must be non-null and positive
	 */
	@Transactional
	public void delete(Integer id) {
		Assert.notNull(id);
		Assert.isTrue(id > 0);
		repository.delete(id);
	}

	/**
	 * Saves or updates depending on whether the ID is filled in.
	 * Assigns a random avatar when missing and, for brand-new users,
	 * triggers the welcome notification and email.
	 *
	 * @param us the user to persist
	 * @return the persisted entity as returned by the repository
	 */
	@Transactional()
	public User save(User us) {
		gestionarAvatar(us);
		gestionarAltaUsuario(us);
		User usr = repository.save(us);
		return usr;
	}

	/** Runs new-user side effects when the entity has no id yet (i.e. is unsaved). */
	protected void gestionarAltaUsuario(User us) {
		if (us.getId() == null || us.getId().equals(0)) {
			gestionarNotificacionAltaUsuario(us);
			gestionarEmailAltaUsuario(us);
		}
	}

	/** Sends the welcome email for a newly registered user. */
	protected void gestionarEmailAltaUsuario(User us) {
		emailManager.notify(us);
	}

	/**
	 * Builds and stores the welcome notification for a new user, sent from
	 * the first administrator found.
	 *
	 * @param us the newly registered user (notification receiver)
	 */
	protected void gestionarNotificacionAltaUsuario(User us) {
		// New user: send them an email and a notification.
		Notificacion notificacion = notificacionService.create();
		// NOTE(review): assumes at least one administrator exists; the
		// Assert below turns an empty admin list into a runtime failure.
		Optional<User> admin = repository.findAdministradores().stream()
				.findFirst();
		Assert.isTrue(admin.isPresent());
		User administrador = admin.get();
		notificacion.setEmisor(administrador);
		notificacion.setReceptor(us);
		notificacion.setTitulo("Gracias por registrarte en Pachanga!");
		notificacion
				.setContenido("¿Porque no completas tu perfil? Quedará mucho más mono :)");
		notificacionService.save(notificacion);
	}

	/** Assigns a random CSS avatar when the user has none. */
	protected void gestionarAvatar(User us) {
		if (us.getAvatar() == null) {
			Random rd = new Random();
			us.setAvatar(User.avatarCss[rd.nextInt(User.avatarCss.length)]);
		}
	}

	/**
	 * Looks a user up by email (used as login).
	 *
	 * @param login non-empty email address
	 * @return the matching user, or whatever the repository returns when absent
	 */
	@Transactional
	public User getByEmail(String login) {
		Assert.notNull(login);
		Assert.isTrue(login.length() > 0);
		return repository.findByEmail(login);
	}

	/** Finds a user by id (id must be non-null and >= 0). */
	@Transactional
	public User findOne(Integer id) {
		Assert.notNull(id);
		Assert.isTrue(id > -1);
		return repository.findOne(id);
	}

	/** Returns all users. */
	@Transactional
	public Collection<User> findAll() {
		return repository.findAll();
	}

	/** Returns all users except the one with the given email. */
	@Transactional
	public Collection<User> findAllDifferent(String email) {
		return repository.findAllDifferent(email);
	}

	/**
	 * Resolves the domain {@link User} for the currently authenticated
	 * Spring Security principal.
	 *
	 * @author David Romero Alcaide
	 * @return the authenticated user; never null (asserted)
	 */
	@Transactional(readOnly = true)
	public User getPrincipal() {
		User result;
		SecurityContext context;
		Authentication authentication;
		Object principal;

		// If the asserts in this method fail, then you're
		// likely to have your Tomcat's working directory
		// corrupt. Please, clear your browser's cache, stop
		// Tomcat, update your Maven's project configuration,
		// clean your project, clean Tomcat's working directory,
		// republish your project, and start it over.
		context = SecurityContextHolder.getContext();
		Assert.notNull(context);
		authentication = context.getAuthentication();
		Assert.notNull(authentication);
		principal = authentication.getPrincipal();
		Assert.isTrue(principal instanceof org.springframework.security.core.userdetails.User);
		result = getByEmail(((org.springframework.security.core.userdetails.User) principal)
				.getUsername());
		Assert.notNull(result);
		Assert.isTrue(result.getId() != 0);
		return result;
	}

	/** Maps a registration form object onto a fresh {@link User} entity. */
	public User map(RegisterUser user) {
		User usr = create();
		usr.setEmail(user.getEmail());
		usr.setPassword(user.getPassword());
		return usr;
	}

	/** Creates a blank user with placeholder names and the default role. */
	public User create() {
		User user = new User();
		user.setFirstName(" ");
		user.setLastName(" ");
		user.setRole(Role.ROLE_USER);
		return user;
	}

	/**
	 * Generates a random password for the user, stores it encoded, and
	 * emails the new password to them.
	 *
	 * NOTE(review): the plaintext password is handed to the email layer;
	 * consider a reset-token flow instead of mailing credentials.
	 *
	 * @param user the user whose password is regenerated
	 */
	@Transactional
	public void regenerarPassword(User user) {
		String newPass = UUID.randomUUID().toString();
		newPass = passwordEncoder.encode(newPass);
		user.setPassword(newPass);
		save(user);
		// NOTE(review): newPass here is the *encoded* value, so the email
		// contains a hash the user cannot actually log in with — confirm
		// whether the raw UUID was meant to be sent before encoding.
		emailManager.notifyNewPassword(user,newPass);
	}

	/** Fetches the stored avatar image bytes for a user id. */
	@Transactional(isolation = Isolation.READ_UNCOMMITTED)
	public byte[] findImage(Integer id) {
		Assert.notNull(id);
		Assert.isTrue(id > 0);
		return repository.findImage(id);
	}

	/** Full-text search over users. */
	@Transactional(readOnly = true)
	public Collection<? extends User> find(String texto) {
		return repository.findFullText(texto);
	}
}
Java
from must import MustHavePatterns
from successor import Successor


class TestSuccessor(object):
    """Checks that creating a self-referential Successor through the
    MustHavePatterns factory blows Python's recursion limit instead of
    looping forever."""

    @classmethod
    def setup_class(cls):
        # Shared factory configured with the recursive Successor pattern.
        cls.test_patterns = MustHavePatterns(Successor)

    def test_successor(self):
        try:
            self.test_patterns.create(Successor)
            # create() returning normally means the recursion never tripped;
            # raise a non-RuntimeError so the except below does not mask it.
            raise Exception("Recursive structure did not explode.")
        except RuntimeError as re:
            # RecursionError subclasses RuntimeError, and its message starts
            # with this prefix on both Python 2 and 3.
            assert str(re).startswith("maximum recursion depth")
Java
(function() {
    'use strict';

    // Controller for the "request password reset" screen: takes an email
    // address and asks the Auth service to start the reset flow.
    angular
        .module('fitappApp')
        .controller('RequestResetController', RequestResetController);

    RequestResetController.$inject = ['$timeout', 'Auth'];

    function RequestResetController ($timeout, Auth) {
        var vm = this;

        // View state: generic error flag, email-not-registered flag,
        // success marker, and the form model.
        vm.error = null;
        vm.errorEmailNotExists = null;
        vm.requestReset = requestReset;
        vm.resetAccount = {};
        vm.success = null;

        // Focus the email field once the view has rendered.
        $timeout(function (){angular.element('#email').focus();});

        /**
         * Kicks off the password-reset email for vm.resetAccount.email.
         * Distinguishes "address not registered" (HTTP 400 with a specific
         * body) from any other failure.
         */
        function requestReset () {
            vm.error = null;
            vm.errorEmailNotExists = null;
            Auth.resetPasswordInit(vm.resetAccount.email).then(function () {
                vm.success = 'OK';
            }).catch(function (response) {
                vm.success = null;
                if (response.status === 400 && response.data === 'e-mail address not registered') {
                    vm.errorEmailNotExists = 'ERROR';
                } else {
                    vm.error = 'ERROR';
                }
            });
        }
    }
})();
Java
#-*- encoding: utf-8 -*-
import csv, math, time, re, threading, sys

# Python 3 / Python 2 compatible urlopen import.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib import urlopen


class ErAPI():
    """Cache + scraper of eRepublik citizen data, keyed by IRC nick.

    Background daemon threads periodically re-scrape citizen profiles and
    persist the cache to data/er_nick-data.csv.
    """

    # Constructor: basic required configuration, instantiates helper objects.
    def __init__(self):
        self.data = {}
        # Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}}

        # Rank -> required rank points dictionary.
        self.rank_required_points = {
            "Recruit": 0, "Private": 15, "Private*": 45, "Private**": 80, "Private***": 120,
            "Corporal": 170, "Corporal*": 250, "Corporal**": 350, "Corporal***": 450,
            "Sergeant": 600, "Sergeant*": 800, "Sergeant**": 1000, "Sergeant***": 1400,
            "Lieutenant": 1850, "Lieutenant*": 2350, "Lieutenant**": 3000, "Lieutenant***": 3750,
            "Captain": 5000, "Captain*": 6500, "Captain**": 9000, "Captain***": 12000,
            "Major": 15500, "Major*": 20000, "Major**": 25000, "Major***": 31000,
            "Commander": 40000, "Commander*": 52000, "Commander**": 67000, "Commander***": 85000,
            "Lt Colonel": 110000, "Lt Colonel*": 140000, "Lt Colonel**": 180000, "Lt Colonel***": 225000,
            "Colonel": 285000, "Colonel*": 355000, "Colonel**": 435000, "Colonel***": 540000,
            "General": 660000, "General*": 800000, "General**": 950000, "General***": 1140000,
            "Field Marshal": 1350000, "Field Marshal*": 1600000, "Field Marshal**": 1875000, "Field Marshal***": 2185000,
            "Supreme Marshal": 2550000, "Supreme Marshal*": 3000000, "Supreme Marshal**": 3500000, "Supreme Marshal***": 4150000,
            "National Force": 4900000, "National Force*": 5800000, "National Force**": 7000000, "National Force***": 9000000,
            "World Class Force": 11500000, "World Class Force*": 14500000, "World Class Force**": 18000000, "World Class Force***": 22000000,
            "Legendary Force": 26500000, "Legendary Force*": 31500000, "Legendary Force**": 37000000, "Legendary Force***": 42000000,
            "God of War": 50000000, "God of War*": 100000000, "God of War**": 200000000, "God of War***": 500000000,
            "Titan": 1000000000, "Titan*": 2000000000, "Titan**": 4000000000, "Titan***": 10000000000}

        # Ordered list of ranks by seniority (index used for damage math).
        self.rank_to_pos = [
            "Recruit", "Private", "Private*", "Private**", "Private***",
            "Corporal", "Corporal*", "Corporal**", "Corporal***",
            "Sergeant", "Sergeant*", "Sergeant**", "Sergeant***",
            "Lieutenant", "Lieutenant*", "Lieutenant**", "Lieutenant***",
            "Captain", "Captain*", "Captain**", "Captain***",
            "Major", "Major*", "Major**", "Major***",
            "Commander", "Commander*", "Commander**", "Commander***",
            "Lt Colonel", "Lt Colonel*", "Lt Colonel**", "Lt Colonel***",
            "Colonel", "Colonel*", "Colonel**", "Colonel***",
            "General", "General*", "General**", "General***",
            "Field Marshal", "Field Marshal*", "Field Marshal**", "Field Marshal***",
            "Supreme Marshal", "Supreme Marshal*", "Supreme Marshal**", "Supreme Marshal***",
            "National Force", "National Force*", "National Force**", "National Force***",
            "World Class Force", "World Class Force*", "World Class Force**", "World Class Force***",
            "Legendary Force", "Legendary Force*", "Legendary Force**", "Legendary Force***",
            "God of War", "God of War*", "God of War**", "God of War***",
            "Titan", "Titan*", "Titan**", "Titan***",]

        # Run flag, useful if the updater/saver threads must be stopped manually.
        self.run = True

        # Data loading is parallelized in a new thread, daemonized so it dies
        # with the invoking process.
        th = threading.Thread(target=self.data_loader)
        th.daemon = True
        th.start()

    # Bootstrap method: loads data, then spawns the saver and updater
    # threads.  Only called from the constructor.
    def data_loader(self):
        self.load_data()

        self.data_saver_th = threading.Thread(target=self.data_saver)
        self.data_saver_th.daemon = True
        self.data_saver_th.start()

        self.data_updater_th = threading.Thread(target=self.data_updater)
        self.data_updater_th.daemon = True
        self.data_updater_th.start()

    # Persists the cache to disk every 60 s.  Only called from data_loader.
    def data_saver(self):
        while self.run:
            self.save_data()
            time.sleep(60)

    # Refreshes every cached nick (30 s apart), then sleeps 10 min.
    # Only called from data_loader.
    def data_updater(self):
        while self.run:
            for irc_nick in self.data:
                self.update_data(irc_nick)
                time.sleep(30)
            time.sleep(600)

    # ---------------------------------------------------------------------------------- #
    #   @ PUBLIC METHODS                                                                  #
    # ---------------------------------------------------------------------------------- #

    # Loads the local cache from the CSV file.
    def load_data(self):
        try:
            f = open('data/er_nick-data.csv', 'rt')
            reader = csv.reader(f)
            for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader:
                self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship}
            f.close()
        except:
            # NOTE(review): best-effort by design — a missing/corrupt file
            # just starts with an empty cache, but this also hides real bugs.
            pass

    # Writes the local cache out to the CSV file.
    def save_data(self):
        try:
            f = open('data/er_nick-data.csv', 'wt')
            writer = csv.writer(f)
            for u in self.data:
                writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']])
            f.close()
        except:
            # NOTE(review): silent failure means data loss goes unnoticed.
            pass

    # Scrapes the public citizen profile page to refresh the cached entry
    # for the given IRC nick.  The regexes target the Spanish-locale page.
    def update_data(self, irc_nick):
        try:
            id = self.data[irc_nick]['id']
            c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
            page = c.read()
            c.close()
            self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1)
            self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1))
            self.data[irc_nick]['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page.decode('utf-8'), re.DOTALL).group(1).strip('\r\n\t ').replace(',',''))
            self.data[irc_nick]['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page.decode('utf-8'), re.DOTALL).group(1).replace(',',''))
            self.data[irc_nick]['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page.decode('utf-8')).group(1)
        except:
            # NOTE(review): any network/HTML change silently leaves stale data.
            pass

    # Registers (or re-binds) an IRC nick to a citizen id and forces an
    # immediate refresh of that entry.
    def reg_nick_write(self, nick, id):
        if(nick.lower() in self.data.keys()):
            self.data[nick.lower()]['id'] = int(id)
        else:
            self.data[nick.lower()] = {'id': int(id), 'nick': nick, 'level': 1, 'strength': 0, 'rank_points': 0, 'citizenship': ''}

        self.update_data(nick.lower())

    # Accessors below raise KeyError for unknown nicks (lookup is
    # case-insensitive via lower()).

    # Returns the citizen ID for the given IRC nick.
    def get_id(self, nick):
        return self.data[nick.lower()]['id']

    # Returns the LEVEL for the given IRC nick.
    def get_level(self, nick):
        return self.data[nick.lower()]['level']

    # Returns the STRENGTH for the given IRC nick.
    def get_strength(self, nick):
        return self.data[nick.lower()]['strength']

    # Returns the RANK POINTS for the given IRC nick.
    def get_rank_points(self, nick):
        return self.data[nick.lower()]['rank_points']

    # Returns the CITIZENSHIP for the given IRC nick.
    def get_citizenship(self, nick):
        return self.data[nick.lower()]['citizenship']

    # Returns the in-game NICK for the given IRC nick.
    def get_nick(self, nick):
        return self.data[nick.lower()]['nick']

    # Returns the RANK NAME for the given amount of rank points: the
    # highest-positioned rank whose threshold is below rank_points.
    def calculate_rank_name(self, rank_points):
        index = 0
        for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
            if(self.rank_to_pos.index(k) > index):
                index = self.rank_to_pos.index(k)
        return self.rank_to_pos[index]

    # Computes hit DAMAGE from rank points, strength, weapon power, level
    # and bonus, mirroring the in-game damage formula.
    def calculate_damage(self, rank_points, strength, weapon_power, level, bonus):
        index = 0
        for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
            if(self.rank_to_pos.index(k) > index):
                index = self.rank_to_pos.index(k)

        # trunc() matches the game's integer damage display.
        return(math.trunc(((index / 20) + 0.3) * ((strength / 10) + 40) * (1 + (weapon_power / 100)) * (1.1 if level > 99 else 1) * bonus))
Java
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html lang="en-GB" xml:lang="en-GB" xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"/> <title>amod</title> <link rel="root" href=""/> <!-- for JS --> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css"> <link rel="stylesheet" type="text/css" href="../../css/jquery-ui-redmond.css"/> <link rel="stylesheet" type="text/css" href="../../css/style.css"/> <link rel="stylesheet" type="text/css" href="../../css/style-vis.css"/> <link rel="stylesheet" type="text/css" href="../../css/hint.css"/> <script type="text/javascript" src="../../lib/ext/head.load.min.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/anchor-js/3.2.2/anchor.min.js"></script> <script>document.addEventListener("DOMContentLoaded", function(event) {anchors.add();});</script> <!-- Set up this custom Google search at https://cse.google.com/cse/business/settings?cx=001145188882102106025:dl1mehhcgbo --> <!-- DZ 2021-01-22: I am temporarily hiding the search field to find out whether it slows down loading of the title page. 
<script> (function() { var cx = '001145188882102106025:dl1mehhcgbo'; var gcse = document.createElement('script'); gcse.type = 'text/javascript'; gcse.async = true; gcse.src = 'https://cse.google.com/cse.js?cx=' + cx; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(gcse, s); })(); </script> --> <!-- <link rel="shortcut icon" href="favicon.ico"/> --> </head> <body> <div id="main" class="center"> <div id="hp-header"> <table width="100%"><tr><td width="50%"> <span class="header-text"><a href="http://universaldependencies.org/#language-uk">home</a></span> <span class="header-text"><a href="https://github.com/universaldependencies/docs/edit/pages-source/_uk/dep/amod.md" target="#">edit page</a></span> <span class="header-text"><a href="https://github.com/universaldependencies/docs/issues">issue tracker</a></span> </td><td> <gcse:search></gcse:search> </td></tr></table> </div> <hr/> <div class="v2complete"> This page pertains to UD version 2. </div> <div id="content"> <noscript> <div id="noscript"> It appears that you have Javascript disabled. Please consider enabling Javascript for this page to see the visualizations. </div> </noscript> <!-- The content may include scripts and styles, hence we must load the shared libraries before the content. --> <script type="text/javascript"> console.time('loading libraries'); var root = '../../'; // filled in by jekyll head.js( // External libraries // DZ: Copied from embedding.html. I don't know which one is needed for what, so I'm currently keeping them all. 
root + 'lib/ext/jquery.min.js', root + 'lib/ext/jquery.svg.min.js', root + 'lib/ext/jquery.svgdom.min.js', root + 'lib/ext/jquery.timeago.js', root + 'lib/ext/jquery-ui.min.js', root + 'lib/ext/waypoints.min.js', root + 'lib/ext/jquery.address.min.js' ); </script> <h2><code>amod</code>: adjectival modifier</h2> <p>An adjectival modifier of a noun is any adjectival phrase that serves to modify the meaning of the noun.</p> <p>Exception: if the modifying adjectival word is pronominal (i.e. tagged <a href="">uk-pos/DET</a>), the relation is <a href="">det</a> instead of <code class="language-plaintext highlighter-rouge">amod</code>.</p> <pre><code class="language-sdparse">Ніна їсть зелене яблуко . \n Nina is-eating (a) green apple . amod(яблуко, зелене) amod(apple, green) </code></pre> <!--~~~ conllu--> <!--1 Ніна Ніна NPROP NSN _ 2 nsubj _ _--> <!--2 їсть їсти VERB VPR3s _ 0 root _ _--> <!--3 зелене зелений ADJ ASA _ 4 amod _ _--> <!--4 яблуко яблуко NOUN NSA _ 2 dobj _ _--> <!--~~~--> <pre><code class="language-sdparse">Ігор взяв десятитисячну позику . \n Igor has taken (a) ten-thousand loan . amod(позику, десятитисячну) amod(loan, ten-thousand) </code></pre> <pre><code class="language-sdparse">Перший бігун був швидкий . \n The-first racer was fast . amod(бігун, Перший) amod(racer, The-first) nsubj(швидкий, бігун) nsubj(fast, racer) </code></pre> <pre><code class="language-sdparse">Швидкий бігун був перший . \n The-fast racer was first . 
amod(бігун, Швидкий) amod(racer, The-fast) nsubj(перший, бігун) nsubj(first, racer) </code></pre> <!-- Interlanguage links updated St lis 3 20:58:38 CET 2021 --> <!-- "in other languages" links --> <hr/> amod in other languages: [<a href="../../bej/dep/amod.html">bej</a>] [<a href="../../bg/dep/amod.html">bg</a>] [<a href="../../bm/dep/amod.html">bm</a>] [<a href="../../cop/dep/amod.html">cop</a>] [<a href="../../cs/dep/amod.html">cs</a>] [<a href="../../de/dep/amod.html">de</a>] [<a href="../../el/dep/amod.html">el</a>] [<a href="../../en/dep/amod.html">en</a>] [<a href="../../es/dep/amod.html">es</a>] [<a href="../../et/dep/amod.html">et</a>] [<a href="../../eu/dep/amod.html">eu</a>] [<a href="../../fi/dep/amod.html">fi</a>] [<a href="../../fr/dep/amod.html">fr</a>] [<a href="../../fro/dep/amod.html">fro</a>] [<a href="../../ga/dep/amod.html">ga</a>] [<a href="../../gsw/dep/amod.html">gsw</a>] [<a href="../../hy/dep/amod.html">hy</a>] [<a href="../../it/dep/amod.html">it</a>] [<a href="../../ja/dep/amod.html">ja</a>] [<a href="../../kk/dep/amod.html">kk</a>] [<a href="../../no/dep/amod.html">no</a>] [<a href="../../pcm/dep/amod.html">pcm</a>] [<a href="../../pt/dep/amod.html">pt</a>] [<a href="../../ro/dep/amod.html">ro</a>] [<a href="../../ru/dep/amod.html">ru</a>] [<a href="../../sv/dep/amod.html">sv</a>] [<a href="../../swl/dep/amod.html">swl</a>] [<a href="../../tr/dep/amod.html">tr</a>] [<a href="../../u/dep/amod.html">u</a>] [<a href="../../uk/dep/amod.html">uk</a>] [<a href="../../urj/dep/amod.html">urj</a>] [<a href="../../vi/dep/amod.html">vi</a>] [<a href="../../yue/dep/amod.html">yue</a>] [<a href="../../zh/dep/amod.html">zh</a>] </div> <!-- support for embedded visualizations --> <script type="text/javascript"> var root = '../../'; // filled in by jekyll head.js( // We assume that external libraries such as jquery.min.js have already been loaded outside! // (See _layouts/base.html.) 
// brat helper modules root + 'lib/brat/configuration.js', root + 'lib/brat/util.js', root + 'lib/brat/annotation_log.js', root + 'lib/ext/webfont.js', // brat modules root + 'lib/brat/dispatcher.js', root + 'lib/brat/url_monitor.js', root + 'lib/brat/visualizer.js', // embedding configuration root + 'lib/local/config.js', // project-specific collection data root + 'lib/local/collections.js', // Annodoc root + 'lib/annodoc/annodoc.js', // NOTE: non-local libraries 'https://spyysalo.github.io/conllu.js/conllu.js' ); var webFontURLs = [ // root + 'static/fonts/Astloch-Bold.ttf', root + 'static/fonts/PT_Sans-Caption-Web-Regular.ttf', root + 'static/fonts/Liberation_Sans-Regular.ttf' ]; var setupTimeago = function() { jQuery("time.timeago").timeago(); }; head.ready(function() { setupTimeago(); // mark current collection (filled in by Jekyll) Collections.listing['_current'] = 'uk'; // perform all embedding and support functions Annodoc.activate(Config.bratCollData, Collections.listing); }); </script> <!-- google analytics --> <script> (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-55233688-1', 'auto'); ga('send', 'pageview'); </script> <div id="footer"> <p class="footer-text">&copy; 2014–2021 <a href="http://universaldependencies.org/introduction.html#contributors" style="color:gray">Universal Dependencies contributors</a>. Site powered by <a href="http://spyysalo.github.io/annodoc" style="color:gray">Annodoc</a> and <a href="http://brat.nlplab.org/" style="color:gray">brat</a></p>. </div> </div> </body> </html>
Java
# Espeletiopsis cristalinensis (Cuatrec.) Cuatrec. SPECIES #### Status ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
Java
/* Styles for a fading image slideshow: container, prev/next arrows,
   caption, slide counter, dot indicators, and the fade animation. */

/* Slideshow container */
.slideshow-container {
  max-width: 1000px;
  position: relative; /* anchor for the absolutely-positioned arrows/captions */
  margin: auto;
}

/* Next & previous buttons */
.prev, .next {
  cursor: pointer;
  position: absolute;
  top: 50%;
  width: auto;
  margin-top: -22px; /* vertically center the 44px-tall button */
  padding: 16px;
  color: white;
  font-weight: bold;
  font-size: 18px;
  transition: 0.6s ease;
  border-radius: 0 3px 3px 0;
}

/* Position the "next button" to the right */
.next {
  right: 0;
  border-radius: 3px 0 0 3px;
}

/* On hover, add a black background color with a little bit see-through */
.prev:hover, .next:hover {
  background-color: rgba(0,0,0,0.8);
}

/* Caption text */
.text {
  color: #f2f2f2;
  font-size: 15px;
  padding: 8px 12px;
  position: absolute;
  bottom: 8px;
  width: 100%;
  text-align: center;
}

/* Number text (1/3 etc) */
.numbertext {
  color: #f2f2f2;
  font-size: 12px;
  padding: 8px 12px;
  position: absolute;
  top: 0;
}

/* The dots/bullets/indicators */
.dot {
  cursor:pointer;
  height: 13px;
  width: 13px;
  margin: 0 2px;
  background-color: #bbb;
  border-radius: 50%;
  display: inline-block;
  transition: background-color 0.6s ease;
}

/* Highlight the active dot, and dots on hover */
.active, .dot:hover {
  background-color: #717171;
}

/* Fading animation */
.fade {
  -webkit-animation-name: fade;
  -webkit-animation-duration: 1.5s;
  animation-name: fade;
  animation-duration: 1.5s;
}

@-webkit-keyframes fade {
  from {opacity: .4}
  to {opacity: 1}
}

@keyframes fade {
  from {opacity: .4}
  to {opacity: 1}
}
Java
package org.apache.solr.cloud; /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.http.params.CoreConnectionPNames; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrRequest; import org.apache.solr.client.solrj.SolrServer; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.client.solrj.impl.CloudSolrServer; import org.apache.solr.client.solrj.impl.HttpSolrServer; import org.apache.solr.client.solrj.request.QueryRequest; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocRouter; import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.cloud.ZkCoreNodeProps; import org.apache.solr.common.cloud.ZkStateReader; import org.apache.solr.common.params.CollectionParams; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.util.Hash; import org.apache.solr.handler.admin.CollectionsHandler; import org.apache.solr.update.DirectUpdateHandler2; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; import java.io.IOException; import 
java.net.MalformedURLException; import java.util.HashMap; import java.util.List; import java.util.Map; public class ShardSplitTest extends BasicDistributedZkTest { public static final String SHARD1_0 = SHARD1 + "_0"; public static final String SHARD1_1 = SHARD1 + "_1"; @Override @Before public void setUp() throws Exception { super.setUp(); System.setProperty("numShards", Integer.toString(sliceCount)); System.setProperty("solr.xml.persist", "true"); } @Override @After public void tearDown() throws Exception { super.tearDown(); if (VERBOSE || printLayoutOnTearDown) { super.printLayout(); } if (controlClient != null) { controlClient.shutdown(); } if (cloudClient != null) { cloudClient.shutdown(); } if (controlClientCloud != null) { controlClientCloud.shutdown(); } super.tearDown(); System.clearProperty("zkHost"); System.clearProperty("numShards"); System.clearProperty("solr.xml.persist"); // insurance DirectUpdateHandler2.commitOnClose = true; } @Override public void doTest() throws Exception { waitForThingsToLevelOut(15); ClusterState clusterState = cloudClient.getZkStateReader().getClusterState(); DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter(); Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1); DocRouter.Range shard1Range = shard1.getRange() != null ? 
shard1.getRange() : router.fullRange(); final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range); final int[] docCounts = new int[ranges.size()]; int numReplicas = shard1.getReplicas().size(); del("*:*"); for (int id = 0; id < 100; id++) { indexAndUpdateCount(ranges, docCounts, id); } commit(); Thread indexThread = new Thread() { @Override public void run() { for (int id = 101; id < atLeast(401); id++) { try { indexAndUpdateCount(ranges, docCounts, id); Thread.sleep(atLeast(25)); } catch (Exception e) { log.error("Exception while adding doc", e); } } } }; indexThread.start(); splitShard(SHARD1); log.info("Layout after split: \n"); printLayout(); indexThread.join(); commit(); checkDocCountsAndShardStates(docCounts, numReplicas); // todo can't call waitForThingsToLevelOut because it looks for jettys of all shards // and the new sub-shards don't have any. waitForRecoveriesToFinish(true); //waitForThingsToLevelOut(15); } protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws SolrServerException, KeeperException, InterruptedException { SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_"); query.set("distrib", false); ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0); HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl()); QueryResponse response = shard1_0Server.query(query); long shard10Count = response.getResults().getNumFound(); ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1); HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl()); QueryResponse response2 = shard1_1Server.query(query); long shard11Count = response2.getResults().getNumFound(); logDebugHelp(docCounts, response, shard10Count, response2, shard11Count); assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count); assertEquals("Wrong doc count on shard1_1", docCounts[1], 
shard11Count); ClusterState clusterState = null; Slice slice1_0 = null, slice1_1 = null; int i = 0; for (i = 0; i < 10; i++) { ZkStateReader zkStateReader = cloudClient.getZkStateReader(); zkStateReader.updateClusterState(true); clusterState = zkStateReader.getClusterState(); slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0"); slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1"); if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState())) break; Thread.sleep(500); } log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500); assertNotNull("Cluster state does not contain shard1_0", slice1_0); assertNotNull("Cluster state does not contain shard1_0", slice1_1); assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState()); assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState()); assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size()); assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size()); } protected void splitShard(String shardId) throws SolrServerException, IOException { ModifiableSolrParams params = new ModifiableSolrParams(); params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString()); params.set("collection", "collection1"); params.set("shard", shardId); SolrRequest request = new QueryRequest(params); request.setPath("/admin/collections"); String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient) .getBaseURL(); baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length()); HttpSolrServer baseServer = new HttpSolrServer(baseUrl); baseServer.setConnectionTimeout(15000); baseServer.setSoTimeout((int) (CollectionsHandler.DEFAULT_ZK_TIMEOUT * 5)); baseServer.request(request); } protected void indexAndUpdateCount(List<DocRouter.Range> ranges, 
int[] docCounts, int id) throws Exception { indexr("id", id); // todo - hook in custom hashing byte[] bytes = String.valueOf(id).getBytes("UTF-8"); int hash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0); for (int i = 0; i < ranges.size(); i++) { DocRouter.Range range = ranges.get(i); if (range.includes(hash)) docCounts[i]++; } } protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) { for (int i = 0; i < docCounts.length; i++) { int docCount = docCounts[i]; log.info("Expected docCount for shard1_{} = {}", i, docCount); } log.info("Actual docCount for shard1_0 = {}", shard10Count); log.info("Actual docCount for shard1_1 = {}", shard11Count); Map<String, String> idVsVersion = new HashMap<String, String>(); Map<String, SolrDocument> shard10Docs = new HashMap<String, SolrDocument>(); Map<String, SolrDocument> shard11Docs = new HashMap<String, SolrDocument>(); for (int i = 0; i < response.getResults().size(); i++) { SolrDocument document = response.getResults().get(i); idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString()); SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document); if (old != null) { log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. 
Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_")); } } for (int i = 0; i < response2.getResults().size(); i++) { SolrDocument document = response2.getResults().get(i); String value = document.getFieldValue("id").toString(); String version = idVsVersion.get(value); if (version != null) { log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_")); } SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document); if (old != null) { log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_")); } } } @Override protected SolrServer createNewSolrServer(String collection, String baseUrl) { HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl); server.setSoTimeout(5 * 60 * 1000); return server; } @Override protected SolrServer createNewSolrServer(int port) { HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port); server.setSoTimeout(5 * 60 * 1000); return server; } @Override protected CloudSolrServer createCloudClient(String defaultCollection) throws MalformedURLException { CloudSolrServer client = super.createCloudClient(defaultCollection); client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000); return client; } }
Java
# Phoma amaranthi Brunaud SPECIES #### Status ACCEPTED #### According to Index Fungorum #### Published in Bull. Torrey bot. Club 20: 251 (1893) #### Original name Phoma amaranthi Brunaud ### Remarks null
Java
package org.zstack.header.identity; import org.zstack.header.message.APICreateMessage; import org.zstack.header.message.APIMessage; import org.zstack.header.message.APIParam; @NeedRoles(roles = {IdentityRoles.CREATE_POLICY_ROLE}) public class APICreatePolicyMsg extends APICreateMessage implements AccountMessage { @APIParam private String name; private String description; @APIParam private String policyData; @Override public String getAccountUuid() { return this.getSession().getAccountUuid(); } public String getPolicyData() { return policyData; } public void setPolicyData(String policyData) { this.policyData = policyData; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } }
Java
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Xml.Serialization;

namespace SAM.DTO
{
    /// <summary>
    /// XML-serializable user record (serialized as a &lt;user&gt; element).
    /// Property declaration order is preserved by XmlSerializer, so do not
    /// reorder these members without checking consumers of the XML.
    /// </summary>
    [XmlType(TypeName = "user")]
    public class User
    {
        /// <summary>Unique user identifier.</summary>
        public string id { get; set; }
        /// <summary>Display name of the user.</summary>
        public string name { get; set; }
        /// <summary>URL of the user's avatar image.</summary>
        public string avatar_url { get; set; }
    }

    /// <summary>
    /// XML-serializable collection of <see cref="User"/> items
    /// (serialized as a &lt;users&gt; element); list behavior comes from
    /// the project-defined SamList&lt;T&gt; base class.
    /// </summary>
    [XmlType(TypeName = "users")]
    public class UserList : SamList<User>
    {
    }
}
Java
// Copyright 2017 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.profiler.memory; import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.MapMaker; import com.google.devtools.build.lib.concurrent.ThreadSafety.ConditionallyThreadCompatible; import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe; import com.google.devtools.build.lib.packages.AspectClass; import com.google.devtools.build.lib.packages.RuleClass; import com.google.devtools.build.lib.packages.RuleFunction; import com.google.devtools.build.lib.syntax.Debug; import com.google.devtools.build.lib.syntax.Location; import com.google.devtools.build.lib.syntax.StarlarkCallable; import com.google.devtools.build.lib.syntax.StarlarkThread; import com.google.monitoring.runtime.instrumentation.Sampler; import com.google.perftools.profiles.ProfileProto.Function; import com.google.perftools.profiles.ProfileProto.Line; import com.google.perftools.profiles.ProfileProto.Profile; import com.google.perftools.profiles.ProfileProto.Sample; import com.google.perftools.profiles.ProfileProto.ValueType; import java.io.FileOutputStream; import java.io.IOException; import java.time.Instant; import java.util.HashMap; import java.util.Map; import java.util.Random; import java.util.zip.GZIPOutputStream; import javax.annotation.Nullable; 
/** Tracks allocations for memory reporting. */
@ConditionallyThreadCompatible
@SuppressWarnings("ThreadLocalUsage") // the AllocationTracker is effectively a global
public final class AllocationTracker implements Sampler, Debug.ThreadHook {

  // A mapping from Java thread to StarlarkThread.
  // Used to effect a hidden StarlarkThread parameter to sampleAllocation.
  // TODO(adonovan): opt: merge the three different ThreadLocals in use here.
  private final ThreadLocal<StarlarkThread> starlarkThread = new ThreadLocal<>();

  @Override
  public void onPushFirst(StarlarkThread thread) {
    starlarkThread.set(thread);
  }

  @Override
  public void onPopLast(StarlarkThread thread) {
    starlarkThread.remove();
  }

  // One recorded sample: which rule/aspect was being analysed and the Starlark
  // callstack at the moment of the sampled allocation, plus the byte count
  // accumulated since the previous sample on that thread.
  private static class AllocationSample {
    @Nullable final RuleClass ruleClass; // Current rule being analysed, if any
    @Nullable final AspectClass aspectClass; // Current aspect being analysed, if any
    final ImmutableList<Frame> callstack; // Starlark callstack, if any
    final long bytes;

    AllocationSample(
        @Nullable RuleClass ruleClass,
        @Nullable AspectClass aspectClass,
        ImmutableList<Frame> callstack,
        long bytes) {
      this.ruleClass = ruleClass;
      this.aspectClass = aspectClass;
      this.callstack = callstack;
      this.bytes = bytes;
    }
  }

  // Internal, GC-friendly snapshot of one Starlark stack frame: just the
  // function name, its location, and (if it was a rule function) the rule.
  private static class Frame {
    final String name;
    final Location loc;
    @Nullable final RuleFunction ruleFunction;

    Frame(String name, Location loc, @Nullable RuleFunction ruleFunction) {
      this.name = name;
      this.loc = loc;
      this.ruleFunction = ruleFunction;
    }
  }

  // Weak keys: when a sampled object is garbage-collected its sample entry
  // disappears too, so this map reflects live objects only.
  private final Map<Object, AllocationSample> allocations = new MapMaker().weakKeys().makeMap();
  private final int samplePeriod;
  private final int sampleVariance;
  private boolean enabled = true;

  /**
   * Cheap wrapper class for a long. Avoids having to do two thread-local lookups per allocation.
   */
  private static final class LongValue {
    long value;
  }

  // Per-thread byte counters driving the sampling decision.
  private final ThreadLocal<LongValue> currentSampleBytes = ThreadLocal.withInitial(LongValue::new);
  private final ThreadLocal<Long> nextSampleBytes = ThreadLocal.withInitial(this::getNextSample);
  private final Random random = new Random();

  // samplePeriod: mean number of bytes between samples; variance: +/- jitter
  // applied around that mean (0 disables jitter).
  AllocationTracker(int samplePeriod, int variance) {
    this.samplePeriod = samplePeriod;
    this.sampleVariance = variance;
  }

  // Called by instrumentation.recordAllocation, which is in turn called
  // by an instrumented version of the application assembled on the fly
  // by instrumentation.AllocationInstrumenter.
  // The instrumenter inserts a call to recordAllocation after every
  // memory allocation instruction in the original class.
  //
  // This function runs within 'new', so is not supposed to allocate memory;
  // see Sampler interface. In fact it allocates in nearly a dozen places.
  // TODO(adonovan): suppress reentrant calls by setting a thread-local flag.
  @Override
  @ThreadSafe
  public void sampleAllocation(int count, String desc, Object newObj, long size) {
    if (!enabled) {
      return;
    }

    @Nullable StarlarkThread thread = starlarkThread.get();

    // Calling Debug.getCallStack is a dubious operation here.
    // First it allocates memory, which breaks the Sampler contract.
    // Second, the allocation could in principle occur while the thread's
    // representation invariants are temporarily broken (that is, during
    // the call to ArrayList.add when pushing a new stack frame).
    // For now at least, the allocation done by ArrayList.add occurs before
    // the representation of the ArrayList is changed, so it is safe,
    // but this is a fragile assumption.
    ImmutableList<Debug.Frame> callstack =
        thread != null ? Debug.getCallStack(thread) : ImmutableList.of();

    RuleClass ruleClass = CurrentRuleTracker.getRule();
    AspectClass aspectClass = CurrentRuleTracker.getAspect();

    // Should we bother sampling?
    if (callstack.isEmpty() && ruleClass == null && aspectClass == null) {
      return;
    }

    // Convert the thread's stack right away to our internal form.
    // It is not safe to inspect Debug.Frame references once the thread resumes,
    // and keeping StarlarkCallable values live defeats garbage collection.
    ImmutableList.Builder<Frame> frames = ImmutableList.builderWithExpectedSize(callstack.size());
    for (Debug.Frame fr : callstack) {
      // The frame's PC location is currently not updated at every step,
      // only at function calls, so the leaf frame's line number may be
      // slightly off; see the tests.
      // TODO(b/149023294): remove comment when we move to a compiled representation.
      StarlarkCallable fn = fr.getFunction();
      frames.add(
          new Frame(
              fn.getName(), fr.getLocation(), fn instanceof RuleFunction ? (RuleFunction) fn : null));
    }

    // If we start getting stack overflows here, it's because the memory sampling
    // implementation has changed to call back into the sampling method immediately on
    // every allocation. Since thread locals can allocate, this can in this case lead
    // to infinite recursion. This method will then need to be rewritten to not
    // allocate, or at least not allocate to obtain its sample counters.
    LongValue bytesValue = currentSampleBytes.get();
    long bytes = bytesValue.value + size;
    if (bytes < nextSampleBytes.get()) {
      bytesValue.value = bytes;
      return;
    }
    // Threshold crossed: record a sample and reset the per-thread counter.
    bytesValue.value = 0;
    nextSampleBytes.set(getNextSample());
    allocations.put(newObj, new AllocationSample(ruleClass, aspectClass, frames.build(), bytes));
  }

  // Next byte threshold: samplePeriod plus uniform jitter in
  // [-sampleVariance, +sampleVariance) when variance is enabled.
  private long getNextSample() {
    return (long) samplePeriod
        + (sampleVariance > 0 ? (random.nextInt(sampleVariance * 2) - sampleVariance) : 0);
  }

  /** A pair of rule/aspect name and the bytes it consumes. */
  public static final class RuleBytes {
    private final String name;
    private long bytes;

    public RuleBytes(String name) {
      this.name = name;
    }

    /** The number of bytes total occupied by this rule or aspect class. */
    public long getBytes() {
      return bytes;
    }

    // Mutating accumulator; returns this for call chaining.
    public RuleBytes addBytes(long bytes) {
      this.bytes += bytes;
      return this;
    }

    @Override
    public String toString() {
      return String.format("RuleBytes(%s, %d)", name, bytes);
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      RuleBytes ruleBytes = (RuleBytes) o;
      return bytes == ruleBytes.bytes && Objects.equal(name, ruleBytes.name);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(name, bytes);
    }
  }

  // If the topmost stack entry is a call to a rule function, returns it.
  @Nullable
  private static RuleFunction getRule(AllocationSample sample) {
    Frame top = Iterables.getLast(sample.callstack, null);
    return top != null ? top.ruleFunction : null;
  }

  /**
   * Returns the total memory consumption for rules and aspects, keyed by {@link RuleClass#getKey}
   * or {@link AspectClass#getKey}.
   */
  public void getRuleMemoryConsumption(
      Map<String, RuleBytes> rules, Map<String, RuleBytes> aspects) {
    // Make sure we don't track our own allocations
    enabled = false;
    // GC prunes dead objects from the weak-keyed map, so only live memory is counted.
    System.gc();

    // Get loading phase memory for rules.
    for (AllocationSample sample : allocations.values()) {
      RuleFunction rule = getRule(sample);
      if (rule != null) {
        RuleClass ruleClass = rule.getRuleClass();
        String key = ruleClass.getKey();
        RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(ruleClass.getName()));
        rules.put(key, ruleBytes.addBytes(sample.bytes));
      }
    }
    // Get analysis phase memory for rules and aspects
    for (AllocationSample sample : allocations.values()) {
      if (sample.ruleClass != null) {
        String key = sample.ruleClass.getKey();
        RuleBytes ruleBytes =
            rules.computeIfAbsent(key, k -> new RuleBytes(sample.ruleClass.getName()));
        rules.put(key, ruleBytes.addBytes(sample.bytes));
      }
      if (sample.aspectClass != null) {
        String key = sample.aspectClass.getKey();
        RuleBytes ruleBytes =
            aspects.computeIfAbsent(key, k -> new RuleBytes(sample.aspectClass.getName()));
        aspects.put(key, ruleBytes.addBytes(sample.bytes));
      }
    }

    enabled = true;
  }

  /** Dumps all Starlark analysis time allocations to a pprof-compatible file. */
  public void dumpSkylarkAllocations(String path) throws IOException {
    // Make sure we don't track our own allocations
    enabled = false;
    System.gc();
    Profile profile = buildMemoryProfile();
    try (GZIPOutputStream outputStream = new GZIPOutputStream(new FileOutputStream(path))) {
      profile.writeTo(outputStream);
      outputStream.finish();
    }
    enabled = true;
  }

  // Builds a pprof Profile proto with one "memory/bytes" sample type from the
  // recorded allocation samples (samples without a Starlark stack are skipped).
  Profile buildMemoryProfile() {
    Profile.Builder profile = Profile.newBuilder();
    StringTable stringTable = new StringTable(profile);
    FunctionTable functionTable = new FunctionTable(profile, stringTable);
    LocationTable locationTable = new LocationTable(profile, functionTable);
    profile.addSampleType(
        ValueType.newBuilder()
            .setType(stringTable.get("memory"))
            .setUnit(stringTable.get("bytes"))
            .build());
    for (AllocationSample sample : allocations.values()) {
      // Skip empty callstacks
      if (sample.callstack.isEmpty()) {
        continue;
      }
      Sample.Builder b = Sample.newBuilder().addValue(sample.bytes);
      // pprof wants the leaf frame first, hence the reversed stack.
      for (Frame fr : sample.callstack.reverse()) {
        b.addLocationId(locationTable.get(fr.loc.file(), fr.name, fr.loc.line()));
      }
      profile.addSample(b.build());
    }
    // NOTE(review): second-resolution timestamp scaled to nanos — sub-second
    // precision is intentionally dropped here.
    profile.setTimeNanos(Instant.now().getEpochSecond() * 1000000000);
    return profile.build();
  }

  // Interning table for the profile's string section; index 0 is "".
  private static class StringTable {
    final Profile.Builder profile;
    final Map<String, Long> table = new HashMap<>();
    long index = 0;

    StringTable(Profile.Builder profile) {
      this.profile = profile;
      get(""); // 0 is reserved for the empty string
    }

    long get(String str) {
      return table.computeIfAbsent(
          str,
          key -> {
            profile.addStringTable(key);
            return index++;
          });
    }
  }

  // Interning table for Function protos, keyed by "file#function".
  private static class FunctionTable {
    final Profile.Builder profile;
    final StringTable stringTable;
    final Map<String, Long> table = new HashMap<>();
    long index = 1; // 0 is reserved

    FunctionTable(Profile.Builder profile, StringTable stringTable) {
      this.profile = profile;
      this.stringTable = stringTable;
    }

    long get(String file, String function) {
      return table.computeIfAbsent(
          file + "#" + function,
          key -> {
            Function fn =
                Function.newBuilder()
                    .setId(index)
                    .setFilename(stringTable.get(file))
                    .setName(stringTable.get(function))
                    .build();
            profile.addFunction(fn);
            return index++;
          });
    }
  }

  // Interning table for Location protos, keyed by "file#function#line".
  private static class LocationTable {
    final Profile.Builder profile;
    final FunctionTable functionTable;
    final Map<String, Long> table = new HashMap<>();
    long index = 1; // 0 is reserved

    LocationTable(Profile.Builder profile, FunctionTable functionTable) {
      this.profile = profile;
      this.functionTable = functionTable;
    }

    long get(String file, String function, long line) {
      return table.computeIfAbsent(
          file + "#" + function + "#" + line,
          key -> {
            com.google.perftools.profiles.ProfileProto.Location location =
                com.google.perftools.profiles.ProfileProto.Location.newBuilder()
                    .setId(index)
                    .addLine(
                        Line.newBuilder()
                            .setFunctionId(functionTable.get(file, function))
                            .setLine(line)
                            .build())
                    .build();
            profile.addLocation(location);
            return index++;
          });
    }
  }
}
Java
/**
 * @file    ff_lpc546xx.c
 * @brief   board ID for the NXP LPC54608Xpresso board
 *
 * DAPLink Interface Firmware
 * Copyright (c) 2009-2019, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "target_family.h"
#include "target_board.h"

/* Board descriptor consumed by the DAPLink firmware at startup.
 * Links this board ID to the stub HW-reset target family and provides
 * the mass-storage drive name and the mbed platform URL shown to the user. */
const board_info_t g_board_info = {
    .info_version = kBoardInfoVersion,          /* descriptor layout version */
    .board_id = "8081",                         /* unique DAPLink board ID */
    .family_id = kStub_HWReset_FamilyID,        /* generic hardware-reset family */
    .daplink_url_name = "PRODINFOHTM",          /* 8.3 name of the URL file on the MSD drive */
    .daplink_drive_name = "FF-LPC546XX",        /* USB mass-storage volume label */
    .daplink_target_url = "https://os.mbed.com/platforms/L-TEK-FF-LPC546XX",
    .target_cfg = &target_device,               /* flash/RAM layout defined by the target */
};
Java
=head1 LICENSE

See the NOTICE file distributed with this work for additional information
regarding copyright ownership.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=head1 NAME

Bio::EnsEMBL::Compara::PipeConfig::EPO_conf

=head1 SYNOPSIS

    init_pipeline.pl Bio::EnsEMBL::Compara::PipeConfig::EPO_conf -host mysql-ens-compara-prod-X -port XXXX \
        -division $COMPARA_DIV -species_set_name <species_set_name>

=head1 DESCRIPTION

This PipeConfig file gives defaults for mapping (using exonerate at the
moment) anchors to a set of target genomes (dumped text files).

=cut

package Bio::EnsEMBL::Compara::PipeConfig::EPO_conf;

use strict;
use warnings;

use Bio::EnsEMBL::Hive::Version 2.4;
use Bio::EnsEMBL::Hive::PipeConfig::HiveGeneric_conf;   # For INPUT_PLUS

use Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOMapAnchors;
use Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOAlignment;

use base ('Bio::EnsEMBL::Compara::PipeConfig::ComparaGeneric_conf');

# Pipeline-wide defaults. Values here can be overridden on the
# init_pipeline.pl command line; '#name#' strings are Hive placeholders
# substituted at runtime, and $self->o() defers resolution of options.
sub default_options {
    my ($self) = @_;
    return {
        %{$self->SUPER::default_options},

        'pipeline_name' => $self->o('species_set_name').'_epo_'.$self->o('rel_with_suffix'),
        'method_type'   => 'EPO',

        # Databases
        'compara_master' => 'compara_master',
        # Database containing the anchors for mapping
        'compara_anchor_db' => $self->o('species_set_name') . '_epo_anchors',
        # The previous database to reuse the anchor mappings
        'reuse_db' => $self->o('species_set_name') . '_epo_prev',

        # The ancestral_db is created on the same server as the pipeline_db
        'ancestral_db' => {
            -driver   => $self->o('pipeline_db', '-driver'),
            -host     => $self->o('pipeline_db', '-host'),
            -port     => $self->o('pipeline_db', '-port'),
            -species  => $self->o('ancestral_sequences_name'),
            -user     => $self->o('pipeline_db', '-user'),
            -pass     => $self->o('pipeline_db', '-pass'),
            -dbname   => $self->o('dbowner').'_'.$self->o('species_set_name').'_ancestral_core_'.$self->o('rel_with_suffix'),
        },

        'ancestral_sequences_name'         => 'ancestral_sequences',
        'ancestral_sequences_display_name' => 'Ancestral sequences',

        # Executable parameters
        # exonerate options used when mapping anchors to the genomes
        'mapping_params'    => { bestn=>11, gappedextension=>"no", softmasktarget=>"no", percent=>75, showalignment=>"no", model=>"affine:local", },
        'enredo_params'     => ' --min-score 0 --max-gap-length 200000 --max-path-dissimilarity 4 --min-length 10000 --min-regions 2 --min-anchors 3 --max-ratio 3 --simplify-graph 7 --bridges -o ',
        'gerp_window_sizes' => [1,10,100,500], #gerp window sizes

        # Dump directory
        'work_dir'            => $self->o('pipeline_dir'),
        'enredo_output_file'  => $self->o('work_dir').'/enredo_output.txt',
        'bed_dir'             => $self->o('work_dir').'/bed',
        'feature_dir'         => $self->o('work_dir').'/feature_dump',
        'enredo_mapping_file' => $self->o('work_dir').'/enredo_input.txt',
        'bl2seq_dump_dir'     => $self->o('work_dir').'/bl2seq', # location for dumping sequences to determine strand (for bl2seq)
        'bl2seq_file_stem'    => '#bl2seq_dump_dir#/bl2seq',
        'output_dir'          => '#feature_dir#', # alias

        # Options
        #skip this module if set to 1
        'skip_multiplealigner_stats' => 0,
        # dont dump the MT sequence for mapping
        'only_nuclear_genome' => 1,
        # add MT dnafrags separately (1) or not (0) to the dnafrag_region table
        'add_non_nuclear_alignments' => 1,
        # batch size of anchor sequences to map
        'anchor_batch_size' => 1000,
        # Usually set to 0 because we run Gerp on the EPO2X alignment instead
        'run_gerp' => 0,

        # Capacities (analysis concurrency / batching knobs)
        'low_capacity'                  => 10,
        'map_anchors_batch_size'        => 5,
        'map_anchors_capacity'          => 2000,
        'trim_anchor_align_batch_size'  => 20,
        'trim_anchor_align_capacity'    => 500,
    };
}

# Shell commands run once at pipeline creation; also (re)creates the
# working directories used by the dump/mapping steps.
sub pipeline_create_commands {
    my ($self) = @_;
    return [
        @{$self->SUPER::pipeline_create_commands},  # inheriting database and hive tables' creation
        $self->pipeline_create_commands_rm_mkdir(['work_dir', 'bed_dir', 'feature_dir', 'bl2seq_dump_dir']),
    ];
}

# Parameters visible to every analysis in the pipeline.
sub pipeline_wide_parameters {
    my $self = shift @_;
    return {
        %{$self->SUPER::pipeline_wide_parameters},

        # directories
        'work_dir'            => $self->o('work_dir'),
        'feature_dir'         => $self->o('feature_dir'),
        'enredo_output_file'  => $self->o('enredo_output_file'),
        'bed_dir'             => $self->o('bed_dir'),
        'genome_dumps_dir'    => $self->o('genome_dumps_dir'),
        'enredo_mapping_file' => $self->o('enredo_mapping_file'),
        'bl2seq_dump_dir'     => $self->o('bl2seq_dump_dir'),
        'bl2seq_file_stem'    => $self->o('bl2seq_file_stem'),

        # databases
        'compara_anchor_db' => $self->o('compara_anchor_db'),
        'master_db'         => $self->o('compara_master'),
        'reuse_db'          => $self->o('reuse_db'),
        'ancestral_db'      => $self->o('ancestral_db'),

        # options
        'run_gerp' => $self->o('run_gerp'),
    };
}

# The analysis graph: a seeding analysis (load_mlss_id) followed by the
# shared anchor-mapping and alignment analysis blocks from the Parts modules.
sub core_pipeline_analyses {
    my ($self) = @_;
    return [
        {   -logic_name => 'load_mlss_id',
            -module     => 'Bio::EnsEMBL::Compara::RunnableDB::LoadMLSSids',
            -parameters => {
                'method_type'      => $self->o('method_type'),
                'species_set_name' => $self->o('species_set_name'),
                'release'          => $self->o('ensembl_release'),
            },
            -input_ids  => [{}],
            # '1->A'/'A->1' is a Hive semaphore: the fan runs first, then
            # reuse_anchor_align_factory fires once the fan has finished.
            -flow_into  => {
                '1->A' => [ 'copy_table_factory', 'set_internal_ids', 'drop_ancestral_db' ],
                'A->1' => 'reuse_anchor_align_factory',
            }
        },

        @{ Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOMapAnchors::pipeline_analyses_epo_anchor_mapping($self) },
        @{ Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOAlignment::pipeline_analyses_epo_alignment($self) },
    ];
}

# Post-hoc rewiring of analyses inherited from the Parts modules.
sub tweak_analyses {
    my $self = shift;
    my $analyses_by_name = shift;

    # Move "make_species_tree" right after "create_mlss_ss" and disconnect it from "dump_mappings_to_file"
    $analyses_by_name->{'create_mlss_ss'}->{'-flow_into'} = [ 'make_species_tree' ];
    $analyses_by_name->{'make_species_tree'}->{'-flow_into'} = WHEN( '#run_gerp#' => [ 'set_gerp_neutral_rate' ] );
    delete $analyses_by_name->{'set_gerp_neutral_rate'}->{'-flow_into'}->{1};

    # Do "dump_mappings_to_file" after having trimmed the anchors
    $analyses_by_name->{'trim_anchor_align_factory'}->{'-flow_into'} = {
        '2->A' => $analyses_by_name->{'trim_anchor_align_factory'}->{'-flow_into'}->{2},
        'A->1' => [ 'dump_mappings_to_file' ],
    };
}

1;
Java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <axis2_http_simple_response.h>
#include <axis2_http_transport.h>
#include <axutil_string.h>
#include <stdio.h>
#include <string.h>
#include <axutil_types.h>
#include <axiom_mime_part.h>

/* Chunk size used when draining the body stream in get_body_bytes(). */
#define AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE 2048

/*
 * In-memory representation of an HTTP response: status line, a list of
 * axis2_http_header_t headers, the body stream (NOT owned by this struct),
 * optional MTOM mime parts, and the name of the MTOM sending callback.
 */
struct axis2_http_simple_response
{
    axis2_http_status_line_t *status_line;
    axutil_array_list_t *header_group;
    axutil_stream_t *stream;
    axutil_array_list_t *mime_parts;
    axis2_char_t *mtom_sending_callback_name;
};

/*
 * Creates a simple response pre-populated with a status line, an optional
 * header array, and a body stream.  Ownership of status_line and the
 * individual headers transfers to the response; the stream does not
 * (see axis2_http_simple_response_free).
 */
AXIS2_EXTERN axis2_http_simple_response_t *AXIS2_CALL
axis2_http_simple_response_create(
    const axutil_env_t * env,
    axis2_http_status_line_t * status_line,
    const axis2_http_header_t ** http_headers,
    const axis2_ssize_t http_hdr_count,
    axutil_stream_t * content)
{
    axis2_http_simple_response_t *ret = NULL;

    ret = axis2_http_simple_response_create_default(env);
    if(!ret)
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "axis2 http simple response creation failed");
        return NULL;
    }
    ret->status_line = status_line;
    if(http_hdr_count > 0 && http_headers)
    {
        int i = 0;
        ret->header_group = axutil_array_list_create(env, http_hdr_count);

        for(i = 0; i < (int)http_hdr_count; i++)
        /* We are sure that the difference lies within the int range */
        {
            axutil_array_list_add(ret->header_group, env, (void *)http_headers[i]);
        }
    }
    ret->stream = content;
    return ret;
}

/* Allocates a zero-initialised response with no status line, headers or body. */
AXIS2_EXTERN axis2_http_simple_response_t *AXIS2_CALL
axis2_http_simple_response_create_default(
    const axutil_env_t * env)
{
    axis2_http_simple_response_t *simple_response = NULL;

    simple_response = (axis2_http_simple_response_t *)AXIS2_MALLOC(env->allocator,
        sizeof(axis2_http_simple_response_t));
    if(!simple_response)
    {
        AXIS2_HANDLE_ERROR(env, AXIS2_ERROR_NO_MEMORY, AXIS2_FAILURE);
        return NULL;
    }
    memset((void *)simple_response, 0, sizeof(axis2_http_simple_response_t));
    return simple_response;
}

/*
 * Frees the response, its status line, all owned headers and mime parts.
 * The body stream is deliberately NOT freed: it is assumed to belong to
 * the caller (see set_body_stream).
 */
void AXIS2_CALL
axis2_http_simple_response_free(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    /* Defensive guard: freeing a NULL response is a harmless no-op. */
    if(!simple_response)
    {
        return;
    }
    if(simple_response->status_line)
    {
        axis2_http_status_line_free(simple_response->status_line, env);
    }
    if(simple_response->header_group)
    {
        int i = 0;
        for(i = 0; i < axutil_array_list_size(simple_response->header_group, env); i++)
        {
            void *tmp = NULL;
            tmp = axutil_array_list_get(simple_response->header_group, env, i);
            if(tmp)
            {
                axis2_http_header_free((axis2_http_header_t *)tmp, env);
            }
        }
        axutil_array_list_free(simple_response->header_group, env);
    }
    if(simple_response->mime_parts)
    {
        int i = 0;
        for(i = 0; i < axutil_array_list_size(simple_response->mime_parts, env); i++)
        {
            void *mime_part = NULL;
            mime_part = axutil_array_list_get(simple_response->mime_parts, env, i);
            if(mime_part)
            {
                axiom_mime_part_free((axiom_mime_part_t *)mime_part, env);
            }
        }
        axutil_array_list_free(simple_response->mime_parts, env);
    }
    /* Stream is not freed.
     * Assumption : stream doesn't belong to the response
     */
    AXIS2_FREE(env->allocator, simple_response);
}

/*
 * Replaces the status line with one built from the given HTTP version,
 * status code and reason phrase.  Any previous status line is freed.
 * Returns AXIS2_FAILURE when a required argument is missing or creation fails.
 */
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_status_line(
    struct axis2_http_simple_response * simple_response,
    const axutil_env_t * env,
    const axis2_char_t * http_ver,
    const int status_code,
    const axis2_char_t * phrase)
{
    if(!http_ver || !phrase || !status_code)
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "invalid parameter given");
        return AXIS2_FAILURE;
    }
    if(simple_response->status_line)
    {
        axis2_http_status_line_free(simple_response->status_line, env);
    }
    simple_response->status_line = axis2_http_status_line_create_with_values(env, http_ver,
        status_code, phrase);
    if(!simple_response->status_line)
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
            "http status line creation failed for string %s %3d %s", http_ver, status_code, phrase);
        return AXIS2_FAILURE;
    }
    return AXIS2_SUCCESS;
}

/* Returns the reason phrase of the status line, or NULL when no status line is set. */
axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_phrase(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    if(!(simple_response->status_line))
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
            "axis2 simple response , status line is not available");
        return NULL;
    }
    return axis2_http_status_line_get_reason_phrase(simple_response->status_line, env);
}

/* Returns the numeric status code, or -1 when no status line is set. */
int AXIS2_CALL
axis2_http_simple_response_get_status_code(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    if(!(simple_response->status_line))
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
            "axis2 simple response , status line is not available");
        return -1;
    }
    return axis2_http_status_line_get_status_code(simple_response->status_line, env);
}

/* Returns the HTTP version string of the status line, or NULL when unset. */
axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_http_version(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    if(!(simple_response->status_line))
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
            "axis2 simple response , status line is not available");
        return NULL;
    }
    return axis2_http_status_line_get_http_version(simple_response->status_line, env);
}

/* Overwrites the HTTP version on an existing status line; fails when none is set. */
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_http_version(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axis2_char_t *http_version)
{
    if(!(simple_response->status_line))
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
            "axis2 simple response , status line is not available");
        return AXIS2_FAILURE;
    }
    axis2_http_status_line_set_http_version(simple_response->status_line, env, http_version);
    return AXIS2_SUCCESS;
}

/* Returns the whole status line rendered as a string, or NULL when unset. */
axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_status_line(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    if(!(simple_response->status_line))
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
            "axis2 simple response , status line is not available");
        return NULL;
    }
    return axis2_http_status_line_to_string(simple_response->status_line, env);
}

/* Returns the internal header list (may be NULL); the response keeps ownership. */
AXIS2_EXTERN axutil_array_list_t *AXIS2_CALL
axis2_http_simple_response_get_headers(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    return simple_response->header_group;
}

/*
 * Detaches and returns the header list; the caller takes ownership and the
 * response is left with no headers.
 */
axutil_array_list_t *AXIS2_CALL
axis2_http_simple_response_extract_headers(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    axutil_array_list_t *temp = NULL;
    temp = simple_response->header_group;
    if(temp)
    {
        simple_response->header_group = NULL;
    }
    return temp;
}

/*
 * Returns the first header whose name matches str (case-insensitive),
 * or NULL when the header list is absent, empty, or has no match.
 */
axis2_http_header_t *AXIS2_CALL
axis2_http_simple_response_get_first_header(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    const axis2_char_t * str)
{
    axis2_http_header_t *tmp_header = NULL;
    axis2_char_t *tmp_name = NULL;
    int i = 0;
    int count = 0;
    axutil_array_list_t *header_group = NULL;

    AXIS2_PARAM_CHECK(env->error, str, NULL);
    header_group = simple_response->header_group;
    if(!simple_response->header_group)
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "axis2 simple response , headers not available");
        return NULL;
    }
    if(0 == axutil_array_list_size(header_group, env))
    {
        AXIS2_LOG_WARNING(env->log, AXIS2_LOG_SI, "axis2 simple response , contains zero headers");
        return NULL;
    }
    count = axutil_array_list_size(header_group, env);
    for(i = 0; i < count; i++)
    {
        tmp_header = (axis2_http_header_t *)axutil_array_list_get(header_group, env, i);
        tmp_name = axis2_http_header_get_name(tmp_header, env);
        if(0 == axutil_strcasecmp(str, tmp_name))
        {
            return tmp_header;
        }
    }
    return NULL;
}

/*
 * Removes (and frees) the first header matching str, case-insensitively.
 * Always returns AXIS2_SUCCESS: a missing list or header means the header
 * is already absent, which satisfies the caller's intent.
 */
axis2_status_t AXIS2_CALL
axis2_http_simple_response_remove_headers(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    const axis2_char_t * str)
{
    axutil_array_list_t *header_group = NULL;
    int i = 0;
    int count = 0;

    AXIS2_PARAM_CHECK(env->error, str, AXIS2_FAILURE);
    header_group = simple_response->header_group;
    if(!header_group)
    {
        /* Even though we couldn't complete the op, we are sure that the
         * required header is no more in the request. So we can proceed without a
         * problem. */
        return AXIS2_SUCCESS;
    }
    count = axutil_array_list_size(header_group, env);
    for(i = 0; i < count; i++)
    {
        axis2_http_header_t *tmp_header = NULL;
        axis2_char_t *tmp_name = NULL;
        tmp_header = (axis2_http_header_t *)axutil_array_list_get(header_group, env, i);
        tmp_name = axis2_http_header_get_name(tmp_header, env);
        if(0 == axutil_strcasecmp(str, tmp_name))
        {
            axis2_http_header_free(tmp_header, env);
            axutil_array_list_remove(header_group, env, i);
            break;
        }
    }
    return AXIS2_SUCCESS;
}

/*
 * Adds a header, replacing (and freeing) any existing header of the same
 * name (case-insensitive).  The response takes ownership of 'header'.
 */
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_header(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axis2_http_header_t * header)
{
    int i = 0;
    int count = 0;
    axutil_array_list_t *header_group = NULL;

    AXIS2_PARAM_CHECK(env->error, header, AXIS2_FAILURE);
    if(!simple_response->header_group)
    {
        simple_response->header_group = axutil_array_list_create(env, 10);
        axutil_array_list_add(simple_response->header_group, env, header);
        return AXIS2_SUCCESS;
    }
    /* If a header with the same name exists search and remove the old header */
    header_group = simple_response->header_group;
    count = axutil_array_list_size(header_group, env);
    for(i = 0; i < count; i++)
    {
        axis2_http_header_t *tmp_header = NULL;
        axis2_char_t *tmp_name = NULL;
        tmp_header = (axis2_http_header_t *)axutil_array_list_get(header_group, env, i);
        tmp_name = axis2_http_header_get_name(tmp_header, env);
        if(0 == axutil_strcasecmp(axis2_http_header_get_name(header, env), tmp_name))
        {
            axis2_http_header_free(tmp_header, env);
            axutil_array_list_set(header_group, env, i, header);
            return AXIS2_SUCCESS;
        }
    }
    /* if header is not found, then we have to add it */
    axutil_array_list_add(header_group, env, header);
    return AXIS2_SUCCESS;
}

/*
 * Returns the charset portion of the Content-Type header, starting at the
 * '=' sign (original behaviour preserved), or the default charset when the
 * header or the charset parameter is missing.
 */
const axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_charset(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    axis2_http_header_t *tmp_header = NULL;
    tmp_header = axis2_http_simple_response_get_first_header(simple_response, env,
        AXIS2_HTTP_HEADER_CONTENT_TYPE);
    if(tmp_header)
    {
        axis2_char_t *value = axis2_http_header_get_value(tmp_header, env);
        axis2_char_t *charset = (axis2_char_t *)strstr((char *)value,
            (char *)AXIS2_HTTP_CHAR_SET_ENCODING);
        if(charset)
        {
            charset = strchr((char *)charset, AXIS2_EQ);
            return charset;
        }
    }
    return AXIS2_HTTP_DEFAULT_CONTENT_CHARSET;
}

/* Returns the Content-Length header parsed as an integer, or -1 when absent. */
axis2_ssize_t AXIS2_CALL
axis2_http_simple_response_get_content_length(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    axis2_http_header_t *tmp_header = NULL;
    int error_return = -1;

    tmp_header = axis2_http_simple_response_get_first_header(simple_response, env,
        AXIS2_HTTP_HEADER_CONTENT_LENGTH);
    if(tmp_header)
    {
        return AXIS2_ATOI(axis2_http_header_get_value(tmp_header, env));
    }
    return error_return;
}

/* Returns the Content-Type header value, or text/plain when absent. */
const axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_content_type(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    axis2_http_header_t *tmp_header = NULL;
    tmp_header = axis2_http_simple_response_get_first_header(simple_response, env,
        AXIS2_HTTP_HEADER_CONTENT_TYPE);
    if(tmp_header)
    {
        return axis2_http_header_get_value(tmp_header, env);
    }
    return AXIS2_HTTP_HEADER_ACCEPT_TEXT_PLAIN;
}

/*
 * Appends str to the body stream, lazily creating a basic in-memory stream
 * (owned by the response only in the sense that set_body_stream installed it;
 * free() still never releases it).
 */
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_body_string(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axis2_char_t * str)
{
    axutil_stream_t *body_stream = NULL;

    AXIS2_PARAM_CHECK(env->error, str, AXIS2_FAILURE);
    body_stream = simple_response->stream;
    if(!body_stream)
    {
        body_stream = axutil_stream_create_basic(env);
        if(!body_stream)
        {
            AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
                "unable to create basic stream for string %s", str);
            return AXIS2_FAILURE;
        }
        simple_response->stream = body_stream;
    }
    axutil_stream_write(body_stream, env, str, axutil_strlen(str));
    return AXIS2_SUCCESS;
}

/* Installs a body stream; the stream remains owned by the caller. */
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_body_stream(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axutil_stream_t * stream)
{
    /*
     * We don't free the stream
     * Problem in freeing is most of the time the stream doesn't belong
     * to the http_simple_response
     */
    simple_response->stream = stream;
    return AXIS2_SUCCESS;
}

/* Returns the body stream (may be NULL); the response does not own it. */
axutil_stream_t *AXIS2_CALL
axis2_http_simple_response_get_body(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    return simple_response->stream;
}

/*
 * Drains the body stream into a freshly allocated buffer (*buffer, caller
 * frees with AXIS2_FREE) and returns the number of bytes read, or -1 on
 * error.  *buffer is only written when the body is non-empty.
 */
axis2_ssize_t AXIS2_CALL
axis2_http_simple_response_get_body_bytes(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axis2_char_t ** buffer)
{
    axutil_stream_t *tmp_stream = NULL;
    axis2_bool_t loop_state = AXIS2_TRUE;
    int return_size = -1;

    if(!simple_response->stream)
    {
        AXIS2_HANDLE_ERROR(env, AXIS2_ERROR_NULL_BODY, AXIS2_FAILURE);
        return return_size;
    }
    tmp_stream = axutil_stream_create_basic(env);
    while(loop_state)
    {
        int read = 0;
        /*int write = 0;*/
        char buf[AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE];
        read = axutil_stream_read(simple_response->stream, env, buf,
            AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE);
        if(read < 0)
        {
            break;
        }
        /*write = */axutil_stream_write(tmp_stream, env, buf, read);
        if(read < (AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE - 1))
        {
            break;
        }
    }
    return_size = axutil_stream_get_len(tmp_stream, env);

    if(return_size > 0)
    {
        *buffer = (char *)AXIS2_MALLOC(env->allocator, sizeof(char) * (return_size + 1));
        /* BUGFIX: test the allocation result (*buffer), not the out-parameter
         * pointer itself, and free the temporary stream on the error path
         * instead of leaking it. */
        if(!*buffer)
        {
            AXIS2_HANDLE_ERROR(env, AXIS2_ERROR_NO_MEMORY, AXIS2_FAILURE);
            axutil_stream_free(tmp_stream, env);
            return -1;
        }
        return_size = axutil_stream_read(tmp_stream, env, *buffer, return_size + 1);
    }
    axutil_stream_free(tmp_stream, env);
    return return_size;
}

/* Returns AXIS2_TRUE when a header named 'name' (case-insensitive) exists. */
axis2_bool_t AXIS2_CALL
axis2_http_simple_response_contains_header(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    const axis2_char_t * name)
{
    axis2_char_t *header_name = NULL;
    int count = 0;
    int i = 0;

    AXIS2_PARAM_CHECK(env->error, name, AXIS2_FAILURE);
    if(!simple_response->header_group)
    {
        AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "axis2 simple response , headers not available");
        return AXIS2_FALSE;
    }
    count = axutil_array_list_size(simple_response->header_group, env);
    if(0 == count)
    {
        AXIS2_LOG_WARNING(env->log, AXIS2_LOG_SI, "axis2 simple response , contains zero headers");
        return AXIS2_FALSE;
    }
    for(i = 0; i < count; i++)
    {
        axis2_http_header_t *header = (axis2_http_header_t *)axutil_array_list_get(
            simple_response->header_group, env, i);
        header_name = axis2_http_header_get_name(header, env);
        if(0 == axutil_strcasecmp(name, header_name))
        {
            return AXIS2_TRUE;
        }
    }
    return AXIS2_FALSE;
}

/* Returns the MTOM mime part list (may be NULL); the response owns it. */
AXIS2_EXTERN axutil_array_list_t *AXIS2_CALL
axis2_http_simple_response_get_mime_parts(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    return simple_response->mime_parts;
}

/* Installs the MTOM mime part list; the response takes ownership (freed in free()). */
void AXIS2_EXTERN AXIS2_CALL
axis2_http_simple_response_set_mime_parts(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axutil_array_list_t *mime_parts)
{
    simple_response->mime_parts = mime_parts;
}

/* Returns the MTOM sending callback name (may be NULL); not owned. */
AXIS2_EXTERN axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_mtom_sending_callback_name(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env)
{
    return simple_response->mtom_sending_callback_name;
}

/* Stores the MTOM sending callback name; the string is not copied or owned. */
void AXIS2_EXTERN AXIS2_CALL
axis2_http_simple_response_set_mtom_sending_callback_name(
    axis2_http_simple_response_t * simple_response,
    const axutil_env_t * env,
    axis2_char_t *mtom_sending_callback_name)
{
    simple_response->mtom_sending_callback_name = mtom_sending_callback_name;
}
Java