/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.executiongraph;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.api.common.Archiveable;
import org.apache.flink.api.common.InputDependencyConstraint;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.core.io.InputSplit;
import org.apache.flink.runtime.JobException;
import org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.CheckpointType;
import org.apache.flink.runtime.checkpoint.JobManagerTaskRestore;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.clusterframework.types.ResourceProfile;
import org.apache.flink.runtime.clusterframework.types.SlotProfile;
import org.apache.flink.runtime.concurrent.ComponentMainThreadExecutor;
import org.apache.flink.runtime.concurrent.FutureUtils;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory;
import org.apache.flink.runtime.execution.ExecutionState;
import org.apache.flink.runtime.instance.SlotSharingGroupId;
import org.apache.flink.runtime.io.network.partition.PartitionTracker;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.LocationPreferenceConstraint;
import org.apache.flink.runtime.jobmanager.scheduler.NoResourceAvailableException;
import org.apache.flink.runtime.jobmanager.scheduler.ScheduledUnit;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;
import org.apache.flink.runtime.jobmanager.slots.TaskManagerGateway;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.SlotRequestId;
import org.apache.flink.runtime.jobmaster.slotpool.SlotProvider;
import org.apache.flink.runtime.messages.Acknowledge;
import org.apache.flink.runtime.messages.StackTraceSampleResponse;
import org.apache.flink.runtime.shuffle.PartitionDescriptor;
import org.apache.flink.runtime.shuffle.ProducerDescriptor;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.FlinkRuntimeException;
import org.apache.flink.util.OptionalFailure;
import org.apache.flink.util.function.ThrowingRunnable;
import org.slf4j.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.apache.flink.runtime.deployment.TaskDeploymentDescriptorFactory.getConsumedPartitionShuffleDescriptor;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELED;
import static org.apache.flink.runtime.execution.ExecutionState.CANCELING;
import static org.apache.flink.runtime.execution.ExecutionState.CREATED;
import static org.apache.flink.runtime.execution.ExecutionState.DEPLOYING;
import static org.apache.flink.runtime.execution.ExecutionState.FAILED;
import static org.apache.flink.runtime.execution.ExecutionState.FINISHED;
import static org.apache.flink.runtime.execution.ExecutionState.RUNNING;
import static org.apache.flink.runtime.execution.ExecutionState.SCHEDULED;
import static org.apache.flink.util.Preconditions.checkNotNull;
/**
* A single execution of a vertex. While an {@link ExecutionVertex} can be executed multiple times
* (for recovery, re-computation, re-configuration), this class tracks the state of a single execution
* of that vertex and the resources.
*
* <h2>Lock free state transitions</h2>
*
* <p>At several points in the code, we need to deal with possible concurrent state changes and actions.
* For example, the task may get cancelled while the call to deploy it (sending it to the TaskManager) is in progress.
*
* <p>We could lock the entire portion of the code (decision to deploy, deploy, set state to running) such that
* it is guaranteed that any "cancel command" will only pick up after deployment is done and that the "cancel
* command" call will never overtake the deploying call.
*
* <p>This would block the threads for a long time, because the remote calls may take long. Depending on their locking behavior, it
* may even result in distributed deadlocks (unless carefully avoided). We therefore use atomic state updates and
* occasional double-checking to ensure that the state after a completed call is as expected, and trigger correcting
* actions if it is not. Many actions are also idempotent (like canceling).
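*
* <p>The core of this pattern is a single atomic compare-and-swap on the {@code state} field,
* followed by a double-check, sketched here in simplified, purely illustrative form:
* <pre>{@code
* if (STATE_UPDATER.compareAndSet(this, DEPLOYING, RUNNING)) {
*     // we won the race: perform the follow-up actions for RUNNING
* } else {
*     // a concurrent cancel/failure happened: re-read 'state' and take corrective action
* }
* }</pre>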
*/
public class Execution implements AccessExecution, Archiveable<ArchivedExecution>, LogicalSlot.Payload {
private static final AtomicReferenceFieldUpdater<Execution, ExecutionState> STATE_UPDATER =
AtomicReferenceFieldUpdater.newUpdater(Execution.class, ExecutionState.class, "state");
private static final AtomicReferenceFieldUpdater<Execution, LogicalSlot> ASSIGNED_SLOT_UPDATER = AtomicReferenceFieldUpdater.newUpdater(
Execution.class,
LogicalSlot.class,
"assignedResource");
private static final Logger LOG = ExecutionGraph.LOG;
private static final int NUM_CANCEL_CALL_TRIES = 3;
private static final int NUM_STOP_CALL_TRIES = 3;
// --------------------------------------------------------------------------------------------
/** The executor which is used to execute futures. */
private final Executor executor;
/** The execution vertex whose task this execution executes. */
private final ExecutionVertex vertex;
/** The unique ID marking the specific execution attempt of the task. */
private final ExecutionAttemptID attemptId;
/** The global modification version of the execution graph when this execution was created.
* This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions. */
private final long globalModVersion;
/** The timestamps when state transitions occurred, indexed by {@link ExecutionState#ordinal()}. */
private final long[] stateTimestamps;
private final int attemptNumber;
private final Time rpcTimeout;
private final Collection<PartitionInfo> partitionInfos;
/** A future that completes once the Execution reaches a terminal ExecutionState. */
private final CompletableFuture<ExecutionState> terminalStateFuture;
private final CompletableFuture<?> releaseFuture;
private final CompletableFuture<TaskManagerLocation> taskManagerLocationFuture;
private volatile ExecutionState state = CREATED;
private volatile LogicalSlot assignedResource;
private volatile Throwable failureCause; // once assigned, never changes
/** Information to restore the task on recovery, such as checkpoint id and task state snapshot. */
@Nullable
private volatile JobManagerTaskRestore taskRestore;
/** This field holds the allocation id once it was assigned successfully. */
@Nullable
private volatile AllocationID assignedAllocationID;
// ------------------------ Accumulators & Metrics ------------------------
/** Lock for updating the accumulators atomically.
* Prevents final accumulators from being overwritten by partial accumulators from a late heartbeat. */
private final Object accumulatorLock = new Object();
/* Continuously updated map of user-defined accumulators */
private volatile Map<String, Accumulator<?, ?>> userAccumulators;
private volatile IOMetrics ioMetrics;
private Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions;
// --------------------------------------------------------------------------------------------
/**
* Creates a new Execution attempt.
*
* @param executor
* The executor used to dispatch callbacks from futures and asynchronous RPC calls.
* @param vertex
* The execution vertex to which this Execution belongs
* @param attemptNumber
* The execution attempt number.
* @param globalModVersion
* The global modification version of the execution graph when this execution was created
* @param startTimestamp
* The timestamp that marks the creation of this Execution
* @param rpcTimeout
* The rpcTimeout for RPC calls like deploy/cancel/stop.
*/
public Execution(
Executor executor,
ExecutionVertex vertex,
int attemptNumber,
long globalModVersion,
long startTimestamp,
Time rpcTimeout) {
this.executor = checkNotNull(executor);
this.vertex = checkNotNull(vertex);
this.attemptId = new ExecutionAttemptID();
this.rpcTimeout = checkNotNull(rpcTimeout);
this.globalModVersion = globalModVersion;
this.attemptNumber = attemptNumber;
this.stateTimestamps = new long[ExecutionState.values().length];
markTimestamp(CREATED, startTimestamp);
this.partitionInfos = new ArrayList<>(16);
this.producedPartitions = Collections.emptyMap();
this.terminalStateFuture = new CompletableFuture<>();
this.releaseFuture = new CompletableFuture<>();
this.taskManagerLocationFuture = new CompletableFuture<>();
this.assignedResource = null;
}
// --------------------------------------------------------------------------------------------
// Properties
// --------------------------------------------------------------------------------------------
public ExecutionVertex getVertex() {
return vertex;
}
@Override
public ExecutionAttemptID getAttemptId() {
return attemptId;
}
@Override
public int getAttemptNumber() {
return attemptNumber;
}
@Override
public ExecutionState getState() {
return state;
}
@Nullable
public AllocationID getAssignedAllocationID() {
return assignedAllocationID;
}
/**
* Gets the global modification version of the execution graph when this execution was created.
*
* <p>This version is bumped in the ExecutionGraph whenever a global failover happens. It is used
* to resolve conflicts between concurrent modification by global and local failover actions.
*/
public long getGlobalModVersion() {
return globalModVersion;
}
public CompletableFuture<TaskManagerLocation> getTaskManagerLocationFuture() {
return taskManagerLocationFuture;
}
public LogicalSlot getAssignedResource() {
return assignedResource;
}
public Optional<ResultPartitionDeploymentDescriptor> getResultPartitionDeploymentDescriptor(
IntermediateResultPartitionID id) {
return Optional.ofNullable(producedPartitions.get(id));
}
/**
* Tries to assign the given slot to the execution. The assignment works only if the
* Execution is in state SCHEDULED. Returns true if the resource could be assigned.
*
* @param logicalSlot to assign to this execution
* @return true if the slot could be assigned to the execution, otherwise false
*/
@VisibleForTesting
boolean tryAssignResource(final LogicalSlot logicalSlot) {
assertRunningInJobMasterMainThread();
checkNotNull(logicalSlot);
// only allow to set the assigned resource in state SCHEDULED or CREATED
// note: we also accept resource assignment when being in state CREATED for testing purposes
if (state == SCHEDULED || state == CREATED) {
if (ASSIGNED_SLOT_UPDATER.compareAndSet(this, null, logicalSlot)) {
if (logicalSlot.tryAssignPayload(this)) {
// check for concurrent modification (e.g. cancelling call)
if ((state == SCHEDULED || state == CREATED) && !taskManagerLocationFuture.isDone()) {
taskManagerLocationFuture.complete(logicalSlot.getTaskManagerLocation());
assignedAllocationID = logicalSlot.getAllocationId();
return true;
} else {
// free assigned resource and return false
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
ASSIGNED_SLOT_UPDATER.set(this, null);
return false;
}
} else {
// this execution has already been assigned a different slot
return false;
}
} else {
// do not allow resource assignment if we are not in state SCHEDULED
return false;
}
}
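/**
* Gets the next input split for this execution's vertex, passing the hostname of the
* currently assigned TaskManager (if any) so that splits can be assigned with locality preference.
*/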
public InputSplit getNextInputSplit() {
final LogicalSlot slot = this.getAssignedResource();
final String host = slot != null ? slot.getTaskManagerLocation().getHostname() : null;
return this.vertex.getNextInputSplit(host);
}
@Override
public TaskManagerLocation getAssignedResourceLocation() {
// returns non-null only when a location is already assigned
final LogicalSlot currentAssignedResource = assignedResource;
return currentAssignedResource != null ? currentAssignedResource.getTaskManagerLocation() : null;
}
public Throwable getFailureCause() {
return failureCause;
}
@Override
public String getFailureCauseAsString() {
return ExceptionUtils.stringifyException(getFailureCause());
}
@Override
public long[] getStateTimestamps() {
return stateTimestamps;
}
@Override
public long getStateTimestamp(ExecutionState state) {
return this.stateTimestamps[state.ordinal()];
}
public boolean isFinished() {
return state.isTerminal();
}
@Nullable
public JobManagerTaskRestore getTaskRestore() {
return taskRestore;
}
/**
* Sets the initial state for the execution. The serialized state is then shipped via the
* {@link TaskDeploymentDescriptor} to the TaskManagers.
*
* @param taskRestore information to restore the state
*/
public void setInitialState(@Nullable JobManagerTaskRestore taskRestore) {
this.taskRestore = taskRestore;
}
/**
* Gets a future that completes once the task execution reaches a terminal state.
* The future will be completed with the specific state that the execution reached.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the execution reaches a terminal state
*/
@Override
public CompletableFuture<ExecutionState> getTerminalStateFuture() {
return terminalStateFuture;
}
/**
* Gets the release future which is completed once the execution reaches a terminal
* state and the assigned resource has been released.
* This future is always completed from the job master's main thread.
*
* @return A future which is completed once the assigned resource has been released
*/
public CompletableFuture<?> getReleaseFuture() {
return releaseFuture;
}
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
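/**
* Schedules this execution using the slot provider and queued-scheduling setting of the
* owning ExecutionGraph, with no location preference constraint.
*
* @return Future which is completed once the Execution has been deployed
*/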
public CompletableFuture<Void> scheduleForExecution() {
final ExecutionGraph executionGraph = getVertex().getExecutionGraph();
final SlotProvider resourceProvider = executionGraph.getSlotProvider();
final boolean allowQueued = executionGraph.isQueuedSchedulingAllowed();
return scheduleForExecution(
resourceProvider,
allowQueued,
LocationPreferenceConstraint.ANY,
Collections.emptySet());
}
/**
* NOTE: This method only throws exceptions if it is in an illegal state to be scheduled, or if the task needs
* to be scheduled immediately and no resource is available. If the task is accepted by the scheduler, any
* error sets the vertex state to failed and triggers the recovery logic.
*
* @param slotProvider The slot provider to use to allocate slot for this execution attempt.
* @param queued Flag to indicate whether the scheduler may queue this task if it cannot
* immediately deploy it.
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @return Future which is completed once the Execution has been deployed
*/
public CompletableFuture<Void> scheduleForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds) {
assertRunningInJobMasterMainThread();
final ExecutionGraph executionGraph = vertex.getExecutionGraph();
final Time allocationTimeout = executionGraph.getAllocationTimeout();
try {
final CompletableFuture<Execution> allocationFuture = allocateResourcesForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout);
final CompletableFuture<Void> deploymentFuture;
if (allocationFuture.isDone() || queued) {
deploymentFuture = allocationFuture.thenRun(ThrowingRunnable.unchecked(this::deploy));
} else {
deploymentFuture = FutureUtils.completedExceptionally(
new IllegalArgumentException("The slot allocation future has not been completed yet."));
}
deploymentFuture.whenComplete(
(Void ignored, Throwable failure) -> {
if (failure != null) {
final Throwable stripCompletionException = ExceptionUtils.stripCompletionException(failure);
final Throwable schedulingFailureCause;
if (stripCompletionException instanceof TimeoutException) {
schedulingFailureCause = new NoResourceAvailableException(
"Could not allocate enough slots within timeout of " + allocationTimeout + " to run the job. " +
"Please make sure that the cluster has enough resources.");
} else {
schedulingFailureCause = stripCompletionException;
}
markFailed(schedulingFailureCause);
}
});
return deploymentFuture;
} catch (IllegalExecutionStateException e) {
return FutureUtils.completedExceptionally(e);
}
}
/**
* Allocates resources for the execution.
*
* <p>Allocates the following resources:
* <ol>
* <li>a slot obtained from the slot provider</li>
* <li>registration of the produced partitions with the {@link org.apache.flink.runtime.shuffle.ShuffleMaster}</li>
* </ol>
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with this execution once the slot has been assigned
* or with an exception if an error occurred.
*/
CompletableFuture<Execution> allocateResourcesForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
return allocateAndAssignSlotForExecution(
slotProvider,
queued,
locationPreferenceConstraint,
allPreviousExecutionGraphAllocationIds,
allocationTimeout)
.thenCompose(slot -> registerProducedPartitions(slot.getTaskManagerLocation()));
}
/**
* Allocates and assigns a slot obtained from the slot provider to the execution.
*
* @param slotProvider to obtain a new slot from
* @param queued if the allocation can be queued
* @param locationPreferenceConstraint constraint for the location preferences
* @param allPreviousExecutionGraphAllocationIds set with all previous allocation ids in the job graph.
* Can be empty if the allocation ids are not required for scheduling.
* @param allocationTimeout rpcTimeout for allocating a new slot
* @return Future which is completed with the allocated slot once it has been assigned
* or with an exception if an error occurred.
*/
private CompletableFuture<LogicalSlot> allocateAndAssignSlotForExecution(
SlotProvider slotProvider,
boolean queued,
LocationPreferenceConstraint locationPreferenceConstraint,
@Nonnull Set<AllocationID> allPreviousExecutionGraphAllocationIds,
Time allocationTimeout) {
checkNotNull(slotProvider);
assertRunningInJobMasterMainThread();
final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup();
final CoLocationConstraint locationConstraint = vertex.getLocationConstraint();
// sanity check
if (locationConstraint != null && sharingGroup == null) {
throw new IllegalStateException(
"Trying to schedule with co-location constraint but without slot sharing allowed.");
}
// this method only works if the execution is in the state 'CREATED'
if (transitionState(CREATED, SCHEDULED)) {
final SlotSharingGroupId slotSharingGroupId = sharingGroup != null ? sharingGroup.getSlotSharingGroupId() : null;
ScheduledUnit toSchedule = locationConstraint == null ?
new ScheduledUnit(this, slotSharingGroupId) :
new ScheduledUnit(this, slotSharingGroupId, locationConstraint);
// try to extract previous allocation ids, if applicable, so that we can reschedule to the same slot
ExecutionVertex executionVertex = getVertex();
AllocationID lastAllocation = executionVertex.getLatestPriorAllocation();
Collection<AllocationID> previousAllocationIDs =
lastAllocation != null ? Collections.singletonList(lastAllocation) : Collections.emptyList();
// calculate the preferred locations
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture =
calculatePreferredLocations(locationPreferenceConstraint);
final SlotRequestId slotRequestId = new SlotRequestId();
final CompletableFuture<LogicalSlot> logicalSlotFuture =
preferredLocationsFuture.thenCompose(
(Collection<TaskManagerLocation> preferredLocations) ->
slotProvider.allocateSlot(
slotRequestId,
toSchedule,
new SlotProfile(
ResourceProfile.UNKNOWN,
preferredLocations,
previousAllocationIDs,
allPreviousExecutionGraphAllocationIds),
queued,
allocationTimeout));
// register call back to cancel slot request in case that the execution gets canceled
releaseFuture.whenComplete(
(Object ignored, Throwable throwable) -> {
if (logicalSlotFuture.cancel(false)) {
slotProvider.cancelSlotRequest(
slotRequestId,
slotSharingGroupId,
new FlinkException("Execution " + this + " was released."));
}
});
// This forces calls to the slot pool back into the main thread, for normal and exceptional completion
return logicalSlotFuture.handle(
(LogicalSlot logicalSlot, Throwable failure) -> {
if (failure != null) {
throw new CompletionException(failure);
}
if (tryAssignResource(logicalSlot)) {
return logicalSlot;
} else {
// release the slot
logicalSlot.releaseSlot(new FlinkException("Could not assign logical slot to execution " + this + '.'));
throw new CompletionException(
new FlinkException(
"Could not assign slot " + logicalSlot + " to execution " + this + " because it has already been assigned "));
}
});
} else {
// call race, already deployed, or already done
throw new IllegalExecutionStateException(this, CREATED, state);
}
}
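/**
* Registers the produced partitions of this execution with the shuffle master, caches the
* resulting deployment descriptors, and starts tracking the partitions.
*/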
@VisibleForTesting
CompletableFuture<Execution> registerProducedPartitions(TaskManagerLocation location) {
assertRunningInJobMasterMainThread();
return FutureUtils.thenApplyAsyncIfNotDone(
registerProducedPartitions(vertex, location, attemptId),
vertex.getExecutionGraph().getJobMasterMainThreadExecutor(),
producedPartitionsCache -> {
producedPartitions = producedPartitionsCache;
startTrackingPartitions(location.getResourceID(), producedPartitionsCache.values());
return this;
});
}
@VisibleForTesting
static CompletableFuture<Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>> registerProducedPartitions(
ExecutionVertex vertex,
TaskManagerLocation location,
ExecutionAttemptID attemptId) {
ProducerDescriptor producerDescriptor = ProducerDescriptor.create(location, attemptId);
boolean lazyScheduling = vertex.getExecutionGraph().getScheduleMode().allowLazyDeployment();
Collection<IntermediateResultPartition> partitions = vertex.getProducedPartitions().values();
Collection<CompletableFuture<ResultPartitionDeploymentDescriptor>> partitionRegistrations =
new ArrayList<>(partitions.size());
for (IntermediateResultPartition partition : partitions) {
PartitionDescriptor partitionDescriptor = PartitionDescriptor.from(partition);
int maxParallelism = getPartitionMaxParallelism(partition);
CompletableFuture<? extends ShuffleDescriptor> shuffleDescriptorFuture = vertex
.getExecutionGraph()
.getShuffleMaster()
.registerPartitionWithProducer(partitionDescriptor, producerDescriptor);
final boolean releasePartitionOnConsumption =
vertex.getExecutionGraph().isForcePartitionReleaseOnConsumption()
|| !partitionDescriptor.getPartitionType().isBlocking();
CompletableFuture<ResultPartitionDeploymentDescriptor> partitionRegistration = shuffleDescriptorFuture
.thenApply(shuffleDescriptor -> new ResultPartitionDeploymentDescriptor(
partitionDescriptor,
shuffleDescriptor,
maxParallelism,
lazyScheduling,
releasePartitionOnConsumption
? ShuffleDescriptor.ReleaseType.AUTO
: ShuffleDescriptor.ReleaseType.MANUAL));
partitionRegistrations.add(partitionRegistration);
}
return FutureUtils.combineAll(partitionRegistrations).thenApply(rpdds -> {
Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor> producedPartitions =
new LinkedHashMap<>(partitions.size());
rpdds.forEach(rpdd -> producedPartitions.put(rpdd.getPartitionId(), rpdd));
return producedPartitions;
});
}
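/**
* Determines the max parallelism of the consumer vertex of the given partition.
* Falls back to the upper bound if the partition has no consumers (which only happens in tests).
*/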
private static int getPartitionMaxParallelism(IntermediateResultPartition partition) {
// TODO consumers.isEmpty() only exists for test, currently there has to be exactly one consumer in real jobs!
final List<List<ExecutionEdge>> consumers = partition.getConsumers();
int maxParallelism = KeyGroupRangeAssignment.UPPER_BOUND_MAX_PARALLELISM;
if (!consumers.isEmpty()) {
List<ExecutionEdge> consumer = consumers.get(0);
ExecutionJobVertex consumerVertex = consumer.get(0).getTarget().getJobVertex();
maxParallelism = consumerVertex.getMaxParallelism();
}
return maxParallelism;
}
/**
* Deploys the execution to the previously assigned resource.
*
* @throws JobException if the execution cannot be deployed to the assigned resource
*/
public void deploy() throws JobException {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
checkNotNull(slot, "In order to deploy the execution we first have to assign a resource via tryAssignResource.");
// Check if the TaskManager died in the meantime
// This only speeds up the response to TaskManagers failing concurrently with deployments.
// The more general check is the rpcTimeout of the deployment call
if (!slot.isAlive()) {
throw new JobException("Target slot (TaskManager) for deployment is no longer alive.");
}
// make sure exactly one deployment call happens from the correct state
// note: the transition from CREATED to DEPLOYING is for testing purposes only
ExecutionState previous = this.state;
if (previous == SCHEDULED || previous == CREATED) {
if (!transitionState(previous, DEPLOYING)) {
// race condition, someone else beat us to the deploying call.
// this should actually not happen and indicates a race somewhere else
throw new IllegalStateException("Cannot deploy task: Concurrent deployment call race.");
}
}
else {
// vertex may have been cancelled, or it was already scheduled
throw new IllegalStateException("The vertex must be in CREATED or SCHEDULED state to be deployed. Found state " + previous);
}
if (this != slot.getPayload()) {
throw new IllegalStateException(
String.format("The execution %s has not been assigned to the assigned slot.", this));
}
try {
// race double check, did we fail/cancel and do we need to release the slot?
if (this.state != DEPLOYING) {
slot.releaseSlot(new FlinkException("Actual state of execution " + this + " (" + state + ") does not match expected state DEPLOYING."));
return;
}
if (LOG.isInfoEnabled()) {
LOG.info(String.format("Deploying %s (attempt #%d) to %s", vertex.getTaskNameWithSubtaskIndex(),
attemptNumber, getAssignedResourceLocation()));
}
final TaskDeploymentDescriptor deployment = TaskDeploymentDescriptorFactory
.fromExecutionVertex(vertex, attemptNumber)
.createDeploymentDescriptor(
slot.getAllocationId(),
slot.getPhysicalSlotNumber(),
taskRestore,
producedPartitions.values());
// null taskRestore to let it be GC'ed
taskRestore = null;
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
vertex.getExecutionGraph().getJobMasterMainThreadExecutor();
// We run the submission in the future executor so that the serialization of large TDDs does not block
// the main thread and sync back to the main thread once submission is completed.
CompletableFuture.supplyAsync(() -> taskManagerGateway.submitTask(deployment, rpcTimeout), executor)
.thenCompose(Function.identity())
.whenCompleteAsync(
(ack, failure) -> {
// only respond to the failure case
if (failure != null) {
if (failure instanceof TimeoutException) {
String taskname = vertex.getTaskNameWithSubtaskIndex() + " (" + attemptId + ')';
markFailed(new Exception(
"Cannot deploy task " + taskname + " - TaskManager (" + getAssignedResourceLocation()
+ ") not responding after a rpcTimeout of " + rpcTimeout, failure));
} else {
markFailed(failure);
}
}
},
jobMasterMainThreadExecutor);
}
catch (Throwable t) {
markFailed(t);
ExceptionUtils.rethrow(t);
}
}
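/**
* Cancels this execution. Depending on the current state this either transitions directly
* to CANCELED (nothing has been deployed yet) or transitions to CANCELING and sends a
* cancel RPC call to the TaskManager.
*/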
public void cancel() {
// depending on the previous state, we go directly to cancelled (no cancel call necessary)
// -- or to canceling (cancel call needs to be sent to the task manager)
// because of several possibly previous states, we need to again loop until we make a
// successful atomic state transition
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == CANCELING || current == CANCELED) {
// already taken care of, no need to cancel again
return;
}
// these two are the common cases where we need to send a cancel call
else if (current == RUNNING || current == DEPLOYING) {
// try to transition to canceling, if successful, send the cancel call
if (startCancelling(NUM_CANCEL_CALL_TRIES)) {
return;
}
// else: fall through the loop
}
else if (current == FINISHED || current == FAILED) {
// nothing to do any more. finished/failed before it could be cancelled.
// in any case, the task is removed from the TaskManager already
return;
}
else if (current == CREATED || current == SCHEDULED) {
// from here, we can directly switch to cancelled, because no task has been deployed
if (cancelAtomically()) {
return;
}
// else: fall through the loop
}
else {
throw new IllegalStateException(current.name());
}
}
}
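/**
* Suspends this execution. Unlike a regular cancellation, this does not wait for a
* TaskManager response but completes the cancellation immediately.
*
* @return Future which is completed once the assigned resource has been released
*/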
public CompletableFuture<?> suspend() {
switch(state) {
case RUNNING:
case DEPLOYING:
case CREATED:
case SCHEDULED:
if (!cancelAtomically()) {
throw new IllegalStateException(
String.format("Could not directly go to %s from %s.", CANCELED.name(), state.name()));
}
break;
case CANCELING:
completeCancelling();
break;
case FINISHED:
case FAILED:
case CANCELED:
break;
default:
throw new IllegalStateException(state.name());
}
return releaseFuture;
}
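/**
* Schedules the given consumer vertex with the default settings of its execution graph,
* failing the vertex if scheduling throws.
*/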
private void scheduleConsumer(ExecutionVertex consumerVertex) {
try {
final ExecutionGraph executionGraph = consumerVertex.getExecutionGraph();
consumerVertex.scheduleForExecution(
executionGraph.getSlotProvider(),
executionGraph.isQueuedSchedulingAllowed(),
LocationPreferenceConstraint.ANY, // there must be at least one known location
Collections.emptySet());
} catch (Throwable t) {
consumerVertex.fail(new IllegalStateException("Could not schedule consumer " +
"vertex " + consumerVertex, t));
}
}
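/**
* For each consumer of this execution's result partition: schedules the consumer if it is
* still CREATED (and its input dependency constraint is satisfied), caches the partition
* info if the consumer is DEPLOYING, or sends the partition info if it is RUNNING.
*/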
void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) {
assertRunningInJobMasterMainThread();
final int numConsumers = allConsumers.size();
if (numConsumers > 1) {
fail(new IllegalStateException("Currently, only a single consumer group per partition is supported."));
} else if (numConsumers == 0) {
return;
}
for (ExecutionEdge edge : allConsumers.get(0)) {
final ExecutionVertex consumerVertex = edge.getTarget();
final Execution consumer = consumerVertex.getCurrentExecutionAttempt();
final ExecutionState consumerState = consumer.getState();
// ----------------------------------------------------------------
// Consumer is created => try to schedule it and the partition info
// is known during deployment
// ----------------------------------------------------------------
if (consumerState == CREATED) {
// Schedule the consumer vertex if its inputs constraint is satisfied, otherwise skip the scheduling.
// A shortcut of input constraint check is added for InputDependencyConstraint.ANY since
// at least one of the consumer vertex's inputs is consumable here. This is to avoid the
// O(N) complexity introduced by input constraint check for InputDependencyConstraint.ANY,
// as we do not want the default scheduling performance to be affected.
if (consumerVertex.getInputDependencyConstraint() == InputDependencyConstraint.ANY ||
consumerVertex.checkInputDependencyConstraints()) {
scheduleConsumer(consumerVertex);
}
}
// ----------------------------------------------------------------
// Consumer is running => send update message now
// Consumer is deploying => cache the partition info which would be
// sent after switching to running
// ----------------------------------------------------------------
else if (consumerState == DEPLOYING || consumerState == RUNNING) {
final PartitionInfo partitionInfo = createPartitionInfo(edge);
if (consumerState == DEPLOYING) {
consumerVertex.cachePartitionInfo(partitionInfo);
} else {
consumer.sendUpdatePartitionInfoRpcCall(Collections.singleton(partitionInfo));
}
}
}
}
private static PartitionInfo createPartitionInfo(ExecutionEdge executionEdge) {
IntermediateDataSetID intermediateDataSetID = executionEdge.getSource().getIntermediateResult().getId();
ShuffleDescriptor shuffleDescriptor = getConsumedPartitionShuffleDescriptor(executionEdge, false);
return new PartitionInfo(intermediateDataSetID, shuffleDescriptor);
}
/**
* This method fails the vertex due to an external condition. The task will move to state FAILED.
* If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the TaskManager.
*
* @param t The exception that caused the task to fail.
*/
@Override
public void fail(Throwable t) {
processFail(t, false);
}
/**
* Request a stack trace sample from the task of this execution.
*
* @param sampleId of the stack trace sample
* @param numSamples the sample should contain
* @param delayBetweenSamples to wait
* @param maxStackTraceDepth of the samples
* @param timeout until the request times out
* @return Future stack trace sample response
*/
public CompletableFuture<StackTraceSampleResponse> requestStackTraceSample(
int sampleId,
int numSamples,
Time delayBetweenSamples,
int maxStackTraceDepth,
Time timeout) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
return taskManagerGateway.requestStackTraceSample(
attemptId,
sampleId,
numSamples,
delayBetweenSamples,
maxStackTraceDepth,
timeout);
} else {
return FutureUtils.completedExceptionally(new Exception("The execution has no slot assigned."));
}
}
/**
* Notify the task of this execution about a completed checkpoint.
*
* @param checkpointId of the completed checkpoint
* @param timestamp of the completed checkpoint
*/
public void notifyCheckpointComplete(long checkpointId, long timestamp) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.notifyCheckpointComplete(attemptId, getVertex().getJobId(), checkpointId, timestamp);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is " +
"no longer running.");
}
}
/**
* Trigger a new checkpoint on the task of this execution.
*
* @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
*/
public void triggerCheckpoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, false);
}
/**
* Trigger a synchronous savepoint on the task of this execution.
*
* @param checkpointId of the checkpoint to trigger
* @param timestamp of the checkpoint to trigger
* @param checkpointOptions of the checkpoint to trigger
* @param advanceToEndOfEventTime Flag indicating if the source should inject a {@code MAX_WATERMARK} in the pipeline
* to fire any registered event-time timers
*/
public void triggerSynchronousSavepoint(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
triggerCheckpointHelper(checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
}
private void triggerCheckpointHelper(long checkpointId, long timestamp, CheckpointOptions checkpointOptions, boolean advanceToEndOfEventTime) {
final CheckpointType checkpointType = checkpointOptions.getCheckpointType();
if (advanceToEndOfEventTime && !(checkpointType.isSynchronous() && checkpointType.isSavepoint())) {
throw new IllegalArgumentException("Only synchronous savepoints are allowed to advance the watermark to MAX.");
}
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
taskManagerGateway.triggerCheckpoint(attemptId, getVertex().getJobId(), checkpointId, timestamp, checkpointOptions, advanceToEndOfEventTime);
} else {
LOG.debug("The execution has no slot assigned. This indicates that the execution is no longer running.");
}
}
// --------------------------------------------------------------------------------------------
// Callbacks
// --------------------------------------------------------------------------------------------
/**
* This method marks the task as failed, but will make no attempt to remove the task execution from the TaskManager.
* It is intended for cases where the task is known not to be running, or where the TaskManager reports the failure
* (in which case it has already removed the task).
*
* @param t The exception that caused the task to fail.
*/
void markFailed(Throwable t) {
processFail(t, true);
}
void markFailed(Throwable t, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
processFail(t, true, userAccumulators, metrics);
}
@VisibleForTesting
void markFinished() {
markFinished(null, null);
}
void markFinished(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
assertRunningInJobMasterMainThread();
// this call usually comes during RUNNING, but may also come while still in deploying (very fast tasks!)
while (true) {
ExecutionState current = this.state;
if (current == RUNNING || current == DEPLOYING) {
if (transitionState(current, FINISHED)) {
try {
for (IntermediateResultPartition finishedPartition
: getVertex().finishAllBlockingPartitions()) {
IntermediateResultPartition[] allPartitions = finishedPartition
.getIntermediateResult().getPartitions();
for (IntermediateResultPartition partition : allPartitions) {
scheduleOrUpdateConsumers(partition.getConsumers());
}
}
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(null);
vertex.getExecutionGraph().deregisterExecution(this);
}
finally {
vertex.executionFinished(this);
}
return;
}
}
else if (current == CANCELING) {
// we sent a cancel call, and the task manager finished before it arrived. We
// will never get a CANCELED call back from the task manager
completeCancelling(userAccumulators, metrics);
return;
}
else if (current == CANCELED || current == FAILED) {
if (LOG.isDebugEnabled()) {
LOG.debug("Task FINISHED, but concurrently went to state " + state);
}
return;
}
else {
// this should not happen, we need to fail this
markFailed(new Exception("Vertex received FINISHED message while being in state " + state));
return;
}
}
}
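/**
* Cancels without a TaskManager round-trip: transitions to CANCELING (with zero cancel
* retries) and immediately completes the cancellation. Returns false if the transition
* to CANCELING was not possible.
*/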
private boolean cancelAtomically() {
if (startCancelling(0)) {
completeCancelling();
return true;
} else {
return false;
}
}
private boolean startCancelling(int numberCancelRetries) {
if (transitionState(state, CANCELING)) {
taskManagerLocationFuture.cancel(false);
sendCancelRpcCall(numberCancelRetries);
return true;
} else {
return false;
}
}
void completeCancelling() {
completeCancelling(null, null);
}
void completeCancelling(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// the task managers can themselves cancel tasks without an external trigger, if they find that the
// network stack is canceled (for example by a failing / canceling receiver or sender).
// this is an artifact of the old network runtime, but for now we need to support task transitions
// from running directly to canceled
while (true) {
ExecutionState current = this.state;
if (current == CANCELED) {
return;
}
else if (current == CANCELING || current == RUNNING || current == DEPLOYING) {
updateAccumulatorsAndMetrics(userAccumulators, metrics);
if (transitionState(current, CANCELED)) {
finishCancellation();
return;
}
// else fall through the loop
}
else {
// failing in the meantime may happen and is no problem.
// anything else is a serious problem !!!
if (current != FAILED) {
String message = String.format("Asynchronous race: Found %s in state %s after successful cancel call.", vertex.getTaskNameWithSubtaskIndex(), state);
LOG.error(message);
vertex.getExecutionGraph().failGlobal(new Exception(message));
}
return;
}
}
}
private void finishCancellation() {
releaseAssignedResource(new FlinkException("Execution " + this + " was cancelled."));
vertex.getExecutionGraph().deregisterExecution(this);
// release partitions on the TM in case the task finished while we were already CANCELING
stopTrackingAndReleasePartitions();
}
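/** Caches a partition info to be sent once this execution switches to RUNNING. */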
void cachePartitionInfo(PartitionInfo partitionInfo) {
partitionInfos.add(partitionInfo);
}
private void sendPartitionInfos() {
if (!partitionInfos.isEmpty()) {
sendUpdatePartitionInfoRpcCall(new ArrayList<>(partitionInfos));
partitionInfos.clear();
}
}
// --------------------------------------------------------------------------------------------
// Internal Actions
// --------------------------------------------------------------------------------------------
private boolean processFail(Throwable t, boolean isCallback) {
return processFail(t, isCallback, null, null);
}
private boolean processFail(Throwable t, boolean isCallback, Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
// damn, we failed. This means only that we keep our books and notify our parent ExecutionVertex
// the actual computation on the task manager is cleaned up by the TaskManager that noticed the failure
// we may need to loop multiple times (in the presence of concurrent calls) in order to
// atomically switch to failed
assertRunningInJobMasterMainThread();
while (true) {
ExecutionState current = this.state;
if (current == FAILED) {
// already failed. It is enough to remember once that we failed (it's sad enough)
return false;
}
if (current == CANCELED || current == FINISHED) {
// we are already aborting or are already aborted or we are already finished
if (LOG.isDebugEnabled()) {
LOG.debug("Ignoring transition of vertex {} to {} while being {}.", getVertexWithAttempt(), FAILED, current);
}
return false;
}
if (current == CANCELING) {
completeCancelling(userAccumulators, metrics);
return false;
}
if (transitionState(current, FAILED, t)) {
// success (in a manner of speaking)
this.failureCause = t;
updateAccumulatorsAndMetrics(userAccumulators, metrics);
releaseAssignedResource(t);
vertex.getExecutionGraph().deregisterExecution(this);
stopTrackingAndReleasePartitions();
if (!isCallback && (current == RUNNING || current == DEPLOYING)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Sending out cancel request, to remove task execution from TaskManager.");
}
try {
if (assignedResource != null) {
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
} catch (Throwable tt) {
// no reason this should ever happen, but log it to be safe
LOG.error("Error triggering cancel call while marking task {} as failed.", getVertex().getTaskNameWithSubtaskIndex(), tt);
}
}
// leave the loop
return true;
}
}
}
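/**
* Transitions this execution from DEPLOYING to RUNNING and sends the cached partition infos.
* If the transition fails, reconciles with the state that was reached concurrently,
* cancelling or failing the task as needed.
*/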
boolean switchToRunning() {
if (transitionState(DEPLOYING, RUNNING)) {
sendPartitionInfos();
return true;
}
else {
// something happened while the call was in progress.
// it can mean:
// - canceling, while deployment was in progress. state is now canceling, or canceled, if the response overtook
// - finishing (execution and finished call overtook the deployment answer, which is possible and happens for fast tasks)
// - failed (execution, failure, and failure message overtook the deployment answer)
ExecutionState currentState = this.state;
if (currentState == FINISHED || currentState == CANCELED) {
// do nothing, the task was really fast (nice)
// or it was canceled really fast
}
else if (currentState == CANCELING || currentState == FAILED) {
if (LOG.isDebugEnabled()) {
// this log statement is guarded because the 'getVertexWithAttempt()' method
// performs string concatenations
LOG.debug("Concurrent canceling/failing of {} while deployment was in progress.", getVertexWithAttempt());
}
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
}
else {
String message = String.format("Concurrent unexpected state transition of task %s to %s while deployment was in progress.",
getVertexWithAttempt(), currentState);
if (LOG.isDebugEnabled()) {
LOG.debug(message);
}
// undo the deployment
sendCancelRpcCall(NUM_CANCEL_CALL_TRIES);
// record the failure
markFailed(new Exception(message));
}
return false;
}
}
/**
* This method sends a CancelTask message to the instance of the assigned slot.
*
* <p>The sending is retried up to {@code numberRetries} times.
*/
private void sendCancelRpcCall(int numberRetries) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
CompletableFuture<Acknowledge> cancelResultFuture = FutureUtils.retry(
() -> taskManagerGateway.cancelTask(attemptId, rpcTimeout),
numberRetries,
jobMasterMainThreadExecutor);
cancelResultFuture.whenComplete(
(ack, failure) -> {
if (failure != null) {
fail(new Exception("Task could not be canceled.", failure));
}
});
}
}
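/** Registers the given produced partitions with the partition tracker of the execution graph. */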
private void startTrackingPartitions(final ResourceID taskExecutorId, final Collection<ResultPartitionDeploymentDescriptor> partitions) {
PartitionTracker partitionTracker = vertex.getExecutionGraph().getPartitionTracker();
for (ResultPartitionDeploymentDescriptor partition : partitions) {
partitionTracker.startTrackingPartition(
taskExecutorId,
partition);
}
}
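/** Stops tracking all partitions produced by this execution and releases them via the partition tracker. */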
void stopTrackingAndReleasePartitions() {
LOG.info("Discarding the results produced by task execution {}.", attemptId);
if (producedPartitions != null && producedPartitions.size() > 0) {
final PartitionTracker partitionTracker = getVertex().getExecutionGraph().getPartitionTracker();
final List<ResultPartitionID> producedPartitionIds = producedPartitions.values().stream()
.map(ResultPartitionDeploymentDescriptor::getShuffleDescriptor)
.map(ShuffleDescriptor::getResultPartitionID)
.collect(Collectors.toList());
partitionTracker.stopTrackingAndReleasePartitions(producedPartitionIds);
}
}
/**
* Update the partition infos on the assigned resource.
*
* @param partitionInfos for the remote task
*/
private void sendUpdatePartitionInfoRpcCall(
final Iterable<PartitionInfo> partitionInfos) {
final LogicalSlot slot = assignedResource;
if (slot != null) {
final TaskManagerGateway taskManagerGateway = slot.getTaskManagerGateway();
final TaskManagerLocation taskManagerLocation = slot.getTaskManagerLocation();
CompletableFuture<Acknowledge> updatePartitionsResultFuture = taskManagerGateway.updatePartitions(attemptId, partitionInfos, rpcTimeout);
updatePartitionsResultFuture.whenCompleteAsync(
(ack, failure) -> {
// fail if there was a failure
if (failure != null) {
fail(new IllegalStateException("Update task on TaskManager " + taskManagerLocation +
" failed due to:", failure));
}
}, getVertex().getExecutionGraph().getJobMasterMainThreadExecutor());
}
}
/**
* Releases the assigned resource and completes the release future
* once the assigned resource has been successfully released.
*
* @param cause for the resource release, null if none
*/
private void releaseAssignedResource(@Nullable Throwable cause) {
assertRunningInJobMasterMainThread();
final LogicalSlot slot = assignedResource;
if (slot != null) {
ComponentMainThreadExecutor jobMasterMainThreadExecutor =
getVertex().getExecutionGraph().getJobMasterMainThreadExecutor();
slot.releaseSlot(cause)
.whenComplete((Object ignored, Throwable throwable) -> {
jobMasterMainThreadExecutor.assertRunningInMainThread();
if (throwable != null) {
releaseFuture.completeExceptionally(throwable);
} else {
releaseFuture.complete(null);
}
});
} else {
// no assigned resource --> we can directly complete the release future
releaseFuture.complete(null);
}
}
// --------------------------------------------------------------------------------------------
// Miscellaneous
// --------------------------------------------------------------------------------------------
/**
* Calculates the preferred locations based on the location preference constraint.
*
* @param locationPreferenceConstraint constraint for the location preference
* @return Future containing the collection of preferred locations. This future might not be completed if not all inputs
* have been assigned a resource.
*/
@VisibleForTesting
public CompletableFuture<Collection<TaskManagerLocation>> calculatePreferredLocations(LocationPreferenceConstraint locationPreferenceConstraint) {
final Collection<CompletableFuture<TaskManagerLocation>> preferredLocationFutures = getVertex().getPreferredLocations();
final CompletableFuture<Collection<TaskManagerLocation>> preferredLocationsFuture;
switch(locationPreferenceConstraint) {
case ALL:
preferredLocationsFuture = FutureUtils.combineAll(preferredLocationFutures);
break;
case ANY:
final ArrayList<TaskManagerLocation> completedTaskManagerLocations = new ArrayList<>(preferredLocationFutures.size());
for (CompletableFuture<TaskManagerLocation> preferredLocationFuture : preferredLocationFutures) {
if (preferredLocationFuture.isDone() && !preferredLocationFuture.isCompletedExceptionally()) {
final TaskManagerLocation taskManagerLocation = preferredLocationFuture.getNow(null);
if (taskManagerLocation == null) {
throw new FlinkRuntimeException("TaskManagerLocationFuture was completed with null. This indicates a programming bug.");
}
completedTaskManagerLocations.add(taskManagerLocation);
}
}
preferredLocationsFuture = CompletableFuture.completedFuture(completedTaskManagerLocations);
break;
default:
throw new RuntimeException("Unknown LocationPreferenceConstraint " + locationPreferenceConstraint + '.');
}
return preferredLocationsFuture;
}
private boolean transitionState(ExecutionState currentState, ExecutionState targetState) {
return transitionState(currentState, targetState, null);
}
private boolean transitionState(ExecutionState currentState, ExecutionState targetState, Throwable error) {
// sanity check
if (currentState.isTerminal()) {
throw new IllegalStateException("Cannot leave terminal state " + currentState + " to transition to " + targetState + '.');
}
if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) {
markTimestamp(targetState);
if (error == null) {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState);
} else {
LOG.info("{} ({}) switched from {} to {}.", getVertex().getTaskNameWithSubtaskIndex(), getAttemptId(), currentState, targetState, error);
}
if (targetState.isTerminal()) {
// complete the terminal state future
terminalStateFuture.complete(targetState);
}
// make sure that the state transition completes normally;
// potential errors (in listeners) may not affect the main logic
try {
vertex.notifyStateTransition(this, targetState, error);
}
catch (Throwable t) {
LOG.error("Error while notifying execution graph of execution state transition.", t);
}
return true;
} else {
return false;
}
}
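/** Records the current time as the timestamp of the transition into the given state. */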
private void markTimestamp(ExecutionState state) {
markTimestamp(state, System.currentTimeMillis());
}
private void markTimestamp(ExecutionState state, long timestamp) {
this.stateTimestamps[state.ordinal()] = timestamp;
}
public String getVertexWithAttempt() {
return vertex.getTaskNameWithSubtaskIndex() + " - execution #" + attemptNumber;
}
// ------------------------------------------------------------------------
// Accumulators
// ------------------------------------------------------------------------
/**
* Update accumulators (the update is discarded when the Execution has already terminated).
* @param userAccumulators the user accumulators
*/
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
synchronized (accumulatorLock) {
if (!state.isTerminal()) {
this.userAccumulators = userAccumulators;
}
}
}
public Map<String, Accumulator<?, ?>> getUserAccumulators() {
return userAccumulators;
}
@Override
public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() {
Map<String, OptionalFailure<Accumulator<?, ?>>> accumulators =
userAccumulators == null ?
null :
userAccumulators.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> OptionalFailure.of(entry.getValue())));
return StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulators);
}
@Override
public int getParallelSubtaskIndex() {
return getVertex().getParallelSubtaskIndex();
}
@Override
public IOMetrics getIOMetrics() {
return ioMetrics;
}
private void updateAccumulatorsAndMetrics(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {
if (userAccumulators != null) {
synchronized (accumulatorLock) {
this.userAccumulators = userAccumulators;
}
}
if (metrics != null) {
this.ioMetrics = metrics;
}
}
// ------------------------------------------------------------------------
// Standard utilities
// ------------------------------------------------------------------------
@Override
public String toString() {
final LogicalSlot slot = assignedResource;
return String.format("Attempt #%d (%s) @ %s - [%s]", attemptNumber, vertex.getTaskNameWithSubtaskIndex(),
(slot == null ? "(unassigned)" : slot), state);
}
@Override
public ArchivedExecution archive() {
return new ArchivedExecution(this);
}
private void assertRunningInJobMasterMainThread() {
vertex.getExecutionGraph().assertRunningInJobMasterMainThread();
}
}
| Java |
/************************************************************************************
* XtApp.h : header file
*
* CXtApp Definition header, An Application framework for common use
* (Xtrovert Application Frameworks).
*
* AUTHOR : Sean Feng <SeanFeng2006@hotmail.com>
* DATE : Aug. 7, 2012
* Copyright (c) 2009-?. All Rights Reserved.
*
* This code may be used in compiled form in any way you desire. This
* file may be redistributed unmodified by any means PROVIDING it is
* not sold for profit without the author's written consent, and
* providing that this notice and the author's name and all copyright
* notices remain intact.
*
* An email letting me know how you are using it would be nice as well.
*
* This file is provided "as is" with no expressed or implied warranty.
* The author accepts no liability for any damage/loss of business that
* this product may cause.
*
************************************************************************************/
#ifndef __XT_APPLICATION_H__
#define __XT_APPLICATION_H__
#include "XtThread.h"
/*
Application Parameter:
-buildinfo(bi) Compiler, Platform (Win/Linux, 32/64 bits), build date.
-start/stop/restart
-reload
*/
//////////////////////////////////////////////////////////////////////////
// CLASS CXtApp
//////////////////////////////////////////////////////////////////////////
/* Terminate flag */
enum {
XTAPP_TF_STOP=0,
XTAPP_TF_RUNNING,
XTAPP_TF_RESTART,
XTAPP_TF_END
};
/* _T('-') */
const TCHAR XT_STRCMD_DIRECTIVE_BRIEF = _T('-');
/* Complete directive _T("--") [CPLT means Complete] */
const TCHAR XT_STRCMD_DIRECTIVE_CPLT[] = _T("--");
/* _T('/') */
const TCHAR XT_STRCMD_DIRECTIVE_SLASH = _T('/'); /* Compatible with Windows _T('/') */
class CXtApp : public CXtThread
{
public:
CXtApp(void);
virtual ~CXtApp(void);
// System Environment initialization/destruction.
int InitApplication( int argc, TCHAR *argv[] );
int ExitApplication(void);
// Application logics initialization/destruction.
int InitInstance(void);
int ExitInstance(void); // return app exit code
int RestartInstance(void); // handle restart by Restart Manager
int Run(void);
/* Administrator Mode */
/*int OptCmdAdmin(void);*/
/*int CmdNetState(void);*/
/*int CmdNetConnect( TCHAR *szAddress );*/
/*int CmdNetRestart(void);*/
/*int CmdNetPing( TCHAR *szAddress );*/
static int m_nTermFlag;
static CXtApp *m_pThisApp;
#if defined(_DEBUG)
void SetLabel( const TCHAR *szLabel );
TCHAR m_szLabel[_MAX_STR_LEN_32_];
#endif
protected:
virtual void Reset(void);
// virtual BOOL OnIdle( LONG lCount ); // return TRUE if more idle processing
/*****************************************************************
* InitApplication() is implemented with the following methods.
* ExitApplication() is implemented with the following methods.
******************************************************************/
/* React to a shell-issued command line directive. */
virtual int ProcessShellCommand( int argc, TCHAR *argv[] );
virtual BOOL GetShellCommand( int argc, TCHAR* argv[], const TCHAR **cszOption, const TCHAR **cszParam );
/* Decide whether process runs under Daemon Mode. */
virtual void SetupDaemonMode(void); /* Setup : m_bDaemon = TRUE/FALSE */
/* Do something extra in derived-class. */
virtual int OnInitApp(void);
virtual int OnExitApp(void);
/*****************************************************************
* InitApplication()/ExitApplication() implementation END
******************************************************************/
/*****************************************************************
* InitInstance() is implemented with the following methods.
* ExitInstance() is implemented with the following methods.
******************************************************************/
int Daemon(void);
virtual int OnInitInstance(void);
virtual int OnExitInstance(void);
/*****************************************************************
* InitInstance()/ExitInstance() implementation END
******************************************************************/
/*****************************************************************
* Run() is implemented with the following methods.
******************************************************************/
virtual int AppProc(void);
/*****************************************************************
* Run() implementation END
******************************************************************/
/* Methods */
int GetCwd( TCHAR *szDir ); /* Get current working directory. */
int SetCwd( const TCHAR *szDir ); /* Set current working directory. */
int GetExeDir( TCHAR *szDir ); /* Get directory where exe-file lies in. */
const TCHAR* GetCmdLineString(void); /* Get the command line string, i.e. how this program was started. */
static void GotTerminate( int sig );
/*virtual int WaitThreads(void);*/
/* Is this process running under daemon mode or not? */
BOOL m_bDaemon; /* SetupDaemonMode() sets this member. */
BOOL m_bRestart;
private:
};
#endif /*__XT_APPLICATION_H__*/
|
C++
|
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="SHORTCUT ICON" href="../../../../../img/clover.ico" />
<link rel="stylesheet" href="../../../../../aui/css/aui.min.css" media="all"/>
<link rel="stylesheet" href="../../../../../aui/css/aui-experimental.min.css" media="all"/>
<!--[if IE 9]><link rel="stylesheet" href="../../../../../aui/css/aui-ie9.min.css" media="all"/><![endif]-->
<style type="text/css" media="all">
@import url('../../../../../style.css');
@import url('../../../../../tree.css');
</style>
<script src="../../../../../jquery-1.8.3.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui-experimental.min.js" type="text/javascript"></script>
<script src="../../../../../aui/js/aui-soy.min.js" type="text/javascript"></script>
<script src="../../../../../package-nodes-tree.js" type="text/javascript"></script>
<script src="../../../../../clover-tree.js" type="text/javascript"></script>
<script src="../../../../../clover.js" type="text/javascript"></script>
<script src="../../../../../clover-descriptions.js" type="text/javascript"></script>
<script src="../../../../../cloud.js" type="text/javascript"></script>
<title>ABA Route Transit Number Validator 1.0.1-SNAPSHOT</title>
</head>
<body>
<div id="page">
<header id="header" role="banner">
<nav class="aui-header aui-dropdown2-trigger-group" role="navigation">
<div class="aui-header-inner">
<div class="aui-header-primary">
<h1 id="logo" class="aui-header-logo aui-header-logo-clover">
<a href="http://openclover.org" title="Visit OpenClover home page"><span class="aui-header-logo-device">OpenClover</span></a>
</h1>
</div>
<div class="aui-header-secondary">
<ul class="aui-nav">
<li id="system-help-menu">
<a class="aui-nav-link" title="Open online documentation" target="_blank"
href="http://openclover.org/documentation">
<span class="aui-icon aui-icon-small aui-iconfont-help"> Help</span>
</a>
</li>
</ul>
</div>
</div>
</nav>
</header>
<div class="aui-page-panel">
<div class="aui-page-panel-inner">
<div class="aui-page-panel-nav aui-page-panel-nav-clover">
<div class="aui-page-header-inner" style="margin-bottom: 20px;">
<div class="aui-page-header-image">
<a href="http://cardatechnologies.com" target="_top">
<div class="aui-avatar aui-avatar-large aui-avatar-project">
<div class="aui-avatar-inner">
<img src="../../../../../img/clover_logo_large.png" alt="Clover icon"/>
</div>
</div>
</a>
</div>
<div class="aui-page-header-main" >
<h1>
<a href="http://cardatechnologies.com" target="_top">
ABA Route Transit Number Validator 1.0.1-SNAPSHOT
</a>
</h1>
</div>
</div>
<nav class="aui-navgroup aui-navgroup-vertical">
<div class="aui-navgroup-inner">
<ul class="aui-nav">
<li class="">
<a href="../../../../../dashboard.html">Project overview</a>
</li>
</ul>
<div class="aui-nav-heading packages-nav-heading">
<strong>Packages</strong>
</div>
<div class="aui-nav project-packages">
<form method="get" action="#" class="aui package-filter-container">
<input type="text" autocomplete="off" class="package-filter text"
placeholder="Type to filter packages..." name="package-filter" id="package-filter"
title="Start typing package name (or part of the name) to search through the tree. Use arrow keys and the Enter key to navigate."/>
</form>
<p class="package-filter-no-results-message hidden">
<small>No results found.</small>
</p>
<div class="packages-tree-wrapper" data-root-relative="../../../../../" data-package-name="com.cardatechnologies.utils.validators.abaroutevalidator">
<div class="packages-tree-container"></div>
<div class="clover-packages-lozenges"></div>
</div>
</div>
</div>
</nav> </div>
<section class="aui-page-panel-content">
<div class="aui-page-panel-content-clover">
<div class="aui-page-header-main"><ol class="aui-nav aui-nav-breadcrumbs">
<li><a href="../../../../../dashboard.html"> Project Clover database Sat Aug 7 2021 12:29:33 MDT</a></li>
<li><a href="test-pkg-summary.html">Package com.cardatechnologies.utils.validators.abaroutevalidator</a></li>
<li><a href="test-Test_AbaRouteValidator_07.html">Class Test_AbaRouteValidator_07</a></li>
</ol></div>
<h1 class="aui-h2-clover">
Test testAbaNumberCheck_13433_good
</h1>
<table class="aui">
<thead>
<tr>
<th>Test</th>
<th><label title="The test result. Either a Pass, Fail or Error.">Status</label></th>
<th><label title="When the test execution was started">Start time</label></th>
<th><label title="The total time in seconds taken to run this test.">Time (seconds)</label></th>
<th><label title="A failure or error message if the test is not successful.">Message</label></th>
</tr>
</thead>
<tbody>
<tr>
<td>
<a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_07.html?line=23656#src-23656" >testAbaNumberCheck_13433_good</a>
</td>
<td>
<span class="sortValue">1</span><span class="aui-lozenge aui-lozenge-success">PASS</span>
</td>
<td>
7 Aug 12:36:43
</td>
<td>
0.0 </td>
<td>
<div></div>
<div class="errorMessage"></div>
</td>
</tr>
</tbody>
</table>
<div> </div>
<table class="aui aui-table-sortable">
<thead>
<tr>
<th style="white-space:nowrap;"><label title="A class that was directly hit by this test.">Target Class</label></th>
<th colspan="4"><label title="The percentage of coverage contributed by each single test.">Coverage contributed by</label> testAbaNumberCheck_13433_good</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<span class="sortValue">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</span>
  <a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/AbaRouteValidator.html?id=31840#AbaRouteValidator" title="AbaRouteValidator" name="sl-47">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</a>
</td>
<td>
<span class="sortValue">0.7352941</span>73.5%
</td>
<td class="align-middle" style="width: 100%" colspan="3">
<div>
<div title="73.5% Covered" style="min-width:40px;" class="barNegative contribBarNegative contribBarNegative"><div class="barPositive contribBarPositive contribBarPositive" style="width:73.5%"></div></div></div> </td>
</tr>
</tbody>
</table>
</div> <!-- class="aui-page-panel-content-clover" -->
<footer id="footer" role="contentinfo">
<section class="footer-body">
<ul>
<li>
Report generated by <a target="_new" href="http://openclover.org">OpenClover</a> v 4.4.1
on Sat Aug 7 2021 12:49:26 MDT using coverage data from Sat Aug 7 2021 12:47:23 MDT.
</li>
</ul>
<ul>
<li>OpenClover is free and open-source software. </li>
</ul>
</section>
</footer> </section> <!-- class="aui-page-panel-content" -->
</div> <!-- class="aui-page-panel-inner" -->
</div> <!-- class="aui-page-panel" -->
</div> <!-- id="page" -->
</body>
</html>
|
HTML
|
package water;
import java.io.*;
import java.lang.reflect.Array;
import java.net.*;
import java.nio.*;
import java.nio.channels.*;
import java.util.ArrayList;
import java.util.Random;
import water.network.SocketChannelUtils;
import water.util.Log;
import water.util.StringUtils;
import water.util.TwoDimTable;
/** A ByteBuffer backed mixed Input/Output streaming class, using Iced serialization.
*
* Reads/writes empty/fill the ByteBuffer as needed. When it is empty/full
* we go to the ByteChannel for more/less. Because DirectByteBuffers are
* expensive to make, we keep a few pooled.
*
* When talking to a remote H2O node, switches between UDP and TCP transport
* protocols depending on the message size. The TypeMap is not included, and
* is assumed to exist on the remote H2O node.
*
* Supports direct NIO FileChannel read/write to disk, used during user-mode
* swapping. The TypeMap is not included on write, and is assumed to be the
* current map on read.
*
* Supports read/write from byte[] - this defeats the purpose of a
* streaming protocol, but is frequently handy for small structures. The
* TypeMap is not included, and is assumed to be the current map on read.
*
* Supports read/write from a standard Stream, which by default assumes it is
* NOT going in and out of the same Cloud, so the TypeMap IS included. The
* serialized object can only be read back into the same minor version of H2O.
*
* @author <a href="mailto:cliffc@h2o.ai"></a>
*/
public final class AutoBuffer {
// Maximum size of an array we allow to allocate (the value is designed
// to mimic the behavior of OpenJDK libraries)
private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
// The direct ByteBuffer for schlorping data about.
// Set to null to indicate the AutoBuffer is closed.
ByteBuffer _bb;
public String sourceName = "???";
public boolean isClosed() { return _bb == null ; }
// The ByteChannel for moving data in or out. Could be a SocketChannel (for
// a TCP connection) or a FileChannel (spill-to-disk) or a DatagramChannel
// (for a UDP connection). Null on closed AutoBuffers. Null on initial
// remote-writing AutoBuffers which are still deciding UDP vs TCP. Not-null
// for open AutoBuffers doing file i/o or reading any TCP/UDP or having
// written at least one buffer to TCP/UDP.
private Channel _chan;
// A Stream for moving data in. Null unless this AutoBuffer is
// stream-based, in which case the _chan field is null. This path supports
// persistence: reading and writing objects from different H2O cluster
// instances (but exactly the same H2O revision). The only required
// similarity is same-classes-same-fields; changes here will probably
// crash silently. If the fields are named the same but the semantics
// differ, then again the result is probably a silent crash.
private InputStream _is;
private short[] _typeMap; // Mapping from input stream map to current map, or null
// If we need a SocketChannel, raise the priority so we get the I/O over
// with. Do not want to have some TCP socket open, blocking the TCP channel
// and then have the thread stalled out. If we raise the priority - be sure
// to lower it again. Note this is for TCP channels ONLY, and only because
// we are blocking another Node with I/O.
private int _oldPrior = -1;
// Where to send or receive data via TCP or UDP (choice made as we discover
// how big the message is); used to lazily create a Channel. If NULL, then
// _chan should be a pre-existing Channel, such as a FileChannel.
final H2ONode _h2o;
// TRUE for read-mode. FALSE for write-mode. Can be flipped for rapid turnaround.
private boolean _read;
// TRUE if this AutoBuffer has never advanced past the first "page" of data.
// The UDP-flavor, port# and task fields are only valid until we read over
// them when flipping the ByteBuffer to the next chunk of data. Used in
// asserts all over the place.
private boolean _firstPage;
// Total size written out from 'new' to 'close'. Only updated when actually
// reading or writing data, or after close(). For profiling only.
int _size;
//int _zeros, _arys;
// More profiling: start->close msec, plus nano's spent in blocking I/O
// calls. The difference between (close-start) and i/o msec is the time the
// i/o thread spends doing other stuff (e.g. allocating Java objects or
// (de)serializing).
long _time_start_ms, _time_close_ms, _time_io_ns;
// I/O persistence flavor: Value.ICE, NFS, HDFS, S3, TCP. Used to record I/O time.
final byte _persist;
// The assumed max UDP packet size
static final int MTU = 1500-8/*UDP packet header size*/;
// Enable this to test random TCP fails on open or write
static final Random RANDOM_TCP_DROP = null; //new Random();
static final java.nio.charset.Charset UTF_8 = java.nio.charset.Charset.forName("UTF-8");
/** Incoming UDP request. Make a read-mode AutoBuffer from the open Channel,
* figure the originating H2ONode from the first few bytes read. */
AutoBuffer( DatagramChannel sock ) throws IOException {
_chan = null;
_bb = BBP_SML.make(); // Get a small / UDP-sized ByteBuffer
_read = true; // Reading by default
_firstPage = true;
// Read a packet; can get H2ONode from 'sad'?
Inet4Address addr = null;
SocketAddress sad = sock.receive(_bb);
if( sad instanceof InetSocketAddress ) {
InetAddress address = ((InetSocketAddress) sad).getAddress();
if( address instanceof Inet4Address ) {
addr = (Inet4Address) address;
}
}
_size = _bb.position();
_bb.flip(); // Set limit=amount read, and position==0
if( addr == null ) throw new RuntimeException("Unhandled socket type: " + sad);
// Read Inet from socket, port from the stream, figure out H2ONode
_h2o = H2ONode.intern(addr, getPort());
_firstPage = true;
assert _h2o != null;
_persist = 0; // No persistence
}
/** Incoming TCP request. Make a read-mode AutoBuffer from the open Channel,
* figure the originating H2ONode from the first few bytes read.
*
* remoteAddress set to null means that the communication is originating from non-h2o node, non-null value
* represents the case where the communication is coming from h2o node.
* */
AutoBuffer( ByteChannel sock, InetAddress remoteAddress ) throws IOException {
_chan = sock;
raisePriority(); // Make TCP priority high
_bb = BBP_BIG.make(); // Get a big / TCP-sized ByteBuffer
_bb.flip();
_read = true; // Reading by default
_firstPage = true;
// Read Inet from socket, port from the stream, figure out H2ONode
if(remoteAddress!=null) {
_h2o = H2ONode.intern(remoteAddress, getPort());
}else{
// In case the communication originates from a non-h2o node, we set _h2o to null.
// This is done for 2 reasons:
// - H2ONode.intern creates a new thread, and if there are a lot of connections
// from a non-h2o environment, it could end up with a too-many-open-files exception.
// - H2ONode.intern also reads the port (getPort()) and additional information which
// we do not send in communication originating from non-h2o nodes.
_h2o = null;
}
_firstPage = true; // Yes, must reset this.
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
}
/** Make an AutoBuffer to write to an H2ONode. Requests for full buffer will
* open a TCP socket and roll through writing to the target. Smaller
* requests will send via UDP. Small requests get ordered by priority, so
* that e.g. NACK and ACKACK messages have priority over most anything else.
* This helps in UDP floods to shut down flooding senders. */
private byte _msg_priority;
AutoBuffer( H2ONode h2o, byte priority ) {
// If the message goes via UDP, we write into a DBB up front - because we plan
// on sending it out via a Datagram socket send call. If it goes via batched
// TCP, we write into an HBB up front, because this will be copied again
// into a large outgoing buffer.
_bb = H2O.ARGS.useUDP // Actually use UDP?
? BBP_SML.make() // Make DirectByteBuffers to start with
: ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null; // Channel made lazily only if we write a lot
_h2o = h2o;
_read = false; // Writing by default
_firstPage = true; // Filling first page
assert _h2o != null;
_time_start_ms = System.currentTimeMillis();
_persist = Value.TCP;
_msg_priority = priority;
}
/** Spill-to/from-disk request. */
public AutoBuffer( FileChannel fc, boolean read, byte persist ) {
_bb = BBP_BIG.make(); // Get a big / TCP-sized ByteBuffer
_chan = fc; // Write to read/write
_h2o = null; // File Channels never have an _h2o
_read = read; // Mostly assert reading vs writing
if( read ) _bb.flip();
_time_start_ms = System.currentTimeMillis();
_persist = persist; // One of Value.ICE, NFS, S3, HDFS
}
/** Read from UDP multicast. Same as the byte[]-read variant, except there is an H2ONode. */
AutoBuffer( DatagramPacket pack ) {
_size = pack.getLength();
_bb = ByteBuffer.wrap(pack.getData(), 0, pack.getLength()).order(ByteOrder.nativeOrder());
_bb.position(0);
_read = true;
_firstPage = true;
_chan = null;
_h2o = H2ONode.intern(pack.getAddress(), getPort());
_persist = 0; // No persistence
}
/** Read from a UDP_TCP buffer; could be in the middle of a large buffer */
AutoBuffer( H2ONode h2o, byte[] buf, int off, int len ) {
assert buf != null : "null fed to ByteBuffer.wrap";
_h2o = h2o;
_bb = ByteBuffer.wrap(buf,off,len).order(ByteOrder.nativeOrder());
_chan = null;
_read = true;
_firstPage = true;
_persist = 0; // No persistence
_size = len;
}
/** Read from a fixed byte[]; should not be closed. */
public AutoBuffer( byte[] buf ) { this(null,buf,0, buf.length); }
/** Write to an ever-expanding byte[]. Instead of calling {@link #close()},
* call {@link #buf()} to retrieve the final byte[]. */
public AutoBuffer( ) {
_bb = ByteBuffer.wrap(new byte[16]).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
_persist = 0; // No persistence
}
/** Write to a known sized byte[]. Instead of calling close(), call
* {@link #bufClose()} to retrieve the final byte[]. */
public AutoBuffer( int len ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(len)).order(ByteOrder.nativeOrder());
_chan = null;
_h2o = null;
_read = false;
_firstPage = true;
_persist = 0; // No persistence
}
/** Write to a persistent Stream, including all TypeMap info to allow later
* reloading (by the same exact rev of H2O). */
public AutoBuffer( OutputStream os, boolean persist ) {
_bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
_read = false;
_chan = Channels.newChannel(os);
_h2o = null;
_firstPage = true;
_persist = 0;
if( persist ) put1(0x1C).put1(0xED).putStr(H2O.ABV.projectVersion()).putAStr(TypeMap.CLAZZES);
else put1(0);
}
/** Read from a persistent Stream (including all TypeMap info) into the same
* exact rev of H2O. */
public AutoBuffer( InputStream is ) {
_chan = null;
_h2o = null;
_firstPage = true;
_persist = 0;
_read = true;
_bb = ByteBuffer.wrap(MemoryManager.malloc1(BBP_BIG._size)).order(ByteOrder.nativeOrder());
_bb.flip();
_is = is;
int b = get1U();
if( b==0 ) return; // No persistence info
int magic = get1U();
if( b!=0x1C || magic != 0xED ) throw new IllegalArgumentException("Missing magic number 0x1CED at stream start");
String version = getStr();
if( !version.equals(H2O.ABV.projectVersion()) )
throw new IllegalArgumentException("Found version "+version+", but running version "+H2O.ABV.projectVersion());
String[] typeMap = getAStr();
_typeMap = new short[typeMap.length];
for( int i=0; i<typeMap.length; i++ )
_typeMap[i] = (short)(typeMap[i]==null ? 0 : TypeMap.onIce(typeMap[i]));
}
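// A minimal usage sketch (hypothetical method name): round-trip a few
// primitives through the byte[]-backed mode described in the class comment -
// write-mode AutoBuffer() going out, read-mode AutoBuffer(byte[]) coming back.
@SuppressWarnings("unused")
private static void exampleByteArrayRoundTrip() {
byte[] bytes = new AutoBuffer().put4(42).put8d(3.14).buf(); // write mode, heap-backed
AutoBuffer in = new AutoBuffer(bytes); // read mode over the same bytes
assert in.get4() == 42;
assert in.get8d() == 3.14;
}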
@Override public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("[AB ").append(_read ? "read " : "write ");
sb.append(_firstPage?"first ":"2nd ").append(_h2o);
sb.append(" ").append(Value.nameOfPersist(_persist));
if( _bb != null ) sb.append(" 0 <= ").append(_bb.position()).append(" <= ").append(_bb.limit());
if( _bb != null ) sb.append(" <= ").append(_bb.capacity());
return sb.append("]").toString();
}
// Fetch a DBB from an object pool... they are fairly expensive to make
// because a native call is required to get the backing memory. I've
// included BB count tracking code to help track leaks. As of 12/17/2012 the
// leaks are under control, but I figure this may happen again, so I'm keeping
// these counters around.
//
// We use 2 pool sizes: lots of small UDP packet-sized buffers and fewer
// larger TCP-sized buffers.
private static final boolean DEBUG = Boolean.getBoolean("h2o.find-ByteBuffer-leaks");
private static long HWM=0;
static class BBPool {
long _made, _cached, _freed;
long _numer, _denom, _goal=4*H2O.NUMCPUS, _lastGoal;
final ArrayList<ByteBuffer> _bbs = new ArrayList<>();
final int _size; // Big or small size of ByteBuffers
BBPool( int sz) { _size=sz; }
private ByteBuffer stats( ByteBuffer bb ) {
if( !DEBUG ) return bb;
if( ((_made+_cached)&255)!=255 ) return bb; // Filter printing to 1 in 256
long now = System.currentTimeMillis();
if( now < HWM ) return bb;
HWM = now+1000;
water.util.SB sb = new water.util.SB();
sb.p("BB").p(this==BBP_BIG?1:0).p(" made=").p(_made).p(" -freed=").p(_freed).p(", cache hit=").p(_cached).p(" ratio=").p(_numer/_denom).p(", goal=").p(_goal).p(" cache size=").p(_bbs.size()).nl();
for( int i=0; i<H2O.MAX_PRIORITY; i++ ) {
int x = H2O.getWrkQueueSize(i);
if( x > 0 ) sb.p('Q').p(i).p('=').p(x).p(' ');
}
Log.warn(sb.nl().toString());
return bb;
}
ByteBuffer make() {
while( true ) { // Repeat loop for DBB OutOfMemory errors
ByteBuffer bb=null;
synchronized(_bbs) {
int sz = _bbs.size();
if( sz > 0 ) { bb = _bbs.remove(sz-1); _cached++; _numer++; }
}
if( bb != null ) return stats(bb);
// Cache empty; go get one from C/Native memory
try {
bb = ByteBuffer.allocateDirect(_size).order(ByteOrder.nativeOrder());
synchronized(this) { _made++; _denom++; _goal = Math.max(_goal,_made-_freed); _lastGoal=System.nanoTime(); } // Goal was too low, raise it
return stats(bb);
} catch( OutOfMemoryError oome ) {
// java.lang.OutOfMemoryError: Direct buffer memory
if( !"Direct buffer memory".equals(oome.getMessage()) ) throw oome;
System.out.println("OOM DBB - Sleeping & retrying");
try { Thread.sleep(100); } catch( InterruptedException ignore ) { }
}
}
}
void free(ByteBuffer bb) {
// Heuristic: keep the ratio of BB's made to cache-hits at a fixed level.
// Free to GC if ratio is high, free to internal cache if low.
long ratio = _numer/(_denom+1);
synchronized(_bbs) {
if( ratio < 100 || _bbs.size() < _goal ) { // low hit/miss ratio or below goal
bb.clear(); // Clear-before-add
_bbs.add(bb);
} else _freed++; // Toss the extras (above goal & ratio)
long now = System.nanoTime();
if( now-_lastGoal > 1000000000L ) { // Once/sec, drop goal by 10%
_lastGoal = now;
if( ratio > 110 ) // If ratio is really high, lower goal
_goal=Math.max(4*H2O.NUMCPUS,(long)(_goal*0.99));
// Once/sec, lower numer/denom... means more recent activity outweighs really old stuff
long denom = (long) (0.99 * _denom); // Proposed reduction
if( denom > 10 ) { // Keep a little precision
_numer = (long) (0.99 * _numer); // Keep ratio between made & cached the same
_denom = denom; // ... by lowering both by 10%
}
}
}
}
static int FREE( ByteBuffer bb ) {
if(bb.isDirect())
(bb.capacity()==BBP_BIG._size ? BBP_BIG : BBP_SML).free(bb);
return 0; // Flow coding
}
}
static BBPool BBP_SML = new BBPool( 2*1024); // Bytebuffer "common small size", for UDP
static BBPool BBP_BIG = new BBPool(64*1024); // Bytebuffer "common big size", for TCP
public static int TCP_BUF_SIZ = BBP_BIG._size;
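// A minimal sketch (hypothetical method name) of the pool round-trip described
// above: make() prefers a cached direct buffer, and FREE() routes direct
// buffers back to the pool matching their capacity.
@SuppressWarnings("unused")
private static void examplePoolRoundTrip() {
ByteBuffer bb = BBP_SML.make(); // cached DBB if available, else allocateDirect
bb.putInt(0xCAFE); // ... use the buffer ...
BBPool.FREE(bb); // back to BBP_SML's cache (or GC'd if above goal)
}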
private int bbFree() {
if(_bb != null && _bb.isDirect())
BBPool.FREE(_bb);
_bb = null;
return 0; // Flow-coding
}
// You thought TCP was a reliable protocol, right? WRONG! Fails 100% of the
// time under heavy network load. Connection-reset-by-peer & connection
// timeouts abound, even after a socket open and after a 1st successful
// ByteBuffer write. It *appears* that the reader is unaware that a writer
// was told "go ahead and write" by the TCP stack, so all these fails are
// only on the writer-side.
public static class AutoBufferException extends RuntimeException {
public final IOException _ioe;
AutoBufferException( IOException ioe ) { _ioe = ioe; }
}
// For reads, just assert all was read, then close and release resources
// (release the ByteBuffer back to the common pool). For writes, force any final
// bytes out. If the write is to an H2ONode and is short, send via UDP.
// AutoBuffer close() calls are ordered; i.e. a reader close() will block until
// the writer does a close().
public final int close() {
//if( _size > 2048 ) System.out.println("Z="+_zeros+" / "+_size+", A="+_arys);
if( isClosed() ) return 0; // Already closed
assert _h2o != null || _chan != null || _is != null; // Byte-array backed should not be closed
try {
if( _chan == null ) { // No channel?
if( _read ) {
if( _is != null ) _is.close();
return 0;
} else { // Write
// For small-packet write, send via UDP. Since nothing is sent until
// now, this close() call trivially orders - since the reader will not
// even start (much less close()) until this packet is sent.
if( _bb.position() < MTU) return udpSend();
// oops - Big Write, switch to TCP and finish out there
}
}
// Force AutoBuffer 'close' calls to order; i.e. block readers until
// writers do a 'close' - by writing 1 more byte in the close-call which
// the reader will have to wait for.
if( hasTCP()) { // TCP connection?
try {
if( _read ) { // Reader?
int x = get1U(); // Read 1 more byte
assert x == 0xab : "AB.close instead of 0xab sentinel got "+x+", "+this;
assert _chan != null; // chan set by incoming reader, since we KNOW it is a TCP
// Write the reader-handshake-byte.
SocketChannelUtils.underlyingSocketChannel(_chan).socket().getOutputStream().write(0xcd);
// do not actually close the reader socket; recycle it in the TCPReader thread
} else { // Writer?
put1(0xab); // Write one-more byte ; might set _chan from null to not-null
sendPartial(); // Finish partial writes; might set _chan from null to not-null
assert _chan != null; // _chan is set not-null now!
// Read the writer-handshake-byte.
int x = SocketChannelUtils.underlyingSocketChannel(_chan).socket().getInputStream().read();
// Either the TCP connection was dropped or the other side closed the connection without reading/confirming (e.g. the task was cancelled).
if( x == -1 ) throw new IOException("Other side closed connection before handshake byte read");
assert x == 0xcd : "Handshake; writer expected a 0xcd from reader but got "+x;
}
} catch( IOException ioe ) {
try { _chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now, since i/o error
throw ioe; // Rethrow after close
} finally {
if( !_read ) _h2o.freeTCPSocket((ByteChannel) _chan); // Recycle writable TCP channel
restorePriority(); // And if we raised priority, lower it back
}
} else { // FileChannel
if( !_read ) sendPartial(); // Finish partial file-system writes
_chan.close();
_chan = null; // Closed file channel
}
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
throw new AutoBufferException(e);
} finally {
bbFree();
_time_close_ms = System.currentTimeMillis();
// TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
return 0;
}
// Need a sock for a big read or write operation.
// See if we got one already, else open a new socket.
private void tcpOpen() throws IOException {
assert _firstPage && _bb.limit() >= 1+2+4; // At least something written
assert _chan == null;
// assert _bb.position()==0;
_chan = _h2o.getTCPSocket();
raisePriority();
}
// Just close the channel here without reading anything. Without the task
// object at hand we do not know how many bytes we should read from
// the channel. And since the other side will try to read a confirmation from
// us before closing the channel, we cannot read till the end. So we just
// close the channel and let the other side deal with it and figure out that
// the task has been cancelled (it is still sending ack-ack back).
void drainClose() {
if( isClosed() ) return; // Already closed
final Channel chan = _chan; // Read before closing
assert _h2o != null || chan != null; // Byte-array backed should not be closed
if( chan != null ) { // Channel assumed sick from prior IOException
try { chan.close(); } catch( IOException ignore ) {} // Silently close
_chan = null; // No channel now!
if( !_read && SocketChannelUtils.isSocketChannel(chan)) _h2o.freeTCPSocket((ByteChannel) chan); // Recycle writable TCP channel
}
restorePriority(); // And if we raised priority, lower it back
bbFree();
_time_close_ms = System.currentTimeMillis();
// TimeLine.record_IOclose(this,_persist); // Profile AutoBuffer connections
assert isClosed();
}
// True if we opened a TCP channel, or will open one to close-and-send
boolean hasTCP() { assert !isClosed(); return SocketChannelUtils.isSocketChannel(_chan) || (_h2o!=null && _bb.position() >= MTU); }
// Size in bytes sent, after a close()
int size() { return _size; }
//int zeros() { return _zeros; }
public int position () { return _bb.position(); }
public AutoBuffer position(int p) {_bb.position(p); return this;}
/** Skip over some bytes in the byte buffer. Caller is responsible for not
* reading off end of the bytebuffer; generally this is easy for
* array-backed autobuffers and difficult for i/o-backed bytebuffers. */
public void skip(int skip) { _bb.position(_bb.position()+skip); }
// Return byte[] from a writable AutoBuffer
public final byte[] buf() {
assert _h2o==null && _chan==null && !_read && !_bb.isDirect();
return MemoryManager.arrayCopyOfRange(_bb.array(), _bb.arrayOffset(), _bb.position());
}
public final byte[] bufClose() {
byte[] res = _bb.array();
bbFree();
return res;
}
// For TCP sockets ONLY, raise the thread priority. We assume we are
// blocking other Nodes with our network I/O, so try to get the I/O
// over with.
private void raisePriority() {
if(_oldPrior == -1){
assert SocketChannelUtils.isSocketChannel(_chan);
_oldPrior = Thread.currentThread().getPriority();
Thread.currentThread().setPriority(Thread.MAX_PRIORITY-1);
}
}
private void restorePriority() {
if( _oldPrior == -1 ) return;
Thread.currentThread().setPriority(_oldPrior);
_oldPrior = -1;
}
// Send via UDP socket. Unlike e.g. TCP sockets, we only need one for sending
// so we keep a global one. Also, we do not close it when done, and we do
// not connect it up-front to a target - but send the entire packet right now.
private int udpSend() throws IOException {
assert _chan == null;
TimeLine.record_send(this,false);
_size = _bb.position();
assert _size < AutoBuffer.BBP_SML._size;
_bb.flip(); // Flip for sending
if( _h2o==H2O.SELF ) { // SELF-send is the multi-cast signal
water.init.NetworkInit.multicast(_bb, _msg_priority);
} else { // Else single-cast send
if(H2O.ARGS.useUDP) // Send via UDP directly
water.init.NetworkInit.CLOUD_DGRAM.send(_bb, _h2o._key);
else // Send via bulk TCP
_h2o.sendMessage(_bb, _msg_priority);
}
return 0; // Flow-coding
}
// Flip to write-mode
AutoBuffer clearForWriting(byte priority) {
assert _read;
_read = false;
_msg_priority = priority;
_bb.clear();
_firstPage = true;
return this;
}
// Flip to read-mode
public AutoBuffer flipForReading() {
assert !_read;
_read = true;
_bb.flip();
_firstPage = true;
return this;
}
/** Ensure the buffer has space for sz more bytes */
private ByteBuffer getSp( int sz ) { return sz > _bb.remaining() ? getImpl(sz) : _bb; }
/** Ensure buffer has at least sz bytes in it.
* - Also, set position just past this limit for future reading. */
private ByteBuffer getSz(int sz) {
assert _firstPage : "getSz() is only valid for early UDP bytes";
if( sz > _bb.limit() ) getImpl(sz);
_bb.position(sz);
return _bb;
}
private ByteBuffer getImpl( int sz ) {
assert _read : "Reading from a buffer in write mode";
_bb.compact(); // Move remaining unread bytes to start of buffer; prep for reading
// It's got to fit or we asked for too much
assert _bb.position()+sz <= _bb.capacity() : "("+_bb.position()+"+"+sz+" <= "+_bb.capacity()+")";
long ns = System.nanoTime();
while( _bb.position() < sz ) { // Read until we got enuf
try {
int res = readAnInt(); // Read more
// Readers are supposed to be strongly typed and read the exact expected bytes.
// However, if a TCP connection fails mid-read we'll get a short-read.
// This is indistinguishable from a mis-alignment between the writer and reader!
if( res <= 0 )
throw new AutoBufferException(new EOFException("Reading "+sz+" bytes, AB="+this));
if( _is != null ) _bb.position(_bb.position()+res); // Advance BB for Streams manually
_size += res; // What we read
} catch( IOException e ) { // Dunno how to handle so crash-n-burn
// Linux/Ubuntu message for a reset-channel
if( e.getMessage().equals("An existing connection was forcibly closed by the remote host") )
throw new AutoBufferException(e);
// Windows message for a reset-channel
if( e.getMessage().equals("An established connection was aborted by the software in your host machine") )
throw new AutoBufferException(e);
throw Log.throwErr(e);
}
}
_time_io_ns += (System.nanoTime()-ns);
_bb.flip(); // Prep for handing out bytes
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
_firstPage = false; // First page of data is gone gone gone
return _bb;
}
private int readAnInt() throws IOException {
if (_is == null) return ((ReadableByteChannel) _chan).read(_bb);
final byte[] array = _bb.array();
final int position = _bb.position();
final int remaining = _bb.remaining();
try {
return _is.read(array, position, remaining);
} catch (IOException ioe) {
throw new IOException("Failed reading " + remaining + " bytes into buffer[" + array.length + "] at " + position + " from " + sourceName + " " + _is, ioe);
}
}
/** Put as needed to keep from overflowing the ByteBuffer. */
private ByteBuffer putSp( int sz ) {
assert !_read;
if (sz > _bb.remaining()) {
if ((_h2o == null && _chan == null) || (_bb.hasArray() && _bb.capacity() < BBP_BIG._size))
expandByteBuffer(sz);
else
sendPartial();
assert sz <= _bb.remaining();
}
return _bb;
}
// Do something with partial results, because the ByteBuffer is full.
// If we are doing I/O, ship the bytes we have now and flip the ByteBuffer.
private ByteBuffer sendPartial() {
// Doing I/O with the full ByteBuffer - ship partial results
_size += _bb.position();
if( _chan == null )
TimeLine.record_send(this, true);
_bb.flip(); // Prep for writing.
try {
if( _chan == null )
tcpOpen(); // This is a big operation. Open a TCP socket as-needed.
//for( int i=0; i < _bb.limit(); i++ ) if( _bb.get(i)==0 ) _zeros++;
long ns = System.nanoTime();
while( _bb.hasRemaining() ) {
((WritableByteChannel) _chan).write(_bb);
if( RANDOM_TCP_DROP != null && SocketChannelUtils.isSocketChannel(_chan) && RANDOM_TCP_DROP.nextInt(100) == 0 )
throw new IOException("Random TCP Write Fail");
}
_time_io_ns += (System.nanoTime()-ns);
} catch( IOException e ) { // Some kind of TCP fail?
// Change to an unchecked exception (so we don't have to annotate every
// frick'n put1/put2/put4/read/write call). Retry & recovery happens at
// a higher level. AutoBuffers are used for many things including e.g.
// disk i/o & UDP writes; this exception only happens on a failed TCP
// write - and we don't want to make the other AutoBuffer users have to
// declare (and then ignore) this exception.
throw new AutoBufferException(e);
}
_firstPage = false;
_bb.clear();
return _bb;
}
// Called when the byte buffer doesn't have enough room
// If buffer is array backed, and the needed room is small,
// increase the size of the backing array,
// otherwise dump into a large direct buffer
private ByteBuffer expandByteBuffer(int sizeHint) {
final long needed = (long) sizeHint - _bb.remaining() + _bb.capacity(); // Max needed is 2G
if ((_h2o==null && _chan == null) || (_bb.hasArray() && needed < MTU)) {
if (needed > MAX_ARRAY_SIZE) {
throw new IllegalArgumentException("Cannot allocate more than 2GB array: sizeHint="+sizeHint+", "
+ "needed="+needed
+ ", bb.remaining()=" + _bb.remaining() + ", bb.capacity()="+_bb.capacity());
}
byte[] ary = _bb.array();
// just get twice what is currently needed but not more than the max array size (2G)
// Be careful not to overflow because of integer math!
int newLen = (int) Math.min(1L << (water.util.MathUtils.log2(needed)+1), MAX_ARRAY_SIZE);
int oldpos = _bb.position();
_bb = ByteBuffer.wrap(MemoryManager.arrayCopyOfRange(ary,0,newLen),oldpos,newLen-oldpos)
.order(ByteOrder.nativeOrder());
} else if (_bb.capacity() != BBP_BIG._size) { //avoid expanding existing BBP items
int oldPos = _bb.position();
_bb.flip();
_bb = BBP_BIG.make().put(_bb);
_bb.position(oldPos);
}
return _bb;
}
@SuppressWarnings("unused") public String getStr(int off, int len) {
return new String(_bb.array(), _bb.arrayOffset()+off, len, UTF_8);
}
// -----------------------------------------------
// Utility functions to get various Java primitives
@SuppressWarnings("unused") public boolean getZ() { return get1()!=0; }
@SuppressWarnings("unused") public byte get1 () { return getSp(1).get (); }
@SuppressWarnings("unused") public int get1U() { return get1() & 0xFF; }
@SuppressWarnings("unused") public char get2 () { return getSp(2).getChar (); }
@SuppressWarnings("unused") public short get2s () { return getSp(2).getShort (); }
@SuppressWarnings("unused") public int get3 () { getSp(3); return get1U() | get1U() << 8 | get1U() << 16; }
@SuppressWarnings("unused") public int get4 () { return getSp(4).getInt (); }
@SuppressWarnings("unused") public float get4f() { return getSp(4).getFloat (); }
@SuppressWarnings("unused") public long get8 () { return getSp(8).getLong (); }
@SuppressWarnings("unused") public double get8d() { return getSp(8).getDouble(); }
int get1U(int off) { return _bb.get (off)&0xFF; }
int get4 (int off) { return _bb.getInt (off); }
long get8 (int off) { return _bb.getLong(off); }
@SuppressWarnings("unused") public AutoBuffer putZ (boolean b){ return put1(b?1:0); }
@SuppressWarnings("unused") public AutoBuffer put1 ( int b) { assert b >= -128 && b <= 255 : ""+b+" is not a byte";
putSp(1).put((byte)b); return this; }
@SuppressWarnings("unused") public AutoBuffer put2 ( char c) { putSp(2).putChar (c); return this; }
@SuppressWarnings("unused") public AutoBuffer put2 ( short s) { putSp(2).putShort (s); return this; }
@SuppressWarnings("unused") public AutoBuffer put2s ( short s) { return put2(s); }
@SuppressWarnings("unused") public AutoBuffer put3( int x ) { assert (-1<<24) <= x && x < (1<<24);
return put1((x)&0xFF).put1((x >> 8)&0xFF).put1(x >> 16); }
@SuppressWarnings("unused") public AutoBuffer put4 ( int i) { putSp(4).putInt (i); return this; }
@SuppressWarnings("unused") public AutoBuffer put4f( float f) { putSp(4).putFloat (f); return this; }
@SuppressWarnings("unused") public AutoBuffer put8 ( long l) { putSp(8).putLong (l); return this; }
@SuppressWarnings("unused") public AutoBuffer put8d(double d) { putSp(8).putDouble(d); return this; }
public AutoBuffer put(Freezable f) {
if( f == null ) return putInt(TypeMap.NULL);
assert f.frozenType() > 0 : "No TypeMap for "+f.getClass().getName();
putInt(f.frozenType());
return f.write(this);
}
public <T extends Freezable> T get() {
int id = getInt();
if( id == TypeMap.NULL ) return null;
if( _is!=null ) id = _typeMap[id];
return (T)TypeMap.newFreezable(id).read(this);
}
public <T extends Freezable> T get(Class<T> tc) {
int id = getInt();
if( id == TypeMap.NULL ) return null;
if( _is!=null ) id = _typeMap[id];
assert tc.isInstance(TypeMap.theFreezable(id)):tc.getName() + " != " + TypeMap.theFreezable(id).getClass().getName() + ", id = " + id;
return (T)TypeMap.newFreezable(id).read(this);
}
// Write Key's target IFF the Key is not null; target can be null.
public AutoBuffer putKey(Key k) {
if( k==null ) return this; // Key is null ==> write nothing
Keyed kd = DKV.getGet(k);
put(kd);
return kd == null ? this : kd.writeAll_impl(this);
}
public Keyed getKey(Key k, Futures fs) {
return k==null ? null : getKey(fs); // Key is null ==> read nothing
}
public Keyed getKey(Futures fs) {
Keyed kd = get(Keyed.class);
if( kd == null ) return null;
DKV.put(kd,fs);
return kd.readAll_impl(this,fs);
}
// Put a (compressed) integer. Specifically values in the range -1 to 252
// take 1 byte, values that fit in a short take 1+2 bytes, and all other ints
// take 1+4 bytes. This compression is optimized for small integers
// (including -1, which is often used as an "array is null" flag when passing
// the array length).
public AutoBuffer putInt(int x) {
if( 0 <= (x+1)&& (x+1) <= 253 ) return put1(x+1);
if( Short.MIN_VALUE <= x && x <= Short.MAX_VALUE ) return put1(255).put2((short)x);
return put1(254).put4(x);
}
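// A minimal sketch (hypothetical method name) of the encoding costs above:
// -1..252 ship in 1 byte, short-range values in 1+2, all other ints in 1+4.
@SuppressWarnings("unused")
private static void exampleCompressedIntSizes() {
assert new AutoBuffer().putInt(-1).buf().length == 1; // 1-byte form
assert new AutoBuffer().putInt(1000).buf().length == 3; // 0xFF tag + short
assert new AutoBuffer().putInt(1 << 20).buf().length == 5; // 0xFE tag + int
}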
// Get a (compressed) integer. See above for the compression strategy and reasoning.
int getInt( ) {
int x = get1U();
if( x <= 253 ) return x-1;
if( x==255 ) return (short)get2();
assert x==254;
return get4();
}
// Put a zero-compressed array. Compression is:
// If null : putInt(-1)
// Else
// putInt(# of leading nulls)
// putInt(# of non-nulls)
// If # of non-nulls is > 0, putInt( # of trailing nulls)
long putZA( Object[] A ) {
if( A==null ) { putInt(-1); return 0; }
int x=0; for( ; x<A.length; x++ ) if( A[x ]!=null ) break;
int y=A.length; for( ; y>x; y-- ) if( A[y-1]!=null ) break;
putInt(x); // Leading zeros to skip
putInt(y-x); // Mixed non-zero guts in middle
if( y > x ) // If any non-nulls in the middle, also write...
putInt(A.length-y); // ...the trailing-null count
return ((long)x<<32)|(y-x); // Return both leading zeros, and middle non-zeros
}
// Get the lengths of a zero-compressed array.
// Returns -1 if null.
// Returns a long of (leading zeros | middle non-zeros).
// If there are non-zeros, caller has to read the trailing zero-length.
long getZA( ) {
int x=getInt(); // Length of leading zeros
if( x == -1 ) return -1; // or a null
int nz=getInt(); // Non-zero in the middle
return ((long)x<<32)|(long)nz; // Return both ints
}
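// A minimal sketch (hypothetical method name): zero-compression on
// { null, null, "a", "b", null } writes leading=2, middle=2, trailing=1,
// ships only the two non-null elements, and reads back losslessly.
@SuppressWarnings("unused")
private static void exampleZeroCompressedRoundTrip() {
String[] in = { null, null, "a", "b", null };
byte[] bytes = new AutoBuffer().putAStr(in).buf();
assert java.util.Arrays.equals(in, new AutoBuffer(bytes).getAStr());
}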
// TODO: untested. . .
@SuppressWarnings("unused")
public AutoBuffer putAEnum(Enum[] enums) {
//_arys++;
long xy = putZA(enums);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putEnum(enums[i]);
return this;
}
@SuppressWarnings("unused")
public <E extends Enum> E[] getAEnum(E[] values) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
E[] ts = (E[]) Array.newInstance(values.getClass().getComponentType(), x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getEnum(values);
return ts;
}
@SuppressWarnings("unused")
public AutoBuffer putA(Freezable[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) put(fs[i]);
return this;
}
public AutoBuffer putAA(Freezable[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAAA(Freezable[][][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA(fs[i]);
return this;
}
public <T extends Freezable> T[] getA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = get(tc);
return ts;
}
public <T extends Freezable> T[][] getAA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[]> tcA = (Class<T[]>) Array.newInstance(tc, 0).getClass();
T[][] ts = (T[][]) Array.newInstance(tcA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getA(tc);
return ts;
}
@SuppressWarnings("unused") public <T extends Freezable> T[][][] getAAA(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
Class<T[] > tcA = (Class<T[] >) Array.newInstance(tc , 0).getClass();
Class<T[][]> tcAA = (Class<T[][]>) Array.newInstance(tcA, 0).getClass();
T[][][] ts = (T[][][]) Array.newInstance(tcAA, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAA(tc);
return ts;
}
public AutoBuffer putAStr(String[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putStr(fs[i]);
return this;
}
public String[] getAStr() {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[] ts = new String[x+y+z];
for( int i = x; i < x+y; ++i ) ts[i] = getStr();
return ts;
}
@SuppressWarnings("unused") public AutoBuffer putAAStr(String[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAStr(fs[i]);
return this;
}
@SuppressWarnings("unused") public String[][] getAAStr() {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
String[][] ts = new String[x+y+z][];
for( int i = x; i < x+y; ++i ) ts[i] = getAStr();
return ts;
}
// Read the smaller of _bb.remaining() and len into buf.
// Return bytes read, which could be zero.
int read( byte[] buf, int off, int len ) {
int sz = Math.min(_bb.remaining(),len);
_bb.get(buf,off,sz);
return sz;
}
// -----------------------------------------------
// Utility functions to handle common UDP packet tasks.
// Get the 1st control byte
int getCtrl( ) { return getSz(1).get(0)&0xFF; }
// Get the port in next 2 bytes
int getPort( ) { return getSz(1+2).getChar(1); }
// Get the task# in the next 4 bytes
int getTask( ) { return getSz(1+2+4).getInt(1+2); }
// Get the flag in the next 1 byte
int getFlag( ) { return getSz(1+2+4+1).get(1+2+4); }
// Set the ctrl, port, task. Ready to write more bytes afterwards
AutoBuffer putUdp (UDP.udp type) {
assert _bb.position() == 0;
putSp(_bb.position()+1+2);
_bb.put ((byte)type.ordinal());
_bb.putChar((char)H2O.H2O_PORT ); // Outgoing port is always the sender's (me) port
return this;
}
AutoBuffer putTask(UDP.udp type, int tasknum) {
return putUdp(type).put4(tasknum);
}
AutoBuffer putTask(int ctrl, int tasknum) {
assert _bb.position() == 0;
putSp(_bb.position()+1+2+4);
_bb.put((byte)ctrl).putChar((char)H2O.H2O_PORT).putInt(tasknum);
return this;
}
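// A minimal sketch (hypothetical method name and ctrl value): every UDP
// packet leads with [ctrl:1][port:2][task:4], written by putTask and read
// back by the getCtrl/getPort/getTask helpers above (getPort would return
// this node's H2O.H2O_PORT).
@SuppressWarnings("unused")
private static void exampleUdpHeader() {
AutoBuffer ab = new AutoBuffer(16).putTask(1, 42).flipForReading();
assert ab.getCtrl() == 1;
assert ab.getTask() == 42;
}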
// -----------------------------------------------
// Utility functions to read & write arrays
public boolean[] getAZ() {
int len = getInt();
if (len == -1) return null;
boolean[] r = new boolean[len];
for (int i=0;i<len;++i) r[i] = getZ();
return r;
}
public byte[] getA1( ) {
//_arys++;
int len = getInt();
return len == -1 ? null : getA1(len);
}
public byte[] getA1( int len ) {
byte[] buf = MemoryManager.malloc1(len);
int sofar = 0;
while( sofar < len ) {
int more = Math.min(_bb.remaining(), len - sofar);
_bb.get(buf, sofar, more);
sofar += more;
if( sofar < len ) getSp(Math.min(_bb.capacity(), len-sofar));
}
return buf;
}
public short[] getA2( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
short[] buf = MemoryManager.malloc2(len);
int sofar = 0;
while( sofar < buf.length ) {
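// Bulk-copy through a short-typed view of the ByteBuffer, then advance
// _bb manually: the view's position counts shorts, _bb's counts bytes
// (hence the *2 below).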
ShortBuffer as = _bb.asShortBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*2);
if( sofar < len ) getSp(Math.min(_bb.capacity()-1, (len-sofar)*2));
}
return buf;
}
public int[] getA4( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
int[] buf = MemoryManager.malloc4(len);
int sofar = 0;
while( sofar < buf.length ) {
IntBuffer as = _bb.asIntBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public float[] getA4f( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
float[] buf = MemoryManager.malloc4f(len);
int sofar = 0;
while( sofar < buf.length ) {
FloatBuffer as = _bb.asFloatBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*4);
if( sofar < len ) getSp(Math.min(_bb.capacity()-3, (len-sofar)*4));
}
return buf;
}
public long[] getA8( ) {
//_arys++;
// Get the lengths of lead & trailing zero sections, and the non-zero
// middle section.
int x = getInt(); if( x == -1 ) return null;
int y = getInt(); // Non-zero in the middle
int z = y==0 ? 0 : getInt();// Trailing zeros
long[] buf = MemoryManager.malloc8(x+y+z);
switch( get1U() ) { // 1,2,4 or 8 for how the middle section is passed
case 1: for( int i=x; i<x+y; i++ ) buf[i] = get1U(); return buf;
case 2: for( int i=x; i<x+y; i++ ) buf[i] = (short)get2(); return buf;
case 4: for( int i=x; i<x+y; i++ ) buf[i] = get4(); return buf;
case 8: break;
default: throw H2O.fail();
}
int sofar = x;
while( sofar < x+y ) {
LongBuffer as = _bb.asLongBuffer();
int more = Math.min(as.remaining(), x+y - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < x+y ) getSp(Math.min(_bb.capacity()-7, (x+y-sofar)*8));
}
return buf;
}
public double[] getA8d( ) {
//_arys++;
int len = getInt(); if( len == -1 ) return null;
double[] buf = MemoryManager.malloc8d(len);
int sofar = 0;
while( sofar < len ) {
DoubleBuffer as = _bb.asDoubleBuffer();
int more = Math.min(as.remaining(), len - sofar);
as.get(buf, sofar, more);
sofar += more;
_bb.position(_bb.position() + as.position()*8);
if( sofar < len ) getSp(Math.min(_bb.capacity()-7, (len-sofar)*8));
}
return buf;
}
@SuppressWarnings("unused")
public byte[][] getAA1( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
byte[][] ary = new byte[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA1();
return ary;
}
@SuppressWarnings("unused")
public short[][] getAA2( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
short[][] ary = new short[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA2();
return ary;
}
public int[][] getAA4( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][] ary = new int[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4();
return ary;
}
@SuppressWarnings("unused") public float[][] getAA4f( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
float[][] ary = new float[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA4f();
return ary;
}
public long[][] getAA8( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][] ary = new long[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8();
return ary;
}
@SuppressWarnings("unused") public double[][] getAA8d( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][] ary = new double[x+y+z][];
for( int i=x; i<x+y; i++ ) ary[i] = getA8d();
return ary;
}
@SuppressWarnings("unused") public int[][][] getAAA4( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
int[][][] ary = new int[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA4();
return ary;
}
@SuppressWarnings("unused") public long[][][] getAAA8( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
long[][][] ary = new long[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8();
return ary;
}
public double[][][] getAAA8d( ) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
double[][][] ary = new double[x+y+z][][];
for( int i=x; i<x+y; i++ ) ary[i] = getAA8d();
return ary;
}
public String getStr( ) {
int len = getInt();
return len == -1 ? null : new String(getA1(len), UTF_8);
}
public <E extends Enum> E getEnum(E[] values ) {
int idx = get1();
return idx == -1 ? null : values[idx];
}
public AutoBuffer putAZ( boolean[] ary ) {
if( ary == null ) return putInt(-1);
putInt(ary.length);
for (boolean anAry : ary) putZ(anAry);
return this;
}
public AutoBuffer putA1( byte[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
return putA1(ary,ary.length);
}
public AutoBuffer putA1( byte[] ary, int length ) { return putA1(ary,0,length); }
public AutoBuffer putA1( byte[] ary, int sofar, int length ) {
if (length - sofar > _bb.remaining()) expandByteBuffer(length-sofar);
while( sofar < length ) {
int len = Math.min(length - sofar, _bb.remaining());
_bb.put(ary, sofar, len);
sofar += len;
if( sofar < length ) sendPartial();
}
return this;
}
AutoBuffer putA2( short[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*2 > _bb.remaining()) expandByteBuffer(ary.length*2);
int sofar = 0;
while( sofar < ary.length ) {
ShortBuffer sb = _bb.asShortBuffer();
int len = Math.min(ary.length - sofar, sb.remaining());
sb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + sb.position()*2);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA4( int[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
// Note: based on Brandon commit this should improve performance during parse (7d950d622ee3037555ecbab0e39404f8f0917652)
if (ary.length*4 > _bb.remaining()) {
expandByteBuffer(ary.length*4); // Try to expand BB buffer to fit input array
}
int sofar = 0;
while( sofar < ary.length ) {
IntBuffer ib = _bb.asIntBuffer();
int len = Math.min(ary.length - sofar, ib.remaining());
ib.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + ib.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8( long[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
// Trim leading & trailing zeros. Pass along the length of leading &
// trailing zero sections, and the non-zero section in the middle.
int x=0; for( ; x<ary.length; x++ ) if( ary[x ]!=0 ) break;
int y=ary.length; for( ; y>x; y-- ) if( ary[y-1]!=0 ) break;
int nzlen = y-x;
putInt(x);
putInt(nzlen);
    if( nzlen > 0 )             // Only if there is a non-zero section...
      putInt(ary.length-y);     // ...send the trailing-zero count as well
// Size trim the NZ section: pass as bytes or shorts if possible.
long min=Long.MAX_VALUE, max=Long.MIN_VALUE;
for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; }
if( 0 <= min && max < 256 ) { // Ship as unsigned bytes
put1(1); for( int i=x; i<y; i++ ) put1((int)ary[i]);
return this;
}
if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) { // Ship as shorts
put1(2); for( int i=x; i<y; i++ ) put2((short)ary[i]);
return this;
}
if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) { // Ship as ints
put1(4); for( int i=x; i<y; i++ ) put4((int)ary[i]);
return this;
}
put1(8); // Ship as full longs
int sofar = x;
if ((y-sofar)*8 > _bb.remaining()) expandByteBuffer(ary.length*8);
while( sofar < y ) {
LongBuffer lb = _bb.asLongBuffer();
int len = Math.min(y - sofar, lb.remaining());
lb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + lb.position() * 8);
if( sofar < y ) sendPartial();
}
return this;
}
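  // A minimal sketch (not part of the original API; the name is hypothetical):
  // the width-selection rule from putA8 above, extracted as a helper. It
  // returns the per-element byte width (1, 2, 4 or 8) that putA8 picks for the
  // non-zero section ary[x..y), including the empty-section case (width 1).
  private static int elementWidthForA8(long[] ary, int x, int y) {
    long min=Long.MAX_VALUE, max=Long.MIN_VALUE;
    for( int i=x; i<y; i++ ) { if( ary[i]<min ) min=ary[i]; if( ary[i]>max ) max=ary[i]; }
    if( 0 <= min && max < 256 ) return 1;                               // unsigned bytes
    if( Short.MIN_VALUE <= min && max < Short.MAX_VALUE ) return 2;     // shorts
    if( Integer.MIN_VALUE <= min && max < Integer.MAX_VALUE ) return 4; // ints
    return 8;                                                           // full longs
  }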
public AutoBuffer putA4f( float[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*4 > _bb.remaining()) expandByteBuffer(ary.length*4);
int sofar = 0;
while( sofar < ary.length ) {
FloatBuffer fb = _bb.asFloatBuffer();
int len = Math.min(ary.length - sofar, fb.remaining());
fb.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + fb.position()*4);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putA8d( double[] ary ) {
//_arys++;
if( ary == null ) return putInt(-1);
putInt(ary.length);
if (ary.length*8 > _bb.remaining()) expandByteBuffer(ary.length*8);
int sofar = 0;
while( sofar < ary.length ) {
DoubleBuffer db = _bb.asDoubleBuffer();
int len = Math.min(ary.length - sofar, db.remaining());
db.put(ary, sofar, len);
sofar += len;
_bb.position(_bb.position() + db.position()*8);
if( sofar < ary.length ) sendPartial();
}
return this;
}
public AutoBuffer putAA1( byte[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA1(ary[i]);
return this;
}
@SuppressWarnings("unused") AutoBuffer putAA2( short[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA2(ary[i]);
return this;
}
public AutoBuffer putAA4( int[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4(ary[i]);
return this;
}
@SuppressWarnings("unused")
public AutoBuffer putAA4f( float[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA4f(ary[i]);
return this;
}
public AutoBuffer putAA8( long[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8(ary[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAA8d( double[][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putA8d(ary[i]);
return this;
}
public AutoBuffer putAAA4( int[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA4(ary[i]);
return this;
}
public AutoBuffer putAAA8( long[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8(ary[i]);
return this;
}
public AutoBuffer putAAA8d( double[][][] ary ) {
//_arys++;
long xy = putZA(ary);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAA8d(ary[i]);
return this;
}
// Put a String as bytes (not chars!)
public AutoBuffer putStr( String s ) {
if( s==null ) return putInt(-1);
return putA1(StringUtils.bytesOf(s));
}
@SuppressWarnings("unused") public AutoBuffer putEnum( Enum x ) {
return put1(x==null ? -1 : x.ordinal());
}
public static byte[] javaSerializeWritePojo(Object o) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream out = null;
try {
out = new ObjectOutputStream(bos);
out.writeObject(o);
out.close();
return bos.toByteArray();
} catch (IOException e) {
throw Log.throwErr(e);
}
}
public static Object javaSerializeReadPojo(byte [] bytes) {
try {
final ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
Object o = ois.readObject();
return o;
} catch (IOException e) {
String className = nameOfClass(bytes);
throw Log.throwErr(new RuntimeException("Failed to deserialize " + className, e));
} catch (ClassNotFoundException e) {
throw Log.throwErr(e);
}
}
static String nameOfClass(byte[] bytes) {
if (bytes == null) return "(null)";
if (bytes.length < 11) return "(no name)";
int nameSize = Math.min(40, Math.max(3, bytes[7]));
return new String(bytes, 8, Math.min(nameSize, bytes.length - 8));
}
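  // Round-trip sketch (illustrative only):
  //   byte[] bytes = javaSerializeWritePojo(new java.util.Date());
  //   Object back  = javaSerializeReadPojo(bytes);
  // nameOfClass is a best-effort peek into the Java serialization stream
  // header: it assumes the low byte of the class-name length sits at offset 7
  // and the name itself starts at offset 8, and is only used to improve the
  // deserialization error message above.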
// ==========================================================================
// Java Serializable objects
// Note: These are heck-a-lot more expensive than their Freezable equivalents.
@SuppressWarnings("unused") public AutoBuffer putSer( Object obj ) {
if (obj == null) return putA1(null);
return putA1(javaSerializeWritePojo(obj));
}
@SuppressWarnings("unused") public AutoBuffer putASer(Object[] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putSer(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAASer(Object[][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putASer(fs[i]);
return this;
}
@SuppressWarnings("unused") public AutoBuffer putAAASer(Object[][][] fs) {
//_arys++;
long xy = putZA(fs);
if( xy == -1 ) return this;
int x=(int)(xy>>32);
int y=(int)xy;
for( int i=x; i<x+y; i++ ) putAASer(fs[i]);
return this;
}
@SuppressWarnings("unused") public Object getSer() {
byte[] ba = getA1();
return ba == null ? null : javaSerializeReadPojo(ba);
}
@SuppressWarnings("unused") public <T> T getSer(Class<T> tc) {
return (T)getSer();
}
@SuppressWarnings("unused") public <T> T[] getASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[] ts = (T[]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getSer(tc);
return ts;
}
@SuppressWarnings("unused") public <T> T[][] getAASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[][] ts = (T[][]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getASer(tc);
return ts;
}
@SuppressWarnings("unused") public <T> T[][][] getAAASer(Class<T> tc) {
//_arys++;
long xy = getZA();
if( xy == -1 ) return null;
int x=(int)(xy>>32); // Leading nulls
int y=(int)xy; // Middle non-zeros
int z = y==0 ? 0 : getInt(); // Trailing nulls
T[][][] ts = (T[][][]) Array.newInstance(tc, x+y+z);
for( int i = x; i < x+y; ++i ) ts[i] = getAASer(tc);
return ts;
}
// ==========================================================================
// JSON AutoBuffer printers
public AutoBuffer putJNULL( ) { return put1('n').put1('u').put1('l').put1('l'); }
// Escaped JSON string
private AutoBuffer putJStr( String s ) {
byte[] b = StringUtils.bytesOf(s);
int off=0;
for( int i=0; i<b.length; i++ ) {
if( b[i] == '\\' || b[i] == '"') { // Double up backslashes, escape quotes
putA1(b,off,i); // Everything so far (no backslashes)
put1('\\'); // The extra backslash
off=i; // Advance the "so far" variable
}
// Handle remaining special cases in JSON
// if( b[i] == '/' ) { putA1(b,off,i); put1('\\'); put1('/'); off=i+1; continue;}
if( b[i] == '\b' ) { putA1(b,off,i); put1('\\'); put1('b'); off=i+1; continue;}
if( b[i] == '\f' ) { putA1(b,off,i); put1('\\'); put1('f'); off=i+1; continue;}
if( b[i] == '\n' ) { putA1(b,off,i); put1('\\'); put1('n'); off=i+1; continue;}
if( b[i] == '\r' ) { putA1(b,off,i); put1('\\'); put1('r'); off=i+1; continue;}
if( b[i] == '\t' ) { putA1(b,off,i); put1('\\'); put1('t'); off=i+1; continue;}
// ASCII Control characters
if( b[i] == 127 ) { putA1(b,off,i); put1('\\'); put1('u'); put1('0'); put1('0'); put1('7'); put1('f'); off=i+1; continue;}
if( b[i] >= 0 && b[i] < 32 ) {
String hexStr = Integer.toHexString(b[i]);
putA1(b, off, i); put1('\\'); put1('u');
for (int j = 0; j < 4 - hexStr.length(); j++) put1('0');
      for (int j = 0; j < hexStr.length(); j++) put1(hexStr.charAt(j)); // hex digits in order
off=i+1;
}
}
return putA1(b,off,b.length);
}
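  // Example (illustrative): for an input containing  a"b  followed by a
  // newline and  c , putJStr emits the bytes  a\"b\nc  -- quotes and
  // backslashes are escaped inline, the named controls use two-character
  // escapes, and other control characters below 0x20 become \u00XX.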
public AutoBuffer putJSONStrUnquoted ( String s ) { return s==null ? putJNULL() : putJStr(s); }
public AutoBuffer putJSONStrUnquoted ( String name, String s ) { return s==null ? putJSONStr(name).put1(':').putJNULL() : putJSONStr(name).put1(':').putJStr(s); }
public AutoBuffer putJSONName( String s ) { return put1('"').putJStr(s).put1('"'); }
public AutoBuffer putJSONStr ( String s ) { return s==null ? putJNULL() : putJSONName(s); }
public AutoBuffer putJSONAStr(String[] ss) {
if( ss == null ) return putJNULL();
put1('[');
for( int i=0; i<ss.length; i++ ) {
if( i>0 ) put1(',');
putJSONStr(ss[i]);
}
return put1(']');
}
private AutoBuffer putJSONAAStr( String[][] sss) {
if( sss == null ) return putJNULL();
put1('[');
for( int i=0; i<sss.length; i++ ) {
if( i>0 ) put1(',');
putJSONAStr(sss[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSONStr (String name, String s ) { return putJSONStr(name).put1(':').putJSONStr(s); }
@SuppressWarnings("unused") public AutoBuffer putJSONAStr (String name, String[] ss ) { return putJSONStr(name).put1(':').putJSONAStr(ss); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAStr(String name, String[][]sss) { return putJSONStr(name).put1(':').putJSONAAStr(sss); }
@SuppressWarnings("unused") public AutoBuffer putJSONSer (String name, Object o ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONASer (String name, Object[] oo ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONAASer (String name, Object[][] ooo ) { return putJSONStr(name).put1(':').putJNULL(); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAASer(String name, Object[][][] oooo) { return putJSONStr(name).put1(':').putJNULL(); }
public AutoBuffer putJSONAZ( String name, boolean[] f) { return putJSONStr(name).put1(':').putJSONAZ(f); }
public AutoBuffer putJSON(Freezable ice) { return ice == null ? putJNULL() : ice.writeJSON(this); }
public AutoBuffer putJSONA( Freezable fs[] ) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSON(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA( Freezable fs[][]) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONA(fs[i]);
}
return put1(']');
}
public AutoBuffer putJSONAAA( Freezable fs[][][]) {
if( fs == null ) return putJNULL();
put1('[');
for( int i=0; i<fs.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA(fs[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSON ( String name, Freezable f ) { return putJSONStr(name).put1(':').putJSON (f); }
public AutoBuffer putJSONA ( String name, Freezable f[] ) { return putJSONStr(name).put1(':').putJSONA (f); }
@SuppressWarnings("unused") public AutoBuffer putJSONAA( String name, Freezable f[][]){ return putJSONStr(name).put1(':').putJSONAA(f); }
@SuppressWarnings("unused") public AutoBuffer putJSONAAA( String name, Freezable f[][][]){ return putJSONStr(name).put1(':').putJSONAAA(f); }
@SuppressWarnings("unused") public AutoBuffer putJSONZ( String name, boolean value ) { return putJSONStr(name).put1(':').putJStr("" + value); }
private AutoBuffer putJSONAZ(boolean [] b) {
if (b == null) return putJNULL();
put1('[');
for( int i = 0; i < b.length; ++i) {
if (i > 0) put1(',');
putJStr(""+b[i]);
}
return put1(']');
}
// Most simple integers
private AutoBuffer putJInt( int i ) {
byte b[] = StringUtils.toBytes(i);
return putA1(b,b.length);
}
public AutoBuffer putJSON1( byte b ) { return putJInt(b); }
public AutoBuffer putJSONA1( byte ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON1(ary[i]);
}
return put1(']');
}
private AutoBuffer putJSONAA1(byte ary[][]) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA1(ary[i]);
}
return put1(']');
}
@SuppressWarnings("unused") public AutoBuffer putJSON1 (String name, byte b ) { return putJSONStr(name).put1(':').putJSON1(b); }
@SuppressWarnings("unused") public AutoBuffer putJSONA1 (String name, byte b[] ) { return putJSONStr(name).put1(':').putJSONA1(b); }
@SuppressWarnings("unused") public AutoBuffer putJSONAA1(String name, byte b[][]) { return putJSONStr(name).put1(':').putJSONAA1(b); }
public AutoBuffer putJSONAEnum(String name, Enum[] enums) {
return putJSONStr(name).put1(':').putJSONAEnum(enums);
}
public AutoBuffer putJSONAEnum( Enum[] enums ) {
if( enums == null ) return putJNULL();
put1('[');
for( int i=0; i<enums.length; i++ ) {
if( i>0 ) put1(',');
putJSONEnum(enums[i]);
}
return put1(']');
}
AutoBuffer putJSON2( char c ) { return putJSON4(c); }
AutoBuffer putJSON2( String name, char c ) { return putJSONStr(name).put1(':').putJSON2(c); }
AutoBuffer putJSON2( short c ) { return putJSON4(c); }
AutoBuffer putJSON2( String name, short c ) { return putJSONStr(name).put1(':').putJSON2(c); }
public AutoBuffer putJSONA2( String name, short ary[] ) { return putJSONStr(name).put1(':').putJSONA2(ary); }
AutoBuffer putJSONA2( short ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON2(ary[i]);
}
return put1(']');
}
AutoBuffer putJSON8 ( long l ) { return putJStr(Long.toString(l)); }
AutoBuffer putJSONA8( long ary[] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSON8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONAA8( long ary[][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA8( long ary[][][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8(ary[i]);
}
return put1(']');
}
AutoBuffer putJSONEnum( Enum e ) {
return e==null ? putJNULL() : put1('"').putJStr(e.toString()).put1('"');
}
public AutoBuffer putJSON8 ( String name, long l ) { return putJSONStr(name).put1(':').putJSON8(l); }
public AutoBuffer putJSONEnum( String name, Enum e ) { return putJSONStr(name).put1(':').putJSONEnum(e); }
public AutoBuffer putJSONA8( String name, long ary[] ) { return putJSONStr(name).put1(':').putJSONA8(ary); }
public AutoBuffer putJSONAA8( String name, long ary[][] ) { return putJSONStr(name).put1(':').putJSONAA8(ary); }
public AutoBuffer putJSONAAA8( String name, long ary[][][] ) { return putJSONStr(name).put1(':').putJSONAAA8(ary); }
public AutoBuffer putJSON4(int i) { return putJStr(Integer.toString(i)); }
AutoBuffer putJSONA4( int[] a) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAA4( int[][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA4( int[][][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA4(a[i]);
}
return put1(']');
}
public AutoBuffer putJSON4 ( String name, int i ) { return putJSONStr(name).put1(':').putJSON4(i); }
public AutoBuffer putJSONA4( String name, int[] a) { return putJSONStr(name).put1(':').putJSONA4(a); }
public AutoBuffer putJSONAA4( String name, int[][] a ) { return putJSONStr(name).put1(':').putJSONAA4(a); }
public AutoBuffer putJSONAAA4( String name, int[][][] a ) { return putJSONStr(name).put1(':').putJSONAAA4(a); }
AutoBuffer putJSON4f ( float f ) { return f==Float.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(f==Float.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Float.isNaN(f)?putJSONStr(JSON_NAN):putJStr(Float .toString(f)))); }
public AutoBuffer putJSON4f ( String name, float f ) { return putJSONStr(name).put1(':').putJSON4f(f); }
AutoBuffer putJSONA4f( float[] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON4f(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONA4f(String name, float[] a) {
putJSONStr(name).put1(':');
return putJSONA4f(a);
}
AutoBuffer putJSONAA4f(String name, float[][] a) {
putJSONStr(name).put1(':');
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA4f(a[i]);
}
return put1(']');
}
AutoBuffer putJSON8d( double d ) {
if (TwoDimTable.isEmpty(d)) return putJNULL();
return d==Double.POSITIVE_INFINITY?putJSONStr(JSON_POS_INF):(d==Double.NEGATIVE_INFINITY?putJSONStr(JSON_NEG_INF):(Double.isNaN(d)?putJSONStr(JSON_NAN):putJStr(Double.toString(d))));
}
public AutoBuffer putJSON8d( String name, double d ) { return putJSONStr(name).put1(':').putJSON8d(d); }
public AutoBuffer putJSONA8d( String name, double[] a ) {
return putJSONStr(name).put1(':').putJSONA8d(a);
}
public AutoBuffer putJSONAA8d( String name, double[][] a) {
return putJSONStr(name).put1(':').putJSONAA8d(a);
}
public AutoBuffer putJSONAAA8d( String name, double[][][] a) { return putJSONStr(name).put1(':').putJSONAAA8d(a); }
public AutoBuffer putJSONA8d( double[] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSON8d(a[i]);
}
return put1(']');
}
public AutoBuffer putJSONAA8d( double[][] a ) {
if( a == null ) return putJNULL();
put1('[');
for( int i=0; i<a.length; i++ ) {
if( i>0 ) put1(',');
putJSONA8d(a[i]);
}
return put1(']');
}
AutoBuffer putJSONAAA8d( double ary[][][] ) {
if( ary == null ) return putJNULL();
put1('[');
for( int i=0; i<ary.length; i++ ) {
if( i>0 ) put1(',');
putJSONAA8d(ary[i]);
}
return put1(']');
}
static final String JSON_NAN = "NaN";
static final String JSON_POS_INF = "Infinity";
static final String JSON_NEG_INF = "-Infinity";
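  // Usage sketch (illustrative; assumes an AutoBuffer 'ab' backed by a large
  // enough buffer):
  //   ab.put1('{')
  //     .putJSONStr("name", "h2o").put1(',')
  //     .putJSONA4("counts", new int[]{1,2,3})
  //     .put1('}');
  // produces the bytes: {"name":"h2o","counts":[1,2,3]}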
}
|
JavaScript
|
"use strict";
import chai from "chai";
import chaiAsPromised from "chai-as-promised";
import sinon from "sinon";
import BusinessElementsClient from "../src";
import uuid from "uuid";
import * as requests from "../src/requests";
chai.use(chaiAsPromised);
chai.should();
chai.config.includeStack = true;
const FAKE_SERVER_URL = "http://api.fake-server";
/** @test {Attribute} */
describe("Attribute", () => {
let sandbox, client, attributeId, attribute;
beforeEach(() => {
sandbox = sinon.sandbox.create();
client = new BusinessElementsClient(FAKE_SERVER_URL);
attributeId = uuid.v4();
attribute = client.tenant("example.com").attributes().attribute(attributeId);
});
afterEach(() => {
sandbox.restore();
});
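  // Each test stubs client.execute on a sinon sandbox so no HTTP request is
  // actually made; afterEach restores the stubs to keep the tests isolated.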
/** @test {Attribute#get} */
describe("#get()", () => {
const data = {id: attributeId};
beforeEach(() => {
sandbox.stub(client, "execute").returns(Promise.resolve(data));
});
it("should get capture", () => {
attribute.get();
sinon.assert.calledWithMatch(client.execute, {
path: `/attributes/${attributeId}`
});
});
it("should return attribute data", () => {
return attribute.get().should.become(data);
});
});
/** @test {Attribute#edit} */
describe("#edit()", () => {
const response = {status: "Ok"};
const schema = {
"type": "object",
"properties": {
"type": {
"title": "type",
"type": "string"
}
}
};
beforeEach(() => {
sandbox.stub(client, "execute").returns(Promise.resolve(response));
sandbox.spy(requests, "updateAttribute");
});
it("should edit the attribute", () => {
attribute.edit(schema, {});
sinon.assert.calledWithMatch(requests.updateAttribute, attributeId, schema);
});
it("should return success", () => {
return attribute.edit(schema, {}).should.eventually.become(response);
});
});
/** @test {Attribute#remove} */
describe("#remove()", () => {
const response = {status: "Ok"};
beforeEach(() => {
sandbox.stub(client, "execute").returns(Promise.resolve(response));
sandbox.spy(requests, "deleteAttribute");
});
it("should delete the attribute", () => {
attribute.remove({});
sinon.assert.calledWithMatch(requests.deleteAttribute, attributeId);
});
it("should return success", () => {
return attribute.remove({}).should.eventually.become(response);
});
});
});
|
Java
|
/*
* Copyright (c) 2010-2013 Evolveum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.evolveum.midpoint.model.impl.lens;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import javax.xml.namespace.QName;
import com.evolveum.midpoint.prism.*;
import com.evolveum.midpoint.schema.DeltaConvertor;
import com.evolveum.midpoint.schema.result.OperationResult;
import com.evolveum.midpoint.util.exception.*;
import com.evolveum.midpoint.xml.ns._public.model.model_context_3.LensProjectionContextType;
import org.apache.commons.lang.StringUtils;
import org.jvnet.jaxb2_commons.lang.Validate;
import com.evolveum.midpoint.common.crypto.CryptoUtil;
import com.evolveum.midpoint.common.refinery.RefinedObjectClassDefinition;
import com.evolveum.midpoint.common.refinery.RefinedResourceSchema;
import com.evolveum.midpoint.common.refinery.ResourceShadowDiscriminator;
import com.evolveum.midpoint.model.api.context.ModelProjectionContext;
import com.evolveum.midpoint.model.api.context.SynchronizationPolicyDecision;
import com.evolveum.midpoint.prism.delta.ChangeType;
import com.evolveum.midpoint.prism.delta.DeltaSetTriple;
import com.evolveum.midpoint.prism.delta.ObjectDelta;
import com.evolveum.midpoint.prism.delta.PrismValueDeltaSetTriple;
import com.evolveum.midpoint.prism.delta.ReferenceDelta;
import com.evolveum.midpoint.prism.path.ItemPath;
import com.evolveum.midpoint.schema.processor.ResourceAttribute;
import com.evolveum.midpoint.schema.processor.ResourceSchema;
import com.evolveum.midpoint.schema.util.MiscSchemaUtil;
import com.evolveum.midpoint.schema.util.ShadowUtil;
import com.evolveum.midpoint.schema.util.ResourceTypeUtil;
import com.evolveum.midpoint.schema.util.SchemaDebugUtil;
import com.evolveum.midpoint.util.Cloner;
import com.evolveum.midpoint.util.DebugUtil;
import com.evolveum.midpoint.xml.ns._public.common.common_3.AssignmentPolicyEnforcementType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.FocusType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.LayerType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ObjectType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultStatusType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.OperationResultType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ProjectionPolicyType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDefinitionType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceObjectTypeDependencyType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ResourceType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowAssociationType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowDiscriminatorType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowKindType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ShadowType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.SynchronizationSituationType;
import com.evolveum.midpoint.xml.ns._public.common.common_3.ValuePolicyType;
/**
* @author semancik
*
*/
public class LensProjectionContext extends LensElementContext<ShadowType> implements ModelProjectionContext {
private ObjectDelta<ShadowType> syncDelta;
/**
     * If true, the absolute state of this projection was detected by the synchronization.
     * This is mostly for debugging and visibility. It is not used by the projection logic.
*/
private boolean syncAbsoluteTrigger = false;
/**
* The wave in which this resource should be processed. Initial value of -1 means "undetermined".
*/
private int wave = -1;
/**
* Indicates that the wave computation is still in progress.
*/
private transient boolean waveIncomplete = false;
/**
* Definition of account type.
*/
private ResourceShadowDiscriminator resourceShadowDiscriminator;
private boolean fullShadow = false;
/**
* True if the account is "legal" (assigned to the user). It may be false for accounts that are either
* found to be illegal by live sync, were unassigned from user, etc.
* If set to null the situation is not yet known. Null is a typical value when the context is constructed.
*/
private boolean isAssigned;
/**
* True if the account should be part of the synchronization. E.g. outbound expression should be applied to it.
*/
private boolean isActive;
/**
     * True if there is a valid assignment for this projection and/or the policy allows such a projection to exist.
*/
private Boolean isLegal = null;
private Boolean isLegalOld = null;
private boolean isExists;
/**
     * Decision regarding the account. It indicates what the engine has DECIDED TO DO with the context.
     * If set to null, no decision has been made yet. Null is also a typical value when the context is created.
*/
private SynchronizationPolicyDecision synchronizationPolicyDecision;
/**
* True if we want to reconcile account in this context.
*/
private boolean doReconciliation;
/**
* Synchronization situation as it was originally detected by the synchronization code (SynchronizationService).
* This is mostly for debug purposes. Projector and clockwork do not need to care about this.
* The synchronization intent is used instead.
*/
private SynchronizationSituationType synchronizationSituationDetected = null;
/**
* Synchronization situation which was the result of synchronization reaction (projector and clockwork run).
* This is mostly for debug purposes. Projector and clockwork do not care about this (except for setting it).
* The synchronization decision is used instead.
*/
private SynchronizationSituationType synchronizationSituationResolved = null;
/**
* Delta set triple for accounts. Specifies which accounts should be added, removed or stay as they are.
     * It tells almost nothing about attributes directly, although the information about attributes is inside
     * each account construction (in the form of a ValueConstruction that contains attribute delta triples).
*
* Intermediary computation result. It is stored to allow re-computing of account constructions during
* iterative computations.
*/
private transient PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple;
private transient Construction outboundConstruction;
private transient Collection<ResourceObjectTypeDependencyType> dependencies = null;
private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes;
private transient Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations;
private ValuePolicyType accountPasswordPolicy;
/**
* Resource that hosts this projection.
*/
transient private ResourceType resource;
LensProjectionContext(LensContext<? extends ObjectType> lensContext, ResourceShadowDiscriminator resourceAccountType) {
super(ShadowType.class, lensContext);
this.resourceShadowDiscriminator = resourceAccountType;
this.isAssigned = false;
}
public ObjectDelta<ShadowType> getSyncDelta() {
return syncDelta;
}
public void setSyncDelta(ObjectDelta<ShadowType> syncDelta) {
this.syncDelta = syncDelta;
}
public boolean isSyncAbsoluteTrigger() {
return syncAbsoluteTrigger;
}
public void setSyncAbsoluteTrigger(boolean syncAbsoluteTrigger) {
this.syncAbsoluteTrigger = syncAbsoluteTrigger;
}
public int getWave() {
return wave;
}
public void setWave(int wave) {
this.wave = wave;
}
public boolean isWaveIncomplete() {
return waveIncomplete;
}
public void setWaveIncomplete(boolean waveIncomplete) {
this.waveIncomplete = waveIncomplete;
}
public boolean isDoReconciliation() {
return doReconciliation;
}
public void setDoReconciliation(boolean doReconciliation) {
this.doReconciliation = doReconciliation;
}
public ResourceShadowDiscriminator getResourceShadowDiscriminator() {
return resourceShadowDiscriminator;
}
public void setResourceShadowDiscriminator(ResourceShadowDiscriminator resourceShadowDiscriminator) {
this.resourceShadowDiscriminator = resourceShadowDiscriminator;
}
public boolean compareResourceShadowDiscriminator(ResourceShadowDiscriminator rsd, boolean compareOrder) {
Validate.notNull(rsd.getResourceOid());
if (resourceShadowDiscriminator == null) {
            // This may be a valid case, e.g. for broken contexts or while a context is still loading
return false;
}
if (!rsd.getResourceOid().equals(resourceShadowDiscriminator.getResourceOid())) {
return false;
}
if (!rsd.getKind().equals(resourceShadowDiscriminator.getKind())) {
return false;
}
if (rsd.isThombstone() != resourceShadowDiscriminator.isThombstone()) {
return false;
}
if (rsd.getIntent() == null) {
try {
if (!getRefinedAccountDefinition().isDefaultInAKind()) {
return false;
}
} catch (SchemaException e) {
throw new SystemException("Internal error: "+e.getMessage(), e);
}
} else if (!rsd.getIntent().equals(resourceShadowDiscriminator.getIntent())) {
return false;
}
if (compareOrder && rsd.getOrder() != resourceShadowDiscriminator.getOrder()) {
return false;
}
return true;
}
public boolean isThombstone() {
if (resourceShadowDiscriminator == null) {
return false;
}
return resourceShadowDiscriminator.isThombstone();
}
public void addAccountSyncDelta(ObjectDelta<ShadowType> delta) throws SchemaException {
if (syncDelta == null) {
syncDelta = delta;
} else {
syncDelta.merge(delta);
}
}
public boolean isAdd() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
return super.isAdd();
}
public boolean isModify() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.KEEP) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
return super.isModify();
}
public boolean isDelete() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.DELETE) {
return true;
} else if (synchronizationPolicyDecision != null){
return false;
}
if (syncDelta != null && syncDelta.isDelete()) {
return true;
}
return super.isDelete();
}
public ResourceType getResource() {
return resource;
}
public void setResource(ResourceType resource) {
this.resource = resource;
}
public boolean isAssigned() {
return isAssigned;
}
public void setAssigned(boolean isAssigned) {
this.isAssigned = isAssigned;
}
public boolean isActive() {
return isActive;
}
public void setActive(boolean isActive) {
this.isActive = isActive;
}
public Boolean isLegal() {
return isLegal;
}
public void setLegal(Boolean isLegal) {
this.isLegal = isLegal;
}
public Boolean isLegalOld() {
return isLegalOld;
}
public void setLegalOld(Boolean isLegalOld) {
this.isLegalOld = isLegalOld;
}
public boolean isExists() {
return isExists;
}
public void setExists(boolean exists) {
this.isExists = exists;
}
public SynchronizationPolicyDecision getSynchronizationPolicyDecision() {
return synchronizationPolicyDecision;
}
public void setSynchronizationPolicyDecision(SynchronizationPolicyDecision policyDecision) {
this.synchronizationPolicyDecision = policyDecision;
}
public SynchronizationSituationType getSynchronizationSituationDetected() {
return synchronizationSituationDetected;
}
public void setSynchronizationSituationDetected(
SynchronizationSituationType synchronizationSituationDetected) {
this.synchronizationSituationDetected = synchronizationSituationDetected;
}
public SynchronizationSituationType getSynchronizationSituationResolved() {
return synchronizationSituationResolved;
}
public void setSynchronizationSituationResolved(
SynchronizationSituationType synchronizationSituationResolved) {
this.synchronizationSituationResolved = synchronizationSituationResolved;
}
public boolean isFullShadow() {
return fullShadow;
}
/**
* Returns true if full shadow is available, either loaded or in a create delta.
*/
public boolean hasFullShadow() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD) {
return true;
}
return isFullShadow();
}
public void setFullShadow(boolean fullShadow) {
this.fullShadow = fullShadow;
}
public ShadowKindType getKind() {
ResourceShadowDiscriminator discr = getResourceShadowDiscriminator();
if (discr != null) {
return discr.getKind();
}
if (getObjectOld()!=null) {
return getObjectOld().asObjectable().getKind();
}
if (getObjectCurrent()!=null) {
return getObjectCurrent().asObjectable().getKind();
}
if (getObjectNew()!=null) {
return getObjectNew().asObjectable().getKind();
}
return ShadowKindType.ACCOUNT;
}
public PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> getConstructionDeltaSetTriple() {
return constructionDeltaSetTriple;
}
public void setConstructionDeltaSetTriple(
PrismValueDeltaSetTriple<PrismPropertyValue<Construction>> constructionDeltaSetTriple) {
this.constructionDeltaSetTriple = constructionDeltaSetTriple;
}
public Construction getOutboundConstruction() {
return outboundConstruction;
}
public void setOutboundConstruction(Construction outboundConstruction) {
this.outboundConstruction = outboundConstruction;
}
public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> getSqueezedAttributes() {
return squeezedAttributes;
}
public void setSqueezedAttributes(Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> squeezedAttributes) {
this.squeezedAttributes = squeezedAttributes;
}
public Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> getSqueezedAssociations() {
return squeezedAssociations;
}
public void setSqueezedAssociations(
Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismContainerValue<ShadowAssociationType>>>> squeezedAssociations) {
this.squeezedAssociations = squeezedAssociations;
}
public ResourceObjectTypeDefinitionType getResourceObjectTypeDefinitionType() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) {
return null;
}
ResourceObjectTypeDefinitionType def = ResourceTypeUtil.getResourceObjectTypeDefinitionType(
resource, getResourceShadowDiscriminator().getKind(), resourceShadowDiscriminator.getIntent());
return def;
}
private ResourceSchema getResourceSchema() throws SchemaException {
return RefinedResourceSchema.getResourceSchema(resource, getNotNullPrismContext());
}
public RefinedResourceSchema getRefinedResourceSchema() throws SchemaException {
if (resource == null) {
return null;
}
return RefinedResourceSchema.getRefinedSchema(resource, LayerType.MODEL, getNotNullPrismContext());
}
public RefinedObjectClassDefinition getRefinedAccountDefinition() throws SchemaException {
RefinedResourceSchema refinedSchema = getRefinedResourceSchema();
if (refinedSchema == null) {
return null;
}
return refinedSchema.getRefinedDefinition(getResourceShadowDiscriminator().getKind(), getResourceShadowDiscriminator().getIntent());
}
public Collection<ResourceObjectTypeDependencyType> getDependencies() {
if (dependencies == null) {
ResourceObjectTypeDefinitionType resourceAccountTypeDefinitionType = getResourceObjectTypeDefinitionType();
if (resourceAccountTypeDefinitionType == null) {
// No dependencies. But we cannot set null as that means "unknown". So let's set empty collection instead.
dependencies = new ArrayList<ResourceObjectTypeDependencyType>();
} else {
dependencies = resourceAccountTypeDefinitionType.getDependency();
}
}
return dependencies;
}
public ValuePolicyType getAccountPasswordPolicy() {
return accountPasswordPolicy;
}
public void setAccountPasswordPolicy(ValuePolicyType accountPasswordPolicy) {
this.accountPasswordPolicy = accountPasswordPolicy;
}
public ValuePolicyType getEffectivePasswordPolicy() {
if (accountPasswordPolicy != null) {
return accountPasswordPolicy;
}
if (getLensContext().getFocusContext().getOrgPasswordPolicy() != null){
return getLensContext().getFocusContext().getOrgPasswordPolicy();
}
return getLensContext().getGlobalPasswordPolicy();
}
public AssignmentPolicyEnforcementType getAssignmentPolicyEnforcementType() {
// TODO: per-resource assignment enforcement
ResourceType resource = getResource();
ProjectionPolicyType globalAccountSynchronizationSettings = null;
if (resource != null){
globalAccountSynchronizationSettings = resource.getProjection();
}
if (globalAccountSynchronizationSettings == null) {
globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings();
}
AssignmentPolicyEnforcementType globalAssignmentPolicyEnforcement = MiscSchemaUtil.getAssignmentPolicyEnforcementType(globalAccountSynchronizationSettings);
return globalAssignmentPolicyEnforcement;
}
public boolean isLegalize(){
ResourceType resource = getResource();
ProjectionPolicyType globalAccountSynchronizationSettings = null;
if (resource != null){
globalAccountSynchronizationSettings = resource.getProjection();
}
if (globalAccountSynchronizationSettings == null) {
globalAccountSynchronizationSettings = getLensContext().getAccountSynchronizationSettings();
}
if (globalAccountSynchronizationSettings == null){
return false;
}
if (globalAccountSynchronizationSettings.isLegalize() == null){
return false;
}
return globalAccountSynchronizationSettings.isLegalize();
}
/**
     * Recomputes the new state of the account (accountNew) by applying the deltas to the old state (accountOld).
     * Assumes that oldAccount is already set (or is null if it does not exist).
*/
public void recompute() throws SchemaException {
ObjectDelta<ShadowType> accDelta = getDelta();
PrismObject<ShadowType> base = getObjectCurrent();
if (base == null) {
base = getObjectOld();
}
ObjectDelta<ShadowType> syncDelta = getSyncDelta();
if (base == null && syncDelta != null
&& ChangeType.ADD.equals(syncDelta.getChangeType())) {
PrismObject<ShadowType> objectToAdd = syncDelta.getObjectToAdd();
if (objectToAdd != null) {
PrismObjectDefinition<ShadowType> objectDefinition = objectToAdd.getDefinition();
// TODO: remove constructor, use some factory method instead
base = new PrismObject<ShadowType>(objectToAdd.getElementName(), objectDefinition, getNotNullPrismContext());
base = syncDelta.computeChangedObject(base);
}
}
if (accDelta == null) {
// No change
setObjectNew(base);
return;
}
if (base == null && accDelta.isModify()) {
RefinedObjectClassDefinition rAccountDef = getRefinedAccountDefinition();
if (rAccountDef != null) {
base = (PrismObject<ShadowType>) rAccountDef.createBlankShadow();
}
}
setObjectNew(accDelta.computeChangedObject(base));
}
public void clearIntermediateResults() {
constructionDeltaSetTriple = null;
outboundConstruction = null;
squeezedAttributes = null;
}
/**
     * Distributes the resource that is in the context into all the prism objects (old, new) and deltas.
     * The resourceRef will then contain not just the OID but also the full resource object. This may optimize
     * handling of the objects in upper layers (e.g. GUI).
*/
public void distributeResource() {
ResourceType resourceType = getResource();
if (resourceType == null) {
return;
}
PrismObject<ResourceType> resource = resourceType.asPrismObject();
distributeResourceObject(getObjectOld(), resource);
distributeResourceObject(getObjectCurrent(), resource);
distributeResourceObject(getObjectNew(), resource);
distributeResourceDelta(getPrimaryDelta(), resource);
distributeResourceDelta(getSecondaryDelta(), resource);
}
private void distributeResourceObject(PrismObject<ShadowType> object, PrismObject<ResourceType> resource) {
if (object == null) {
return;
}
PrismReference resourceRef = object.findReference(ShadowType.F_RESOURCE_REF);
if (resourceRef != null) {
distributeResourceValues(resourceRef.getValues(), resource);
}
}
private void distributeResourceValue(PrismReferenceValue resourceRefVal, PrismObject<ResourceType> resource) {
if (resourceRefVal != null) {
resourceRefVal.setObject(resource);
}
}
private void distributeResourceDelta(ObjectDelta<ShadowType> delta, PrismObject<ResourceType> resource) {
if (delta == null) {
return;
}
if (delta.isAdd()) {
distributeResourceObject(delta.getObjectToAdd(), resource);
} else if (delta.isModify()) {
ReferenceDelta referenceDelta = delta.findReferenceModification(ShadowType.F_RESOURCE_REF);
if (referenceDelta != null) {
distributeResourceValues(referenceDelta.getValuesToAdd(), resource);
distributeResourceValues(referenceDelta.getValuesToDelete(), resource);
distributeResourceValues(referenceDelta.getValuesToReplace(), resource);
}
} // Nothing to do for DELETE delta
}
private void distributeResourceValues(Collection<PrismReferenceValue> values, PrismObject<ResourceType> resource) {
if (values == null) {
return;
}
for(PrismReferenceValue pval: values) {
distributeResourceValue(pval, resource);
}
}
/**
* Returns delta suitable for execution. The primary and secondary deltas may not make complete sense all by themselves.
     * E.g. they may both be MODIFY deltas even when the account should be created. The deltas begin to make sense
* only if combined with sync decision. This method provides the deltas all combined and ready for execution.
*/
public ObjectDelta<ShadowType> getExecutableDelta() throws SchemaException {
SynchronizationPolicyDecision policyDecision = getSynchronizationPolicyDecision();
ObjectDelta<ShadowType> origDelta = getDelta();
if (policyDecision == SynchronizationPolicyDecision.ADD) {
if (origDelta == null || origDelta.isModify()) {
// We need to convert modify delta to ADD
ObjectDelta<ShadowType> addDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(),
ChangeType.ADD, getPrismContext());
RefinedObjectClassDefinition rAccount = getRefinedAccountDefinition();
if (rAccount == null) {
throw new IllegalStateException("Definition for account type " + getResourceShadowDiscriminator()
+ " not found in the context, but it should be there");
}
PrismObject<ShadowType> newAccount = (PrismObject<ShadowType>) rAccount.createBlankShadow();
addDelta.setObjectToAdd(newAccount);
if (origDelta != null) {
addDelta.merge(origDelta);
}
return addDelta;
}
} else if (policyDecision == SynchronizationPolicyDecision.KEEP) {
// Any delta is OK
} else if (policyDecision == SynchronizationPolicyDecision.DELETE) {
ObjectDelta<ShadowType> deleteDelta = new ObjectDelta<ShadowType>(getObjectTypeClass(),
ChangeType.DELETE, getPrismContext());
String oid = getOid();
if (oid == null) {
throw new IllegalStateException(
"Internal error: account context OID is null during attempt to create delete secondary delta; context="
+this);
}
deleteDelta.setOid(oid);
return deleteDelta;
} else {
// This is either UNLINK or null, both are in fact the same as KEEP
// Any delta is OK
}
return origDelta;
}
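    // For orientation, the mapping implemented above is:
    //   ADD         -> a MODIFY (or missing) delta is wrapped into an ADD of a blank shadow and merged
    //   KEEP        -> the combined primary/secondary delta is used as-is
    //   DELETE      -> a fresh DELETE delta carrying the context OID is synthesized
    //   UNLINK/null -> treated the same as KEEP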
public void checkConsistence() {
checkConsistence(null, true, false);
}
public void checkConsistence(String contextDesc, boolean fresh, boolean force) {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) {
            // Do not check these; they may be quite wild.
return;
}
super.checkConsistence(contextDesc);
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN) {
return;
}
if (fresh && !force) {
if (resource == null) {
throw new IllegalStateException("Null resource in "+this + (contextDesc == null ? "" : " in " +contextDesc));
}
if (resourceShadowDiscriminator == null) {
throw new IllegalStateException("Null resource account type in "+this + (contextDesc == null ? "" : " in " +contextDesc));
}
}
if (syncDelta != null) {
try {
syncDelta.checkConsistence(true, true, true);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e);
} catch (IllegalStateException e) {
throw new IllegalStateException(e.getMessage()+"; in "+getElementDesc()+" sync delta in "+this + (contextDesc == null ? "" : " in " +contextDesc), e);
}
}
}
protected boolean isRequireSecondardyDeltaOid() {
if (synchronizationPolicyDecision == SynchronizationPolicyDecision.ADD ||
synchronizationPolicyDecision == SynchronizationPolicyDecision.BROKEN ||
synchronizationPolicyDecision == SynchronizationPolicyDecision.IGNORE) {
return false;
}
if (getResourceShadowDiscriminator() != null && getResourceShadowDiscriminator().getOrder() > 0) {
// These may not have the OID yet
return false;
}
return super.isRequireSecondardyDeltaOid();
}
@Override
public void cleanup() {
super.cleanup();
synchronizationPolicyDecision = null;
// isLegal = null;
// isLegalOld = null;
isAssigned = false;
isActive = false;
}
@Override
public void normalize() {
super.normalize();
if (syncDelta != null) {
syncDelta.normalize();
}
}
@Override
public void reset() {
super.reset();
wave = -1;
fullShadow = false;
isAssigned = false;
isActive = false;
synchronizationPolicyDecision = null;
constructionDeltaSetTriple = null;
outboundConstruction = null;
dependencies = null;
squeezedAttributes = null;
accountPasswordPolicy = null;
}
@Override
public void adopt(PrismContext prismContext) throws SchemaException {
super.adopt(prismContext);
if (syncDelta != null) {
prismContext.adopt(syncDelta);
}
}
@Override
public LensProjectionContext clone(LensContext<? extends ObjectType> lensContext) {
LensProjectionContext clone = new LensProjectionContext(lensContext, resourceShadowDiscriminator);
copyValues(clone, lensContext);
return clone;
}
protected void copyValues(LensProjectionContext clone, LensContext<? extends ObjectType> lensContext) {
super.copyValues(clone, lensContext);
        // do NOT clone transient values such as accountConstructionDeltaSetTriple;
        // these are not meant to be cloned and they are also not directly cloneable
clone.dependencies = this.dependencies;
clone.doReconciliation = this.doReconciliation;
clone.fullShadow = this.fullShadow;
clone.isAssigned = this.isAssigned;
clone.outboundConstruction = this.outboundConstruction;
clone.synchronizationPolicyDecision = this.synchronizationPolicyDecision;
clone.resource = this.resource;
clone.resourceShadowDiscriminator = this.resourceShadowDiscriminator;
clone.squeezedAttributes = cloneSqueezedAttributes();
if (this.syncDelta != null) {
clone.syncDelta = this.syncDelta.clone();
}
clone.wave = this.wave;
}
private Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> cloneSqueezedAttributes() {
if (squeezedAttributes == null) {
return null;
}
Map<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> clonedMap
= new HashMap<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>>();
Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>> cloner = new Cloner<ItemValueWithOrigin<PrismPropertyValue<?>>>() {
@Override
public ItemValueWithOrigin<PrismPropertyValue<?>> clone(ItemValueWithOrigin<PrismPropertyValue<?>> original) {
return original.clone();
}
};
for (Entry<QName, DeltaSetTriple<ItemValueWithOrigin<PrismPropertyValue<?>>>> entry: squeezedAttributes.entrySet()) {
clonedMap.put(entry.getKey(), entry.getValue().clone(cloner));
}
return clonedMap;
}
/**
* Returns true if the projection has any value for specified attribute.
*/
public boolean hasValueForAttribute(QName attributeName) throws SchemaException {
ItemPath attrPath = new ItemPath(ShadowType.F_ATTRIBUTES, attributeName);
if (getObjectNew() != null) {
PrismProperty<?> attrNew = getObjectNew().findProperty(attrPath);
if (attrNew != null && !attrNew.isEmpty()) {
return true;
}
}
return false;
}
private boolean hasValueForAttribute(QName attributeName, Collection<PrismPropertyValue<Construction>> acPpvSet) {
if (acPpvSet == null) {
return false;
}
for (PrismPropertyValue<Construction> acPpv: acPpvSet) {
Construction ac = acPpv.getValue();
if (ac.hasValueForAttribute(attributeName)) {
return true;
}
}
return false;
}
public AccountOperation getOperation() {
if (isAdd()) {
return AccountOperation.ADD;
}
if (isDelete()) {
return AccountOperation.DELETE;
}
return AccountOperation.MODIFY;
}
@Override
public void checkEncrypted() {
super.checkEncrypted();
if (syncDelta != null) {
CryptoUtil.checkEncrypted(syncDelta);
}
}
public String getHumanReadableName() {
StringBuilder sb = new StringBuilder();
sb.append("account(");
String humanReadableAccountIdentifier = getHumanReadableIdentifier();
if (StringUtils.isEmpty(humanReadableAccountIdentifier)) {
sb.append("no ID");
} else {
sb.append("ID ");
sb.append(humanReadableAccountIdentifier);
}
ResourceShadowDiscriminator discr = getResourceShadowDiscriminator();
if (discr != null) {
sb.append(", type '");
sb.append(discr.getIntent());
sb.append("', ");
if (discr.getOrder() != 0) {
sb.append("order ").append(discr.getOrder()).append(", ");
}
} else {
sb.append(" (no discriminator) ");
}
sb.append(getResource());
sb.append(")");
return sb.toString();
}
private String getHumanReadableIdentifier() {
PrismObject<ShadowType> object = getObjectNew();
if (object == null) {
object = getObjectOld();
}
if (object == null) {
object = getObjectCurrent();
}
if (object == null) {
return null;
}
if (object.canRepresent(ShadowType.class)) {
PrismObject<ShadowType> shadow = (PrismObject<ShadowType>)object;
Collection<ResourceAttribute<?>> identifiers = ShadowUtil.getIdentifiers(shadow);
if (identifiers == null) {
return null;
}
StringBuilder sb = new StringBuilder();
Iterator<ResourceAttribute<?>> iterator = identifiers.iterator();
while (iterator.hasNext()) {
ResourceAttribute<?> id = iterator.next();
sb.append(id.toHumanReadableString());
if (iterator.hasNext()) {
sb.append(",");
}
}
return sb.toString();
} else {
return object.toString();
}
}
@Override
public String debugDump() {
return debugDump(0);
}
@Override
public String debugDump(int indent) {
return debugDump(indent, true);
}
public String debugDump(int indent, boolean showTriples) {
StringBuilder sb = new StringBuilder();
SchemaDebugUtil.indentDebugDump(sb, indent);
sb.append("PROJECTION ");
sb.append(getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName());
sb.append(" ");
sb.append(getResourceShadowDiscriminator());
if (resource != null) {
sb.append(" : ");
sb.append(resource.getName().getOrig());
}
sb.append("\n");
SchemaDebugUtil.indentDebugDump(sb, indent + 1);
sb.append("OID: ").append(getOid());
sb.append(", wave ").append(wave);
if (fullShadow) {
sb.append(", full");
} else {
sb.append(", shadow");
}
sb.append(", exists=").append(isExists);
sb.append(", assigned=").append(isAssigned);
sb.append(", active=").append(isActive);
sb.append(", legal=").append(isLegalOld).append("->").append(isLegal);
sb.append(", recon=").append(doReconciliation);
sb.append(", syncIntent=").append(getSynchronizationIntent());
sb.append(", decision=").append(synchronizationPolicyDecision);
if (!isFresh()) {
sb.append(", NOT FRESH");
}
if (resourceShadowDiscriminator != null && resourceShadowDiscriminator.isThombstone()) {
sb.append(", THOMBSTONE");
}
if (syncAbsoluteTrigger) {
sb.append(", SYNC TRIGGER");
}
if (getIteration() != 0) {
sb.append(", iteration=").append(getIteration()).append(" (").append(getIterationToken()).append(")");
}
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("old"), getObjectOld(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("current"), getObjectCurrent(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("new"), getObjectNew(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("primary delta"), getPrimaryDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("secondary delta"), getSecondaryDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("sync delta"), getSyncDelta(), indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("executed deltas"), getExecutedDeltas(), indent+1);
if (showTriples) {
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("constructionDeltaSetTriple"), constructionDeltaSetTriple, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("outbound account construction"), outboundConstruction, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed attributes"), squeezedAttributes, indent + 1);
sb.append("\n");
DebugUtil.debugDumpWithLabel(sb, getDebugDumpTitle("squeezed associations"), squeezedAssociations, indent + 1);
// This is just a debug thing
// sb.append("\n");
// DebugUtil.indentDebugDump(sb, indent);
// sb.append("ACCOUNT dependencies\n");
// sb.append(DebugUtil.debugDump(dependencies, indent + 1));
}
return sb.toString();
}
@Override
protected String getElementDefaultDesc() {
return "projection";
}
@Override
public String toString() {
return "LensProjectionContext(" + (getObjectTypeClass() == null ? "null" : getObjectTypeClass().getSimpleName()) + ":" + getOid() +
( resource == null ? "" : " on " + resource ) + ")";
}
/**
* Return a human readable name of the projection object suitable for logs.
*/
public String toHumanReadableString() {
if (resourceShadowDiscriminator == null) {
return "(null" + resource + ")";
}
if (resource != null) {
return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resource + ")";
} else {
return "("+getKindValue(resourceShadowDiscriminator.getKind()) + " ("+resourceShadowDiscriminator.getIntent()+") on " + resourceShadowDiscriminator.getResourceOid() + ")";
}
}
public String getHumanReadableKind() {
if (resourceShadowDiscriminator == null) {
return "resource object";
}
return getKindValue(resourceShadowDiscriminator.getKind());
}
private String getKindValue(ShadowKindType kind) {
if (kind == null) {
return "null";
}
return kind.value();
}
@Override
protected String getElementDesc() {
if (resourceShadowDiscriminator == null) {
return "shadow";
}
return getKindValue(resourceShadowDiscriminator.getKind());
}
public void addToPrismContainer(PrismContainer<LensProjectionContextType> lensProjectionContextTypeContainer) throws SchemaException {
LensProjectionContextType lensProjectionContextType = lensProjectionContextTypeContainer.createNewValue().asContainerable();
super.storeIntoLensElementContextType(lensProjectionContextType);
lensProjectionContextType.setSyncDelta(syncDelta != null ? DeltaConvertor.toObjectDeltaType(syncDelta) : null);
lensProjectionContextType.setWave(wave);
lensProjectionContextType.setResourceShadowDiscriminator(resourceShadowDiscriminator != null ?
resourceShadowDiscriminator.toResourceShadowDiscriminatorType() : null);
lensProjectionContextType.setFullShadow(fullShadow);
lensProjectionContextType.setIsAssigned(isAssigned);
lensProjectionContextType.setIsActive(isActive);
lensProjectionContextType.setIsLegal(isLegal);
lensProjectionContextType.setIsLegalOld(isLegalOld);
lensProjectionContextType.setIsExists(isExists);
lensProjectionContextType.setSynchronizationPolicyDecision(synchronizationPolicyDecision != null ? synchronizationPolicyDecision.toSynchronizationPolicyDecisionType() : null);
lensProjectionContextType.setDoReconciliation(doReconciliation);
lensProjectionContextType.setSynchronizationSituationDetected(synchronizationSituationDetected);
lensProjectionContextType.setSynchronizationSituationResolved(synchronizationSituationResolved);
lensProjectionContextType.setAccountPasswordPolicy(accountPasswordPolicy);
lensProjectionContextType.setSyncAbsoluteTrigger(syncAbsoluteTrigger);
}
public static LensProjectionContext fromLensProjectionContextType(LensProjectionContextType projectionContextType, LensContext lensContext, OperationResult result) throws SchemaException, ConfigurationException, ObjectNotFoundException, CommunicationException {
String objectTypeClassString = projectionContextType.getObjectTypeClass();
if (StringUtils.isEmpty(objectTypeClassString)) {
throw new SystemException("Object type class is undefined in LensProjectionContextType");
}
ResourceShadowDiscriminator resourceShadowDiscriminator = ResourceShadowDiscriminator.fromResourceShadowDiscriminatorType(projectionContextType.getResourceShadowDiscriminator());
LensProjectionContext projectionContext = new LensProjectionContext(lensContext, resourceShadowDiscriminator);
projectionContext.retrieveFromLensElementContextType(projectionContextType, result);
if (projectionContextType.getSyncDelta() != null) {
projectionContext.syncDelta = DeltaConvertor.createObjectDelta(projectionContextType.getSyncDelta(), lensContext.getPrismContext());
} else {
projectionContext.syncDelta = null;
}
projectionContext.wave = projectionContextType.getWave() != null ? projectionContextType.getWave() : 0;
projectionContext.fullShadow = projectionContextType.isFullShadow() != null ? projectionContextType.isFullShadow() : false;
projectionContext.isAssigned = projectionContextType.isIsAssigned() != null ? projectionContextType.isIsAssigned() : false;
projectionContext.isActive = projectionContextType.isIsActive() != null ? projectionContextType.isIsActive() : false;
projectionContext.isLegal = projectionContextType.isIsLegal();
projectionContext.isExists = projectionContextType.isIsExists() != null ? projectionContextType.isIsExists() : false;
projectionContext.synchronizationPolicyDecision = SynchronizationPolicyDecision.fromSynchronizationPolicyDecisionType(projectionContextType.getSynchronizationPolicyDecision());
projectionContext.doReconciliation = projectionContextType.isDoReconciliation() != null ? projectionContextType.isDoReconciliation() : false;
projectionContext.synchronizationSituationDetected = projectionContextType.getSynchronizationSituationDetected();
projectionContext.synchronizationSituationResolved = projectionContextType.getSynchronizationSituationResolved();
projectionContext.accountPasswordPolicy = projectionContextType.getAccountPasswordPolicy();
projectionContext.syncAbsoluteTrigger = projectionContextType.isSyncAbsoluteTrigger();
return projectionContext;
}
// Determines whether the full shadow is present, based on the operation result obtained from provisioning.
public void determineFullShadowFlag(OperationResultType fetchResult) {
if (fetchResult != null
&& (fetchResult.getStatus() == OperationResultStatusType.PARTIAL_ERROR
|| fetchResult.getStatus() == OperationResultStatusType.FATAL_ERROR)) { // todo what about other kinds of status? [e.g. in-progress]
setFullShadow(false);
} else {
setFullShadow(true);
}
}
}
|
Java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.kafka;
import static org.apache.beam.sdk.metrics.MetricResultsMatchers.attemptedMetricsResult;
import static org.apache.beam.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem;
import static org.hamcrest.Matchers.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nullable;
import org.apache.beam.sdk.Pipeline.PipelineExecutionException;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.coders.BigEndianIntegerCoder;
import org.apache.beam.sdk.coders.BigEndianLongCoder;
import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.InstantCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarLongCoder;
import org.apache.beam.sdk.io.Read;
import org.apache.beam.sdk.io.UnboundedSource;
import org.apache.beam.sdk.io.UnboundedSource.UnboundedReader;
import org.apache.beam.sdk.io.kafka.serialization.InstantDeserializer;
import org.apache.beam.sdk.metrics.GaugeResult;
import org.apache.beam.sdk.metrics.MetricName;
import org.apache.beam.sdk.metrics.MetricNameFilter;
import org.apache.beam.sdk.metrics.MetricQueryResults;
import org.apache.beam.sdk.metrics.MetricResult;
import org.apache.beam.sdk.metrics.MetricsFilter;
import org.apache.beam.sdk.metrics.SinkMetrics;
import org.apache.beam.sdk.metrics.SourceMetrics;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Distinct;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Flatten;
import org.apache.beam.sdk.transforms.Max;
import org.apache.beam.sdk.transforms.Min;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Utils;
import org.hamcrest.collection.IsIterableContainingInAnyOrder;
import org.hamcrest.collection.IsIterableWithSize;
import org.joda.time.Instant;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests of {@link KafkaIO}.
* Run with 'mvn test -Dkafka.clients.version=0.10.1.1',
* or 'mvn test -Dkafka.clients.version=0.9.0.1' for either Kafka client version.
*/
@RunWith(JUnit4.class)
public class KafkaIOTest {
private static final Logger LOG = LoggerFactory.getLogger(KafkaIOTest.class);
/*
* The tests below borrow code and structure from CountingSourceTest. In addition, they verify
* that the reader interleaves the records from multiple partitions.
*
* Other tests to consider :
* - test KafkaRecordCoder
*/
@Rule
public final transient TestPipeline p = TestPipeline.create();
@Rule
public ExpectedException thrown = ExpectedException.none();
// Creates a mock consumer with records distributed among the given topics, each with the given
// number of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(
List<String> topics, int partitionsPerTopic, int numElements,
OffsetResetStrategy offsetResetStrategy) {
final List<TopicPartition> partitions = new ArrayList<>();
final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
for (String topic : topics) {
List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
for (int i = 0; i < partitionsPerTopic; i++) {
TopicPartition tp = new TopicPartition(topic, i);
partitions.add(tp);
partIds.add(new PartitionInfo(topic, i, null, null, null));
records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>());
}
partitionMap.put(topic, partIds);
}
int numPartitions = partitions.size();
final long[] offsets = new long[numPartitions];
for (int i = 0; i < numElements; i++) {
int pIdx = i % numPartitions;
TopicPartition tp = partitions.get(pIdx);
records.get(tp).add(
new ConsumerRecord<>(
tp.topic(),
tp.partition(),
offsets[pIdx]++,
ByteBuffer.wrap(new byte[4]).putInt(i).array(), // key is 4 byte record id
ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value is 8 byte record id
}
// This is updated when reader assigns partitions.
final AtomicReference<List<TopicPartition>> assignedPartitions =
new AtomicReference<>(Collections.<TopicPartition>emptyList());
final MockConsumer<byte[], byte[]> consumer =
new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
// override assign() in order to set offset limits & to save assigned partitions.
// The keyword '@Override' is omitted here so this works with both Kafka client 0.9 and 0.10:
// 1. SpEL can find this method whether the parameter type is List or Collection;
// 2. List extends Collection, so super.assign() resolves to either assign(List)
//    or assign(Collection).
public void assign(final List<TopicPartition> assigned) {
super.assign(assigned);
assignedPartitions.set(ImmutableList.copyOf(assigned));
for (TopicPartition tp : assigned) {
updateBeginningOffsets(ImmutableMap.of(tp, 0L));
updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
}
}
// Override offsetsForTimes() in order to look up the offsets by timestamp.
// The keyword '@Override' is omitted here because versions before Kafka client 0.10.1.0
// do not have this method.
// It should return Map<TopicPartition, OffsetAndTimestamp>, but versions before 0.10.1.0
// do not have the OffsetAndTimestamp class, so return a raw type and use reflection
// here.
@SuppressWarnings("unchecked")
public Map offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
HashMap<TopicPartition, Object> result = new HashMap<>();
try {
Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp");
// OffsetAndTimestamp(long offset, long timestamp)
Constructor constructor = cls.getDeclaredConstructor(long.class, long.class);
// In test scope, timestamp == offset.
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
long maxOffset = offsets[partitions.indexOf(entry.getKey())];
Long offset = entry.getValue();
if (offset >= maxOffset) {
offset = null;
}
// A null entry mimics Kafka returning no offset for a timestamp past the end.
result.put(
entry.getKey(), offset == null ? null : constructor.newInstance(offset, offset));
}
return result;
} catch (ClassNotFoundException | IllegalAccessException
| InstantiationException | NoSuchMethodException | InvocationTargetException e) {
throw new RuntimeException(e);
}
}
};
for (String topic : topics) {
consumer.updatePartitions(topic, partitionMap.get(topic));
}
// MockConsumer does not maintain any relationship between the partition seek position and
// the records added. E.g. if we add 10 records to a partition and then seek to the end of
// the partition, MockConsumer will still return the 10 records in the next poll. It is our
// responsibility to keep the currently enqueued records in sync with the partition offsets.
// The following task is invoked on each call to MockConsumer.poll().
// We enqueue only the records with offset >= the partition's current position.
Runnable recordEnqueueTask = new Runnable() {
@Override
public void run() {
// add all the records with offset >= current partition position.
for (TopicPartition tp : assignedPartitions.get()) {
long curPos = consumer.position(tp);
for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
if (r.offset() >= curPos) {
consumer.addRecord(r);
}
}
}
consumer.schedulePollTask(this);
}
};
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
}
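// Hypothetical helper for illustration only (not used by the tests): records created in
// mkMockConsumer() encode the record id as a 4-byte key and an 8-byte value, so decoding
// is the mirror-image ByteBuffer read. ByteBuffer defaults to big-endian, matching Kafka's
// IntegerDeserializer/LongDeserializer used by mkKafkaReadTransform() below.
@SuppressWarnings("unused")
private static KV<Integer, Long> decodeMockRecord(ConsumerRecord<byte[], byte[]> record) {
int keyId = ByteBuffer.wrap(record.key()).getInt(); // inverse of putInt(i) above
long valueId = ByteBuffer.wrap(record.value()).getLong(); // inverse of putLong(i) above
return KV.of(keyId, valueId);
}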
private static class ConsumerFactoryFn
implements SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> {
private final List<String> topics;
private final int partitionsPerTopic;
private final int numElements;
private final OffsetResetStrategy offsetResetStrategy;
public ConsumerFactoryFn(List<String> topics,
int partitionsPerTopic,
int numElements,
OffsetResetStrategy offsetResetStrategy) {
this.topics = topics;
this.partitionsPerTopic = partitionsPerTopic;
this.numElements = numElements;
this.offsetResetStrategy = offsetResetStrategy;
}
@Override
public Consumer<byte[], byte[]> apply(Map<String, Object> config) {
return mkMockConsumer(topics, partitionsPerTopic, numElements, offsetResetStrategy);
}
}
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
return mkKafkaReadTransform(numElements, numElements, timestampFn);
}
/**
* Creates a consumer with two topics, with 10 partitions each.
* numElements records are assigned round-robin across all 20 partitions.
*/
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
int maxNumRecords,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
List<String> topics = ImmutableList.of("topic_a", "topic_b");
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(maxNumRecords);
if (timestampFn != null) {
return reader.withTimestampFn(timestampFn);
} else {
return reader;
}
}
private static class AssertMultipleOf implements SerializableFunction<Iterable<Long>, Void> {
private final int num;
public AssertMultipleOf(int num) {
this.num = num;
}
@Override
public Void apply(Iterable<Long> values) {
for (Long v : values) {
assertEquals(0, v % num);
}
return null;
}
}
public static void addCountingAsserts(PCollection<Long> input, long numElements) {
// Count == numElements
// Unique count == numElements
// Min == 0
// Max == numElements-1
addCountingAsserts(input, numElements, numElements, 0L, numElements - 1);
}
public static void addCountingAsserts(
PCollection<Long> input, long count, long uniqueCount, long min, long max) {
PAssert
.thatSingleton(input.apply("Count", Count.<Long>globally()))
.isEqualTo(count);
PAssert
.thatSingleton(input.apply(Distinct.<Long>create())
.apply("UniqueCount", Count.<Long>globally()))
.isEqualTo(uniqueCount);
PAssert
.thatSingleton(input.apply("Min", Min.<Long>globally()))
.isEqualTo(min);
PAssert
.thatSingleton(input.apply("Max", Max.<Long>globally()))
.isEqualTo(max);
}
@Test
public void testUnboundedSource() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnreachableKafkaBrokers() {
// Expect an exception when the Kafka brokers are not reachable on the workers.
// We specify partitions explicitly so that splitting does not involve server interaction.
// Set request timeout to 10ms so that test does not take long.
thrown.expect(Exception.class);
thrown.expectMessage("Reader-0: Timeout while initializing partition 'test-0'");
int numElements = 1000;
PCollection<Long> input = p
.apply(KafkaIO.<Integer, Long>read()
.withBootstrapServers("8.8.8.8:9092") // Google public DNS ip.
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 0)))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.updateConsumerProperties(ImmutableMap.<String, Object>of(
ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10,
ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5,
ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 8,
ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 8))
.withMaxNumRecords(10)
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithSingleTopic() {
// same as testUnboundedSource, but with single topic
int numElements = 1000;
String topic = "my_topic";
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopic("my_topic")
.withConsumerFactoryFn(new ConsumerFactoryFn(
ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
.withMaxNumRecords(numElements)
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithExplicitPartitions() {
int numElements = 1000;
List<String> topics = ImmutableList.of("test");
KafkaIO.Read<byte[], Long> reader = KafkaIO.<byte[], Long>read()
.withBootstrapServers("none")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements / 10);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
// assert that every element is a multiple of 5.
PAssert
.that(input)
.satisfies(new AssertMultipleOf(5));
PAssert
.thatSingleton(input.apply(Count.<Long>globally()))
.isEqualTo(numElements / 10L);
p.run();
}
private static class ElementValueDiff extends DoFn<Long, Long> {
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
c.output(c.element() - c.timestamp().getMillis());
}
}
@Test
public void testUnboundedSourceTimestamps() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
PCollection<Long> diffs = input
.apply("TimestampDiff", ParDo.of(new ElementValueDiff()))
.apply("DistinctTimestamps", Distinct.<Long>create());
// This assert also confirms that diffs only has one unique value.
PAssert.thatSingleton(diffs).isEqualTo(0L);
p.run();
}
private static class RemoveKafkaMetadata<K, V> extends DoFn<KafkaRecord<K, V>, KV<K, V>> {
@ProcessElement
public void processElement(ProcessContext ctx) throws Exception {
ctx.output(ctx.element().getKV());
}
}
@Test
public void testUnboundedSourceSplits() throws Exception {
int numElements = 1000;
int numSplits = 10;
// Coders must be specified explicitly here due to the way the transform
// is used in the test.
UnboundedSource<KafkaRecord<Integer, Long>, ?> initial =
mkKafkaReadTransform(numElements, null)
.withKeyDeserializerAndCoder(IntegerDeserializer.class, BigEndianIntegerCoder.of())
.withValueDeserializerAndCoder(LongDeserializer.class, BigEndianLongCoder.of())
.makeSource();
List<? extends UnboundedSource<KafkaRecord<Integer, Long>, ?>> splits =
initial.split(numSplits, p.getOptions());
assertEquals("Expected exact splitting", numSplits, splits.size());
long elementsPerSplit = numElements / numSplits;
assertEquals("Expected even splits", numElements, elementsPerSplit * numSplits);
PCollectionList<Long> pcollections = PCollectionList.empty(p);
for (int i = 0; i < splits.size(); ++i) {
pcollections = pcollections.and(
p.apply("split" + i, Read.from(splits.get(i)).withMaxNumRecords(elementsPerSplit))
.apply("Remove Metadata " + i, ParDo.of(new RemoveKafkaMetadata<Integer, Long>()))
.apply("collection " + i, Values.<Long>create()));
}
PCollection<Long> input = pcollections.apply(Flatten.<Long>pCollections());
addCountingAsserts(input, numElements);
p.run();
}
/**
* A timestamp function that uses the given value as the timestamp.
*/
private static class ValueAsTimestampFn
implements SerializableFunction<KV<Integer, Long>, Instant> {
@Override
public Instant apply(KV<Integer, Long> input) {
return new Instant(input.getValue());
}
}
// Kafka records are read in a separate thread inside the reader. As a result advance() might not
// read any records even from the mock consumer, especially for the first record.
// This is a helper method to loop until we read a record.
private static void advanceOnce(UnboundedReader<?> reader, boolean isStarted) throws IOException {
if (!isStarted && reader.start()) {
return;
}
while (!reader.advance()) {
// Very rarely will there be more than one attempt.
// In case of a bug we might end up looping forever, and test will fail with a timeout.
// Avoid hard cpu spinning in case of a test failure.
try {
Thread.sleep(1);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
@Test
public void testUnboundedSourceCheckpointMark() throws Exception {
int numElements = 85; // 85 to make sure some partitions have more records than others.
// create a single split:
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
final int numToSkip = 20; // one from each partition.
// advance numToSkip elements
for (int i = 0; i < numToSkip; ++i) {
advanceOnce(reader, i > 0);
}
// Confirm that we get the expected element in sequence before checkpointing.
assertEquals(numToSkip - 1, (long) reader.getCurrent().getKV().getValue());
assertEquals(numToSkip - 1, reader.getCurrentTimestamp().getMillis());
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
reader = source.createReader(null, mark);
// Confirm that we get the next elements in sequence.
// This also confirms that the reader interleaves records from all the partitions.
for (int i = numToSkip; i < numElements; i++) {
advanceOnce(reader, i > numToSkip);
assertEquals(i, (long) reader.getCurrent().getKV().getValue());
assertEquals(i, reader.getCurrentTimestamp().getMillis());
}
}
@Test
public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception {
// Similar to testUnboundedSourceCheckpointMark(), but verifies that source resumes
// properly from empty partitions, without missing messages added since checkpoint.
// Initialize consumer with fewer elements than number of partitions so that some are empty.
int initialNumElements = 5;
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
for (int l = 0; l < initialNumElements; ++l) {
advanceOnce(reader, l > 0);
}
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
// Create another source with a MockConsumer using OffsetResetStrategy.LATEST. This ensures
// that the reader has to explicitly seek to the first offset for partitions that were empty.
int numElements = 100; // all the 20 partitions will have elements
List<String> topics = ImmutableList.of("topic_a", "topic_b");
source = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.LATEST))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements)
.withTimestampFn(new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
reader = source.createReader(null, mark);
// Verify in any order. As the partitions are unevenly read, the returned records are not in a
// simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin order.
List<Long> expected = new ArrayList<>();
List<Long> actual = new ArrayList<>();
for (long i = initialNumElements; i < numElements; i++) {
advanceOnce(reader, i > initialNumElements);
expected.add(i);
actual.add(reader.getCurrent().getKV().getValue());
}
assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray()));
}
@Test
public void testUnboundedSourceMetrics() {
int numElements = 1000;
String readStep = "readFromKafka";
p.apply(readStep,
mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata());
PipelineResult result = p.run();
String splitId = "0";
MetricName elementsRead = SourceMetrics.elementsRead().getName();
MetricName elementsReadBySplit = SourceMetrics.elementsReadBySplit(splitId).getName();
MetricName bytesRead = SourceMetrics.bytesRead().getName();
MetricName bytesReadBySplit = SourceMetrics.bytesReadBySplit(splitId).getName();
MetricName backlogElementsOfSplit = SourceMetrics.backlogElementsOfSplit(splitId).getName();
MetricName backlogBytesOfSplit = SourceMetrics.backlogBytesOfSplit(splitId).getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder().build());
Iterable<MetricResult<Long>> counters = metrics.counters();
assertThat(counters, hasItem(attemptedMetricsResult(
elementsRead.namespace(),
elementsRead.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
elementsReadBySplit.namespace(),
elementsReadBySplit.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesRead.namespace(),
bytesRead.name(),
readStep,
12000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesReadBySplit.namespace(),
bytesReadBySplit.name(),
readStep,
12000L)));
MetricQueryResults backlogElementsMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogElementsOfSplit.namespace(),
backlogElementsOfSplit.name()))
.build());
// since gauge values may be inconsistent in some environments assert only on their existence.
assertThat(backlogElementsMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
MetricQueryResults backlogBytesMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogBytesOfSplit.namespace(),
backlogBytesOfSplit.name()))
.build());
// since gauge values may be inconsistent in some environments assert only on their existence.
assertThat(backlogBytesMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
}
@Test
public void testSink() throws Exception {
// Simply read from the Kafka source and write to the Kafka sink, then verify the records
// are correctly published to the mock Kafka producer.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testValuesSink() throws Exception {
// Similar to testSink(), but uses the values() interface.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create()) // there are no keys
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))
.values());
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, true);
}
}
@Test
public void testEOSink() {
// testSink() with EOS enabled.
// This does not actually inject retries in a stage to test exactly-once semantics.
// It mainly exercises the code in normal flow without retries.
// Ideally we should test the EOS sink by triggering replays of messages between stages.
// It is not feasible to test such retries with the direct runner. When DoFnTester supports
// state, we can test the KafkaEOWriter DoFn directly to ensure it handles retries correctly.
if (!ProducerSpEL.supportsTransactions()) {
LOG.warn("testEOSink() is disabled as Kafka client version does not support transactions.");
return;
}
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withEOS(1, "test")
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList(topic), 10, 10, OffsetResetStrategy.EARLIEST))
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testSinkWithSendErrors() throws Throwable {
// Similar to testSink(), except that up to 10 of the send calls to the producer will fail
// asynchronously.
// TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail.
// We limit the number of errors injected to 10 below. That would reflect a real streaming
// pipeline. But I am not sure how to achieve that. For now expect an exception:
thrown.expect(InjectedErrorException.class);
thrown.expectMessage("Injected Error #1");
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThreadWithErrors =
new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
try {
p.run();
} catch (PipelineExecutionException e) {
// throwing inner exception helps assert that first exception is thrown from the Sink
throw e.getCause().getCause();
} finally {
completionThreadWithErrors.shutdown();
}
}
}
@Test
public void testUnboundedSourceStartReadTime() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
int numElements = 1000;
// In this MockConsumer an element's timestamp equals its offset, and there are 20
// partitions with 50 elements each, so this startTime (25) skips the first half of
// the elements in every partition.
int startTime = numElements / 20 / 2;
int maxNumRecords = numElements / 2;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, maxNumRecords, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, maxNumRecords, maxNumRecords, maxNumRecords, numElements - 1);
p.run();
}
@Rule public ExpectedException noMessagesException = ExpectedException.none();
@Test
public void testUnboundedSourceStartReadTimeException() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
noMessagesException.expect(RuntimeException.class);
int numElements = 1000;
// In this MockConsumer an element's timestamp equals its offset, and there are 20
// partitions with 50 elements each, so with this startTime (50) no elements can be read.
int startTime = numElements / 20;
p.apply(mkKafkaReadTransform(numElements, numElements, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
p.run();
}
@Test
public void testSourceDisplayData() {
KafkaIO.Read<Integer, Long> read = mkKafkaReadTransform(10, null);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topics", "topic_a,topic_b"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
KafkaIO.Read<byte[], Long> read = KafkaIO.<byte[], Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5),
new TopicPartition("test", 6)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSinkDisplayData() {
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
KafkaIO.Write<Integer, Long> write = KafkaIO.<Integer, Long>write()
.withBootstrapServers("myServerA:9092,myServerB:9092")
.withTopic("myTopic")
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey));
DisplayData displayData = DisplayData.from(write);
assertThat(displayData, hasDisplayItem("topic", "myTopic"));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServerA:9092,myServerB:9092"));
assertThat(displayData, hasDisplayItem("retries", 3));
}
}
// interface for testing coder inference
private interface DummyInterface<T> {
}
// interface for testing coder inference
private interface DummyNonparametricInterface {
}
// class for testing coder inference when the deserializer implements multiple interfaces
private static class DeserializerWithInterfaces
implements DummyInterface<String>, DummyNonparametricInterface,
Deserializer<Long> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public Long deserialize(String topic, byte[] bytes) {
return 0L;
}
@Override
public void close() {
}
}
// class for which a coder cannot be inferred
private static class NonInferableObject {
}
// class for testing coder inference
private static class NonInferableObjectDeserializer
implements Deserializer<NonInferableObject> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public NonInferableObject deserialize(String topic, byte[] bytes) {
return new NonInferableObject();
}
@Override
public void close() {
}
}
@Test
public void testInferKeyCoder() {
CoderRegistry registry = CoderRegistry.createDefault();
assertTrue(KafkaIO.inferCoder(registry, LongDeserializer.class).getValueCoder()
instanceof VarLongCoder);
assertTrue(KafkaIO.inferCoder(registry, StringDeserializer.class).getValueCoder()
instanceof StringUtf8Coder);
assertTrue(KafkaIO.inferCoder(registry, InstantDeserializer.class).getValueCoder()
instanceof InstantCoder);
assertTrue(KafkaIO.inferCoder(registry, DeserializerWithInterfaces.class).getValueCoder()
instanceof VarLongCoder);
}
@Rule public ExpectedException cannotInferException = ExpectedException.none();
@Test
public void testInferKeyCoderFailure() throws Exception {
cannotInferException.expect(RuntimeException.class);
CoderRegistry registry = CoderRegistry.createDefault();
KafkaIO.inferCoder(registry, NonInferableObjectDeserializer.class);
}
@Test
public void testSinkMetrics() throws Exception {
// Simply read from kafka source and write to kafka sink. Then verify the metrics are reported.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply("writeToKafka", KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
PipelineResult result = p.run();
MetricName elementsWritten = SinkMetrics.elementsWritten().getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.inNamespace(elementsWritten.namespace()))
.build());
assertThat(metrics.counters(), hasItem(
attemptedMetricsResult(
elementsWritten.namespace(),
elementsWritten.name(),
"writeToKafka",
1000L)));
completionThread.shutdown();
}
}
private static void verifyProducerRecords(MockProducer<Integer, Long> mockProducer,
String topic, int numElements, boolean keyIsAbsent) {
// verify that appropriate messages are written to kafka
List<ProducerRecord<Integer, Long>> sent = mockProducer.history();
// sort by values
Collections.sort(sent, new Comparator<ProducerRecord<Integer, Long>>() {
@Override
public int compare(ProducerRecord<Integer, Long> o1, ProducerRecord<Integer, Long> o2) {
return Long.compare(o1.value(), o2.value());
}
});
for (int i = 0; i < numElements; i++) {
ProducerRecord<Integer, Long> record = sent.get(i);
assertEquals(topic, record.topic());
if (keyIsAbsent) {
assertNull(record.key());
} else {
assertEquals(i, record.key().intValue());
}
assertEquals(i, record.value().longValue());
}
}
/**
 * A wrapper over MockProducer. It also places the mock producer in the global
 * MOCK_PRODUCER_MAP. The map is needed so that the producer returned by ProducerFactoryFn
 * during the pipeline run can be used for verification after the test. We also override the
 * {@code flush()} method in MockProducer so that the test can control the behavior of the
 * {@code send()} method (e.g. to inject errors).
 */
private static class MockProducerWrapper implements AutoCloseable {
final String producerKey;
final MockProducer<Integer, Long> mockProducer;
// MockProducer has a "closed" method starting with version 0.11.
private static Method closedMethod;
static {
try {
closedMethod = MockProducer.class.getMethod("closed");
} catch (NoSuchMethodException e) {
closedMethod = null;
}
}
MockProducerWrapper() {
producerKey = String.valueOf(ThreadLocalRandom.current().nextLong());
mockProducer = new MockProducer<Integer, Long>(
false, // disable synchronous completion of send. see ProducerSendCompletionThread below.
new IntegerSerializer(),
new LongSerializer()) {
// Override flush() so that it does not complete all the waiting sends, giving
// ProducerSendCompletionThread a chance to inject errors.
@Override
public void flush() {
while (completeNext()) {
// there are some uncompleted records. let the completion thread handle them.
try {
Thread.sleep(10);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
};
// Add the producer to the global map so that producer factory function can access it.
assertNull(MOCK_PRODUCER_MAP.putIfAbsent(producerKey, mockProducer));
}
public void close() {
MOCK_PRODUCER_MAP.remove(producerKey);
try {
if (closedMethod == null || !((Boolean) closedMethod.invoke(mockProducer))) {
mockProducer.close();
}
} catch (Exception e) { // Not expected.
throw new RuntimeException(e);
}
}
}
private static final ConcurrentMap<String, MockProducer<Integer, Long>> MOCK_PRODUCER_MAP =
new ConcurrentHashMap<>();
private static class ProducerFactoryFn
implements SerializableFunction<Map<String, Object>, Producer<Integer, Long>> {
final String producerKey;
ProducerFactoryFn(String producerKey) {
this.producerKey = producerKey;
}
@SuppressWarnings("unchecked")
@Override
public Producer<Integer, Long> apply(Map<String, Object> config) {
// Make sure the config is correctly set up for serializers.
// There may not be a key serializer if we're interested only in values.
if (config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) != null) {
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, true);
}
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, false);
// Returning the same producer for each instance in a pipeline seems to work fine currently.
// If DirectRunner creates multiple DoFn instances for sinks, we might need to handle
// it appropriately. I.e. allow multiple producers for each producerKey and concatenate
// all the messages written to each producer for verification after the pipeline finishes.
return MOCK_PRODUCER_MAP.get(producerKey);
}
}
private static class InjectedErrorException extends RuntimeException {
InjectedErrorException(String message) {
super(message);
}
}
/**
* We start MockProducer with auto-completion disabled. That implies a record is not marked sent
* until #completeNext() is called on it. This class starts a thread to asynchronously 'complete'
 * the sends. During completion, we can also make those requests fail. This error injection
* is used in one of the tests.
*/
private static class ProducerSendCompletionThread {
private final MockProducer<Integer, Long> mockProducer;
private final int maxErrors;
private final int errorFrequency;
private final AtomicBoolean done = new AtomicBoolean(false);
private final ExecutorService injectorThread;
private int numCompletions = 0;
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer) {
// complete everything successfully
this(mockProducer, 0, 0);
}
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer,
int maxErrors,
int errorFrequency) {
this.mockProducer = mockProducer;
this.maxErrors = maxErrors;
this.errorFrequency = errorFrequency;
injectorThread = Executors.newSingleThreadExecutor();
}
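// For illustration (derived from the run loop below): with maxErrors = 10 and
// errorFrequency = 100, as used in testSinkWithSendErrors(), every 100th completed send
// fails with an InjectedErrorException, up to 10 injected failures in total.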
ProducerSendCompletionThread start() {
injectorThread.submit(new Runnable() {
@Override
public void run() {
int errorsInjected = 0;
while (!done.get()) {
boolean successful;
if (errorsInjected < maxErrors && ((numCompletions + 1) % errorFrequency) == 0) {
successful = mockProducer.errorNext(
new InjectedErrorException("Injected Error #" + (errorsInjected + 1)));
if (successful) {
errorsInjected++;
}
} else {
successful = mockProducer.completeNext();
}
if (successful) {
numCompletions++;
} else {
// wait a bit since there are no unsent records
try {
Thread.sleep(1);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
}
});
return this;
}
void shutdown() {
done.set(true);
injectorThread.shutdown();
try {
assertTrue(injectorThread.awaitTermination(10, TimeUnit.SECONDS));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
}
|
Java
|
/*
*
* Copyright (c) 2020 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file implements a test for CHIP Callback
*
*/
#include <lib/core/CHIPCallback.h>
#include <lib/support/CHIPMem.h>
#include <lib/support/UnitTestRegistration.h>
#include <nlunit-test.h>
using namespace chip::Callback;
/**
* An example Callback registrar. Resumer::Resume() accepts Callbacks
* to be run during the next call to Resumer::Dispatch(). In an environment
* completely driven by callbacks, an application's main() would just call
* something like Resumer::Dispatch() in a loop.
*/
class Resumer : private CallbackDeque
{
public:
/**
* @brief run this callback on the next Dispatch()
*/
void Resume(Callback<> * cb)
{
// always first thing: cancel to take ownership of
// cb members
Enqueue(cb->Cancel());
}
void Dispatch()
{
Cancelable ready;
DequeueAll(ready);
// runs the ready list
while (ready.mNext != &ready)
{
Callback<> * cb = Callback<>::FromCancelable(ready.mNext);
// one-shot semantics
cb->Cancel();
cb->mCall(cb->mContext);
}
}
};
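// Illustrative sketch only (hypothetical, not part of this test): in an environment
// completely driven by callbacks, the loop mentioned in the class comment above could be:
//
// int main()
// {
//     Resumer resumer;
//     // ... hand &resumer to components that call resumer.Resume(cb) ...
//     for (;;)
//     {
//         resumer.Dispatch(); // runs everything Resume()d since the last pass
//     }
// }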
static void increment(int * v)
{
(*v)++;
}
struct Resume
{
Callback<> * cb;
Resumer * resumer;
};
static void resume(struct Resume * me)
{
me->resumer->Resume(me->cb);
}
static void canceler(Cancelable * ca)
{
ca->Cancel();
}
static void ResumerTest(nlTestSuite * inSuite, void * inContext)
{
int n = 1;
Callback<> cb(reinterpret_cast<CallFn>(increment), &n);
Callback<> cancelcb(reinterpret_cast<CallFn>(canceler), cb.Cancel());
Resumer resumer;
// Resume() works
resumer.Resume(&cb);
resumer.Dispatch();
resumer.Resume(&cb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 3);
n = 1;
// test cb->Cancel() cancels
resumer.Resume(&cb);
cb.Cancel();
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 1);
n = 1;
// Cancel cb before Dispatch() gets around to us (tests FIFO *and* cancel() from readylist)
resumer.Resume(&cancelcb);
resumer.Resume(&cb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 1);
n = 1;
// 2nd Resume() cancels first registration
resumer.Resume(&cb);
resumer.Resume(&cb); // cancels previous registration
resumer.Dispatch(); // runs the list
resumer.Dispatch(); // runs an empty list
NL_TEST_ASSERT(inSuite, n == 2);
n = 1;
// Resume() during Dispatch() runs only once, but enqueues for next dispatch
struct Resume res = { .cb = &cb, .resumer = &resumer };
Callback<> resumecb(reinterpret_cast<CallFn>(resume), &res);
resumer.Resume(&cb);
resumer.Resume(&resumecb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 2);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 3);
Callback<> * pcb = chip::Platform::New<Callback<>>(reinterpret_cast<CallFn>(increment), &n);
n = 1;
// cancel on destruct
resumer.Resume(pcb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 2);
resumer.Resume(pcb);
chip::Platform::Delete(pcb);
resumer.Dispatch();
NL_TEST_ASSERT(inSuite, n == 2);
}
/**
* An example Callback registrar. Notifier implements persistently-registered
* semantics, and uses Callbacks with a non-default signature.
*/
class Notifier : private CallbackDeque
{
public:
typedef void (*NotifyFn)(void *, int);
/**
* run all the callers
*/
void Notify(int v)
{
for (Cancelable * ca = mNext; ca != this; ca = ca->mNext)
{
// persistent registration semantics, with data
Callback<NotifyFn> * cb = Callback<NotifyFn>::FromCancelable(ca);
cb->mCall(cb->mContext, v);
}
}
/**
* @brief example
*/
static void Cancel(Cancelable * cb)
{
Dequeue(cb); // take off ready list
}
/**
* @brief illustrate a case where this needs notification of cancellation
*/
void Register(Callback<NotifyFn> * cb) { Enqueue(cb->Cancel(), Cancel); }
};
static void increment_by(int * n, int by)
{
*n += by;
}
static void NotifierTest(nlTestSuite * inSuite, void * inContext)
{
int n = 1;
Callback<Notifier::NotifyFn> cb(reinterpret_cast<Notifier::NotifyFn>(increment_by), &n);
Callback<Notifier::NotifyFn> cancelcb(reinterpret_cast<Notifier::NotifyFn>(canceler), cb.Cancel());
// safe to call anytime
cb.Cancel();
Notifier notifier;
// Simple stuff works, and registration persists across notifications
notifier.Register(&cb);
notifier.Notify(1);
notifier.Notify(8);
NL_TEST_ASSERT(inSuite, n == 10);
n = 1;
// Cancel cb before Dispatch() gets around to us (tests FIFO *and* cancel() from readylist)
notifier.Register(&cancelcb);
notifier.Register(&cb);
notifier.Notify(8);
NL_TEST_ASSERT(inSuite, n == 1);
cb.Cancel();
cancelcb.Cancel();
}
/**
* Set up the test suite.
*/
int TestCHIPCallback_Setup(void * inContext)
{
CHIP_ERROR error = chip::Platform::MemoryInit();
if (error != CHIP_NO_ERROR)
return FAILURE;
return SUCCESS;
}
/**
* Tear down the test suite.
*/
int TestCHIPCallback_Teardown(void * inContext)
{
chip::Platform::MemoryShutdown();
return SUCCESS;
}
/**
* Test Suite. It lists all the test functions.
*/
// clang-format off
static const nlTest sTests[] =
{
NL_TEST_DEF("ResumerTest", ResumerTest),
NL_TEST_DEF("NotifierTest", NotifierTest),
NL_TEST_SENTINEL()
};
// clang-format on
int TestCHIPCallback(void)
{
// clang-format off
nlTestSuite theSuite =
{
"CHIPCallback",
&sTests[0],
TestCHIPCallback_Setup,
TestCHIPCallback_Teardown
};
// clang-format on
nlTestRunner(&theSuite, nullptr);
return (nlTestRunnerStats(&theSuite));
}
CHIP_REGISTER_TEST_SUITE(TestCHIPCallback)
|
C++
|
/*
* Copyright 2017 Exorath
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.exorath.plugin.game.cakewars.rewards;
import com.exorath.plugin.game.cakewars.Main;
import com.exorath.service.currency.api.CurrencyServiceAPI;
import com.exorath.victoryHandler.rewards.CurrencyReward;
import net.md_5.bungee.api.ChatColor;
/**
* Created by toonsev on 5/31/2017.
*/
public class KillsReward extends CurrencyReward {
public static final int CRUMBS_PER_KILL = 2;
private int kills;
public KillsReward(CurrencyServiceAPI currencyServiceAPI) {
super(null, currencyServiceAPI, Main.CRUMBS_CURRENCY, 0);
setCurrencyColor(ChatColor.GOLD);
setCurrencyName("Crumbs");
}
public void addKill() {
kills++;
setAmount(kills * CRUMBS_PER_KILL);
setReason("Killing " + kills + " Players");
}
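// For example: after three addKill() calls, kills == 3, the reward amount is
// 3 * CRUMBS_PER_KILL == 6 crumbs, and the reason reads "Killing 3 Players".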
}
|
Java
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
- list - returns the current representation of a registry
required: false
default: present
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the registry is deployed
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
required: false
default: 'openshift3/ose-${component}:${version}'
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry pod. The default is 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
    - A dictionary of environment variables to set on the deploymentconfig, e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary using key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b  (with '#' as the separator)
            sets d['a']['b'] = item and returns the enclosing container
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
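    # A hedged, comments-only sketch of the dotted-key notation used by
    # get_entry/add_entry (the data and keys below are illustrative):
    #
    #   data = {'a': {'b': ['x', {'c': 'd'}]}}
    #   Yedit.get_entry(data, 'a.b[1].c')   # -> 'd'
    #   Yedit.get_entry(data, 'a.b[0]')     # -> 'x'
    #   Yedit.get_entry(data, 'a.missing')  # -> None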
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
        # Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
            curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
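    # Hedged examples of parse_value coercion (comments only; values made up):
    #   Yedit.parse_value('true', 'bool')  # validated, then yaml-loaded -> True
    #   Yedit.parse_value('2G', 'str')     # 'str' vtype skips yaml load -> '2G'
    #   Yedit.parse_value('5000')          # yaml load infers the type -> 5000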
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
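    # A minimal sketch of an edits list as consumed by process_edits; it
    # mirrors the shape used in the module EXAMPLES above (values made up):
    #
    #   edits = [
    #       {'key': 'spec.replicas', 'value': 2, 'action': 'put'},
    #       {'key': 'spec.template.spec.containers[0].resources.limits.memory',
    #        'value': '2G', 'action': 'update'},
    #   ]
    #   Yedit.process_edits(edits, yamlfile)
    #   # -> {'changed': True, 'results': [...]} once any edit applies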
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
        # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
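    # Hedged example of run_ansible params for a simple put; the src path is
    # hypothetical and the keys shown are the ones the code above reads:
    #
    #   Yedit.run_ansible({'src': '/tmp/example.yml', 'state': 'present',
    #                      'backup': False, 'separator': '.', 'content': None,
    #                      'content_type': '', 'key': 'a.b', 'value': 'c',
    #                      'value_type': '', 'update': False, 'append': False,
    #                      'index': None, 'curr_value': None,
    #                      'curr_value_format': None, 'edits': None})
    #   # -> {'changed': True,
    #   #     'result': [{'key': 'a.b', 'edit': {'a': {'b': 'c'}}}],
    #   #     'state': 'present'}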
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
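# Illustrative invocation of openshift_cmd (comments only; the binary
# location and kubeconfig path depend on the host and are assumptions here):
#
#   cli = OpenShiftCLI(namespace='default')
#   cli.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
#   # roughly runs: oc get pods -o json -n default
#   # with KUBECONFIG pointing at the temporary kubeconfig copy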
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
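    # Worked example for the two version helpers above (illustrative values):
    #   Utils.filter_versions('oc v3.6.173\nkubernetes v1.6.1+5115d708d7')
    #   # -> {'oc': 'v3.6.173', 'kubernetes': 'v1.6.1+5115d708d7',
    #   #     'openshift': 'v3.6.173'}   (the 3.2 fallback copies the oc value)
    #   Utils.add_custom_versions({'oc': 'v3.6.173'})
    #   # -> {'oc_numeric': '3.6.173', 'oc_short': '3.6'}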
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            if debug:
                                print('list compare returned false')
                            return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
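    # Hedged sketch of stringify output (hypothetical options dict):
    #   opts = {'replicas': {'value': 2, 'include': True},
    #           'labels': {'value': {'a': '1', 'b': '2'}, 'include': True}}
    #   cfg = OpenShiftCLIConfig('reg', 'default', '/tmp/kubeconfig', opts)
    #   cfg.to_option_list(ascommalist='labels')
    #   # -> ['--labels=a=1,b=2', '--replicas=2']   (sorted keys, '_' -> '-')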
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
        ''' return whether an env key exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return an environment variable by name '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the list of environment variables '''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            # compare against None so an env var at index 0 is deleted too
            if idx is not None:
                modified = True
                del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
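    # Illustrative env-var round trip on the default deploymentconfig
    # (comments only; the values are made up):
    #   dc = DeploymentConfig()
    #   dc.add_env_value('REGISTRY_HTTP_SECRET', 'supersecret')
    #   dc.exists_env_key('REGISTRY_HTTP_SECRET')       # -> True
    #   dc.update_env_var('REGISTRY_HTTP_SECRET', 'rotated')
    #   dc.get_env_var('REGISTRY_HTTP_SECRET')
    #   # -> {'name': 'REGISTRY_HTTP_SECRET', 'value': 'rotated'}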
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volume_mounts[del_idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
        '''update (or add) a volume in the volumes list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
        if update_idx is not None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
        '''update (or add) a volume mount'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
elif 'hostpath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
    ''' Class to model an openshift secret object '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
    @secrets.setter
    def secrets(self, value):
        '''secret property setter'''
        self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
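    # Minimal sketch of a secret round trip (values are made up; real secret
    # data is normally base64 encoded):
    #   sec = Secret(content={'data': {'existing': 'dmFsdWU='}})
    #   sec.add_secret('REGISTRY_HTTP_SECRET', 'c3VwZXJzZWNyZXQ=')
    #   sec.find_secret('REGISTRY_HTTP_SECRET')
    #   # -> {'key': 'REGISTRY_HTTP_SECRET', 'value': 'c3VwZXJzZWNyZXQ='}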
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
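    # Minimal sketch of the service dict this produces (illustrative values):
    #   sc = ServiceConfig('docker-registry', 'default',
    #                      ports=[{'port': 5000, 'targetPort': 5000}])
    #   sc.data['kind']                       # -> 'Service'
    #   sc.data['spec']['ports'][0]['port']   # -> 5000
    #   sc.data['spec']['sessionAffinity']    # -> 'None' (the default)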
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
        '''add portal ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
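    # Worked example for a secret volume; it mirrors the volume_mounts option
    # shown in EXAMPLES above (names are illustrative):
    #   Volume.create_volume_structure({'name': 'dockercerts',
    #                                   'type': 'secret',
    #                                   'path': '/etc/secrets',
    #                                   'secret_name': 'registry-secret'})
    #   # -> ({'name': 'dockercerts',
    #   #      'secret': {'secretName': 'registry-secret'}},
    #   #     {'mountPath': '/etc/secrets', 'name': 'dockercerts'})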
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
    ''' Class to manage the openshift registry objects '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
            a registry consists of two main parts:
- dc/docker-registry
- svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
        '''delete the registry objects; the service is kept when complete is False'''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is None, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
# The environment variable REGISTRY_HTTP_SECRET is autogenerated, so copy the
# in-memory deploymentconfig's value onto the generated one; the modifications
# below will overwrite it if needed.
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
'protocol', # ports.portocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# The default value is false, so it is safe not to pass an explicit false value
# to oc versions that do understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default={}, type='dict'),
edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
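# For orientation, a minimal playbook task that would drive this module; the
# values below are illustrative assumptions, not recommended defaults:
#
#   - name: deploy the integrated registry
#     oc_adm_registry:
#       state: present
#       name: docker-registry
#       namespace: default
#       service_account: registry
#       replicas: 2
#       selector: "region=infra"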
# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-
|
Java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.irc;
import java.util.ArrayList;
import java.util.Dictionary;
import java.util.Hashtable;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.schwering.irc.lib.IRCConnection;
import org.schwering.irc.lib.IRCEventAdapter;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class IrcEndpointTest {
private IrcComponent component;
private IrcConfiguration configuration;
private IRCConnection connection;
private IrcEndpoint endpoint;
@Before
public void doSetup() {
component = mock(IrcComponent.class);
configuration = mock(IrcConfiguration.class);
connection = mock(IRCConnection.class);
List<String> channels = new ArrayList<String>();
Dictionary<String, String> keys = new Hashtable<String, String>();
channels.add("chan1");
channels.add("chan2");
keys.put("chan1", "");
keys.put("chan2", "chan2key");
when(configuration.getChannels()).thenReturn(channels);
when(configuration.getKey("chan1")).thenReturn("");
when(configuration.getKey("chan2")).thenReturn("chan2key");
when(component.getIRCConnection(configuration)).thenReturn(connection);
endpoint = new IrcEndpoint("foo", component, configuration);
}
@Test
public void doJoinChannelTestNoKey() throws Exception {
endpoint.joinChannel("chan1");
verify(connection).doJoin("chan1");
}
@Test
public void doJoinChannelTestKey() throws Exception {
endpoint.joinChannel("chan2");
verify(connection).doJoin("chan2", "chan2key");
}
@Test
public void doJoinChannels() throws Exception {
endpoint.joinChannels();
verify(connection).doJoin("chan1");
verify(connection).doJoin("chan2", "chan2key");
}
@Test
public void doHandleIrcErrorNickInUse() throws Exception {
when(connection.getNick()).thenReturn("nick");
endpoint.handleIrcError(IRCEventAdapter.ERR_NICKNAMEINUSE, "foo");
verify(connection).doNick("nick-");
when(connection.getNick()).thenReturn("nick---");
// confirm doNick("foo") was never invoked
verify(connection, never()).doNick("foo");
}
}
|
Java
|
<?php
namespace Google\AdsApi\AdManager\v202111;
/**
* This file was generated from WSDL. DO NOT EDIT.
*/
class PrecisionError extends \Google\AdsApi\AdManager\v202111\ApiError
{
/**
* @var string $reason
*/
protected $reason = null;
/**
* @param string $fieldPath
* @param \Google\AdsApi\AdManager\v202111\FieldPathElement[] $fieldPathElements
* @param string $trigger
* @param string $errorString
* @param string $reason
*/
public function __construct($fieldPath = null, array $fieldPathElements = null, $trigger = null, $errorString = null, $reason = null)
{
parent::__construct($fieldPath, $fieldPathElements, $trigger, $errorString);
$this->reason = $reason;
}
/**
* @return string
*/
public function getReason()
{
return $this->reason;
}
/**
* @param string $reason
* @return \Google\AdsApi\AdManager\v202111\PrecisionError
*/
public function setReason($reason)
{
$this->reason = $reason;
return $this;
}
}
|
Java
|
// STLport regression testsuite component.
// To compile as a separate example, please #define MAIN.
#include <algorithm>
#include <iostream>
#include "unary.h"
#ifdef MAIN
#define bcompos1_test main
#endif
#if !defined (STLPORT) || defined(__STL_USE_NAMESPACES)
using namespace std;
#endif
int bcompos1_test(int, char**)
{
cout<<"Results of bcompos1_test:"<<endl;
int array [6] = { -2, -1, 0, 1, 2, 3 };
binary_compose<logical_and<bool>, odd, positive>
b = binary_compose<logical_and<bool>, odd, positive>
(logical_and<bool>(), odd(), positive());
int* p = find_if((int*)array, (int*)array + 6, b);
if(p != array + 6)
cout << *p << " is odd and positive" << endl;
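// Assuming the odd and positive functors from unary.h match their names,
// the first element that is both odd and positive is 1 (-1 is odd but not
// positive), so this test is expected to print "1 is odd and positive".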
return 0;
}
|
Java
|
package com.zxinsight.classifier.ruleengine.admin;
import java.rmi.RemoteException;
import java.util.Map;
import javax.rules.admin.LocalRuleExecutionSetProvider;
import javax.rules.admin.RuleAdministrator;
import javax.rules.admin.RuleExecutionSet;
import javax.rules.admin.RuleExecutionSetDeregistrationException;
import javax.rules.admin.RuleExecutionSetProvider;
import javax.rules.admin.RuleExecutionSetRegisterException;
@SuppressWarnings("rawtypes")
public class RuleAdministratorImpl implements RuleAdministrator {
@Override
public void deregisterRuleExecutionSet(String bindUri, Map properties)
throws RuleExecutionSetDeregistrationException, RemoteException {
RuleExecutionSetRepository repository = RuleExecutionSetRepository
.getInstance();
if (repository.getRuleExecutionSet(bindUri) == null) {
throw new RuleExecutionSetDeregistrationException(
"no execution set bound to: " + bindUri);
}
repository.unregisterRuleExecutionSet(bindUri);
}
@Override
public LocalRuleExecutionSetProvider getLocalRuleExecutionSetProvider(
Map properties) throws RemoteException {
return new LocalRuleExecutionSetProviderImple();
}
@Override
public RuleExecutionSetProvider getRuleExecutionSetProvider(Map properties)
throws RemoteException {
return new RuleExecutionSetProviderImpl();
}
@Override
public void registerRuleExecutionSet(String bindUri,
RuleExecutionSet ruleExecutionSet, Map properties)
throws RuleExecutionSetRegisterException, RemoteException {
RuleExecutionSetRepository repository = RuleExecutionSetRepository
.getInstance();
repository.registerRuleExecutionSet(bindUri, ruleExecutionSet);
}
}
|
Java
|
'use strict';
angular.module('playgroundApp', [
'playgroundApp.filters',
'playgroundApp.services',
'playgroundApp.directives',
'ngRoute',
'ui.bootstrap',
'ui',
])
.config(function($locationProvider, $routeProvider, $httpProvider,
$dialogProvider) {
$locationProvider.html5Mode(true);
// TODO: add list of promises to be resolved for injection
// TODO: resolved promises are injected into controller
// TODO: see http://www.youtube.com/watch?v=P6KITGRQujQ
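// A minimal sketch of that TODO (pgProjectService and its list() method are
// assumptions for illustration, not existing services in this app):
//
// .when('/playground/', {
//   templateUrl: '/playground/main.html',
//   controller: MainController,
//   resolve: {
//     projects: ['pgProjectService', function(pgProjectService) {
//       return pgProjectService.list(); // injected into MainController as 'projects'
//     }]
//   }
// })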
$routeProvider
.when('/playground/', {
templateUrl: '/playground/main.html',
controller: MainController,
})
.when('/playground/p/:project_id/', {
templateUrl: '/playground/project.html',
controller: ProjectController,
reloadOnSearch: false,
});
$httpProvider.interceptors.push('pgHttpInterceptor');
// TODO: test these defaults?
$dialogProvider.options({
backdropFade: true,
modalFade: true,
});
})
.value('ui.config', {
codemirror: {
lineNumbers: true,
matchBrackets: true,
autofocus: true,
undoDepth: 440, // default = 40
}
});
|
Java
|
name 'machines'
maintainer 'YOUR_NAME'
maintainer_email 'YOUR_EMAIL'
license 'All rights reserved'
description 'Installs/Configures machines'
long_description 'Installs/Configures machines'
version '0.1.0'
|
Java
|
# Deventropy Shared Utils
See **[project website](http://www.deventropy.org/shared-utils/)** for more information.
|
Java
|
package trendli.me.makhana.common.entities;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public enum ActionType
{
MOVE( "Moving", "newTile" ), FABRICATING( "Fabricating" );
private final String verb;
private final List< String > dataKeys;
private ActionType( String verb, String... dataKeys )
{
this.verb = verb;
if ( dataKeys != null )
{
this.dataKeys = Arrays.asList( dataKeys );
}
else
{
this.dataKeys = Collections.emptyList( );
}
}
/**
* @return the dataKeys
*/
public List< String > getDataKeys( )
{
return dataKeys;
}
/**
* @return the verb
*/
public String getVerb( )
{
return verb;
}
}
|
Java
|
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kilt
import (
"github.com/google/kilt/pkg/rework"
log "github.com/golang/glog"
"github.com/spf13/cobra"
)
var reworkCmd = &cobra.Command{
Use: "rework",
Short: "Rework the patches belonging to patchsets",
Long: `Rework patchsets, allowing patches to be redistributed and re-ordered in the
branch. The rework command will create a working area detached from the current
kilt branch where modifications can be staged without changing the original
branch.
Kilt will examine the patchsets in the branch and determine which patches
belonging to patchsets need to be reworked, and create a queue of operations
that the user will drive. The user can also perform other rework-related
operations, such as re-ordering or merging patches.
Once the user is finished, kilt will verify that the rework is valid, and
modify the previous kilt branch to point to the result of the rework. A rework
is considered valid if the end state is identical to the initial state -- the
diff between them is empty.`,
Args: argsRework,
Run: runRework,
}
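// Illustrative invocations (assuming the root command is the "kilt" binary;
// the flags are the ones registered in init() below):
//
//	kilt rework --patchset some-patchset   // begin reworking one patchset
//	kilt rework --all                      // begin reworking every patchset
//	kilt rework --continue                 // resume after completing a step
//	kilt rework --finish                   // validate and finalize the rework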
var reworkFlags = struct {
begin bool
finish bool
validate bool
rContinue bool
abort bool
skip bool
force bool
auto bool
patchsets []string
all bool
}{}
func init() {
rootCmd.AddCommand(reworkCmd)
reworkCmd.Flags().BoolVar(&reworkFlags.begin, "begin", true, "begin rework")
reworkCmd.Flags().MarkHidden("begin")
reworkCmd.Flags().BoolVar(&reworkFlags.finish, "finish", false, "validate and finish rework")
reworkCmd.Flags().BoolVar(&reworkFlags.abort, "abort", false, "abort rework")
reworkCmd.Flags().BoolVarP(&reworkFlags.force, "force", "f", false, "when finishing, force finish rework, regardless of validation")
reworkCmd.Flags().BoolVar(&reworkFlags.validate, "validate", false, "validate rework")
reworkCmd.Flags().BoolVar(&reworkFlags.rContinue, "continue", false, "continue rework")
reworkCmd.Flags().BoolVar(&reworkFlags.skip, "skip", false, "skip rework step")
reworkCmd.Flags().BoolVar(&reworkFlags.auto, "auto", false, "attempt to automatically complete rework")
reworkCmd.Flags().BoolVarP(&reworkFlags.all, "all", "a", false, "specify all patchsets for rework")
reworkCmd.Flags().StringSliceVarP(&reworkFlags.patchsets, "patchset", "p", nil, "specify individual patchset for rework")
}
func argsRework(*cobra.Command, []string) error {
return nil
}
func runRework(cmd *cobra.Command, args []string) {
var c *rework.Command
var err error
switch {
case reworkFlags.finish:
reworkFlags.auto = true
c, err = rework.NewFinishCommand(reworkFlags.force)
case reworkFlags.abort:
c, err = rework.NewAbortCommand()
case reworkFlags.skip:
c, err = rework.NewSkipCommand()
case reworkFlags.validate:
c, err = rework.NewValidateCommand()
case reworkFlags.rContinue:
c, err = rework.NewContinueCommand()
case reworkFlags.begin:
targets := []rework.TargetSelector{rework.FloatingTargets{}}
if reworkFlags.all {
targets = append(targets, rework.AllTargets{})
} else if len(reworkFlags.patchsets) > 0 {
for _, p := range reworkFlags.patchsets {
targets = append(targets, rework.PatchsetTarget{Name: p})
}
}
c, err = rework.NewBeginCommand(targets...)
default:
log.Exitf("No operation specified")
}
if err != nil {
log.Exitf("Rework failed: %v", err)
}
if reworkFlags.auto {
err = c.ExecuteAll()
} else {
err = c.Execute()
}
if err != nil {
log.Errorf("Rework failed: %v", err)
}
if err = c.Save(); err != nil {
log.Exitf("Failed to save rework state: %v", err)
}
}
|
Java
|
/*
* Copyright 2014-2015 Nikos Grammatikos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://raw.githubusercontent.com/nikosgram13/OglofusProtection/master/LICENSE
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.nikosgram.oglofus.protection;
import com.google.common.base.Optional;
import com.sk89q.intake.argument.ArgumentException;
import com.sk89q.intake.argument.ArgumentParseException;
import com.sk89q.intake.argument.CommandArgs;
import com.sk89q.intake.parametric.ProvisionException;
import me.nikosgram.oglofus.protection.api.ActionResponse;
import me.nikosgram.oglofus.protection.api.CommandExecutor;
import me.nikosgram.oglofus.protection.api.entity.User;
import me.nikosgram.oglofus.protection.api.message.MessageType;
import me.nikosgram.oglofus.protection.api.region.ProtectionRank;
import me.nikosgram.oglofus.protection.api.region.ProtectionRegion;
import me.nikosgram.oglofus.protection.api.region.ProtectionStaff;
import org.apache.commons.lang3.ClassUtils;
import org.spongepowered.api.entity.player.Player;
import org.spongepowered.api.service.user.UserStorage;
import org.spongepowered.api.util.command.CommandSource;
import javax.annotation.Nullable;
import java.lang.annotation.Annotation;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
public class OglofusProtectionStaff implements ProtectionStaff {
private final List<User> staff = new ArrayList<User>();
private final Map<UUID, ProtectionRank> ranks = new HashMap<UUID, ProtectionRank>();
private final User owner;
private final ProtectionRegion region;
private final OglofusSponge sponge;
protected OglofusProtectionStaff(ProtectionRegion region, OglofusSponge sponge) {
this.region = region;
this.sponge = sponge;
owner = sponge.getUserManager().getUser(UUID.fromString(sponge.connector.getString(
"oglofus_regions", "uuid", region.getUuid().toString(), "owner"
).get())).get();
Map<String, String> staff = sponge.connector.getStringMap(
"oglofus_regions", "uuid", region.getUuid().toString(), new String[]{"player", "rank"}
);
for (String uid : staff.keySet()) {
UUID uuid = UUID.fromString(uid);
this.staff.add(sponge.getUserManager().getUser(uuid).get());
ranks.put(uuid, ProtectionRank.valueOf(staff.get(uid)));
}
}
@Override
public UUID getOwnerUuid() {
return owner.getUuid();
}
@Override
public User getOwner() {
return owner;
}
@Override
@SuppressWarnings("unchecked")
public <T> Optional<T> getOwnerAs(Class<T> tClass) {
if (ClassUtils.isAssignable(tClass, Player.class)) {
return (Optional<T>) sponge.server.getPlayer(owner.getUuid());
} else if (ClassUtils.isAssignable(tClass, User.class)) {
UserStorage storage;
if ((storage = sponge.game.getServiceManager().provide(UserStorage.class).orNull()) !=
null) {
return (Optional<T>) storage.get(owner.getUuid()).orNull();
}
}
return Optional.absent();
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getOfficersAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (UUID uuid : getOfficersUuid()) {
Player player;
if ((player = sponge.server.getPlayer(uuid).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getOfficersUuid() {
List<UUID> returned = new ArrayList<UUID>();
for (User user : getOfficers()) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public Collection<User> getOfficers() {
List<User> returned = new ArrayList<User>();
for (User user : this) {
if (ranks.get(user.getUuid()).equals(ProtectionRank.Officer)) {
returned.add(user);
}
}
return returned;
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getMembersAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (UUID uuid : getMembersUuid()) {
Player player;
if ((player = sponge.server.getPlayer(uuid).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getMembersUuid() {
List<UUID> returned = new ArrayList<UUID>();
for (User user : getMembers()) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public Collection<User> getMembers() {
List<User> returned = new ArrayList<User>();
for (User user : this) {
if (ranks.get(user.getUuid()).equals(ProtectionRank.Member)) {
returned.add(user);
}
}
return returned;
}
@Override
@SuppressWarnings("unchecked")
public <T> Collection<T> getStaffAs(Class<T> tClass) {
List<T> returned = new ArrayList<T>();
if (ClassUtils.isAssignable(tClass, Player.class)) {
for (User user : this) {
Player player;
if ((player = sponge.server.getPlayer(user.getUuid()).orNull()) != null) {
returned.add((T) player);
}
}
}
return returned;
}
@Override
public Collection<UUID> getStaffUuid() {
Collection<UUID> returned = new ArrayList<UUID>();
for (User user : this) {
returned.add(user.getUuid());
}
return returned;
}
@Override
public boolean isOwner(UUID target) {
return owner.getUuid().equals(target);
}
@Override
public boolean isOwner(User target) {
return owner.getUuid().equals(target.getUuid());
}
@Override
public boolean isOfficer(UUID target) {
return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Officer);
}
@Override
public boolean isOfficer(User target) {
return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Officer);
}
@Override
public boolean isMember(UUID target) {
return ranks.containsKey(target) && ranks.get(target).equals(ProtectionRank.Member);
}
@Override
public boolean isMember(User target) {
return ranks.containsKey(target.getUuid()) && ranks.get(target.getUuid()).equals(ProtectionRank.Member);
}
@Override
public boolean isStaff(UUID target) {
return ranks.containsKey(target);
}
@Override
public boolean isStaff(User target) {
return ranks.containsKey(target.getUuid());
}
@Override
public boolean hasOwnerAccess(UUID target) {
return isOwner(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.owner");
}
@Override
public boolean hasOwnerAccess(User target) {
return isOwner(target) || target.hasPermission("oglofus.protection.bypass.owner");
}
@Override
public boolean hasOfficerAccess(UUID target) {
return isOfficer(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.officer");
}
@Override
public boolean hasOfficerAccess(User target) {
return isOfficer(target) || target.hasPermission("oglofus.protection.bypass.officer");
}
@Override
public boolean hasMemberAccess(UUID target) {
return isMember(target) || sponge.getUserManager().getUser(target).get().hasPermission("oglofus.protection.bypass.member");
}
@Override
public boolean hasMemberAccess(User target) {
return isMember(target) || target.hasPermission("oglofus.protection.bypass.member");
}
@Override
public ProtectionRank getRank(UUID target) {
return ranks.containsKey(target) ? ranks.get(target) : ProtectionRank.None;
}
@Override
public ProtectionRank getRank(User target) {
return ranks.containsKey(target.getUuid()) ? ranks.get(target.getUuid()) : ProtectionRank.None;
}
@Override
public void broadcast(String message) {
broadcast(MessageType.CHAT, message);
}
@Override
public void broadcast(String message, ProtectionRank rank) {
broadcast(MessageType.CHAT, message, rank);
}
@Override
public void broadcast(MessageType type, String message) {
for (User user : this) {
user.sendMessage(type, message);
}
}
@Override
public void broadcast(MessageType type, String message, ProtectionRank rank) {
switch (rank) {
case Member:
for (User user : getMembers()) {
user.sendMessage(type, message);
}
break;
case Officer:
for (User user : getOfficers()) {
user.sendMessage(type, message);
}
break;
case Owner:
owner.sendMessage(type, message);
break;
}
}
@Override
public void broadcastRaw(Object message) {
for (User user : this) {
user.sendMessage(message);
}
}
@Override
public void broadcastRaw(Object message, ProtectionRank rank) {
switch (rank) {
case Member:
for (User user : getMembers()) {
user.sendMessage(message);
}
break;
case Officer:
for (User user : getOfficers()) {
user.sendMessage(message);
}
break;
case Owner:
owner.sendMessage(message);
break;
}
}
@Override
public void broadcastRaw(MessageType type, Object message) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public void broadcastRaw(MessageType type, Object message, ProtectionRank rank) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public ActionResponse reFlag() {
//TODO: make it.
return null;
}
@Override
public ActionResponse invite(Object sender, UUID target) {
return sponge.getUserManager().invite(sender, target, region);
}
@Override
public ActionResponse invite(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse invite(Object sender, User target) {
return null;
}
@Override
public ActionResponse invite(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse invite(UUID target) {
return sponge.getUserManager().invite(target, region);
}
@Override
public ActionResponse invite(User target) {
return null;
}
@Override
public ActionResponse kick(Object sender, UUID target) {
if (sender instanceof CommandSource) {
if (sender instanceof Player) {
if (region.getProtectionStaff().hasOwnerAccess(((Player) sender).getUniqueId())) {
//TODO: call the handler PlayerKickHandler.
return kick(target);
}
return ActionResponse.Failure.setMessage("access");
}
if (((CommandSource) sender).hasPermission("oglofus.protection.bypass")) {
return kick(target);
}
return ActionResponse.Failure.setMessage("access");
}
return ActionResponse.Failure.setMessage("object");
}
@Override
public ActionResponse kick(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse kick(Object sender, User target) {
return null;
}
@Override
public ActionResponse kick(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse kick(UUID target) {
//TODO: call the handler PlayerKickHandler.
return null;
}
@Override
public ActionResponse kick(User target) {
return null;
}
@Override
public ActionResponse promote(Object sender, UUID target) {
return null;
}
@Override
public ActionResponse promote(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse promote(Object sender, User target) {
return null;
}
@Override
public ActionResponse promote(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse promote(UUID target) {
return null;
}
@Override
public ActionResponse promote(User target) {
return null;
}
@Override
public ActionResponse demote(Object sender, UUID target) {
return null;
}
@Override
public ActionResponse demote(CommandExecutor sender, UUID target) {
return null;
}
@Override
public ActionResponse demote(Object sender, User target) {
return null;
}
@Override
public ActionResponse demote(CommandExecutor sender, User target) {
return null;
}
@Override
public ActionResponse demote(UUID target) {
return null;
}
@Override
public ActionResponse demote(User target) {
return null;
}
@Override
public ActionResponse changeRank(Object sender, UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(CommandExecutor sender, UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(Object sender, User target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(CommandExecutor sender, User target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(UUID target, ProtectionRank rank) {
return null;
}
@Override
public ActionResponse changeRank(User target, ProtectionRank rank) {
return null;
}
@Override
public Iterator<User> iterator() {
return staff.iterator();
}
@Override
public boolean isProvided() {
return false;
}
@Nullable
@Override
public User get(CommandArgs arguments, List<? extends Annotation> modifiers) throws ArgumentException, ProvisionException {
String name = arguments.next();
Optional<User> user = sponge.getUserManager().getUser(name);
if (user.isPresent() && isStaff(user.get())) {
return user.get();
} else {
throw new ArgumentParseException(String.format("I can't find the Staff with name '%s'.", name));
}
}
@Override
public List<String> getSuggestions(String prefix) {
List<String> returned = new ArrayList<String>();
for (User user : this) {
if (user.getName().startsWith(prefix)) {
returned.add(user.getName());
}
}
return returned;
}
}
|
Java
|
package commons;
import org.makagiga.commons.ConfigFile;
import org.makagiga.test.AbstractEnumTest;
import org.makagiga.test.Test;
import org.makagiga.test.TestMethod;
import org.makagiga.test.Tester;
@Test(className = ConfigFile.Format.class)
public final class TestConfigFile_Format extends AbstractEnumTest<ConfigFile.Format> {
// public
public TestConfigFile_Format() {
super(
ConfigFile.Format.values(),
ConfigFile.Format.DESKTOP, ConfigFile.Format.INI
);
}
@Test
public void test_commons() {
for (final ConfigFile.Format i : ConfigFile.Format.values()) {
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateGroup(null);
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateGroup("");
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateKey(null);
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
i.validateKey("");
}
} );
}
final String LONG_VALUE = "AZaz09-";
final String SHORT_VALUE = "X";
// DESKTOP
ConfigFile.Format f = ConfigFile.Format.DESKTOP;
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateGroup("[");
}
} );
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateGroup("]");
}
} );
assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE;
assert f.validateGroup(LONG_VALUE) == LONG_VALUE;
assertIllegalArgumentException(new Tester.Code() {
public void run() throws Throwable {
ConfigFile.Format.DESKTOP.validateKey("=");
}
} );
assert f.validateKey(SHORT_VALUE) == SHORT_VALUE;
assert f.validateKey(LONG_VALUE) == LONG_VALUE;
f.validateGroup(" ");
f.validateGroup("Foo Bar");
// INI
f = ConfigFile.Format.INI;
assert f.validateGroup(SHORT_VALUE) == SHORT_VALUE;
assert f.validateGroup(LONG_VALUE) == LONG_VALUE;
assert f.validateKey(SHORT_VALUE) == SHORT_VALUE;
assert f.validateKey(LONG_VALUE) == LONG_VALUE;
}
@Test(
methods = @TestMethod(name = "equals", parameters = "String, String")
)
public void test_equals() {
ConfigFile.Format f;
f = ConfigFile.Format.DESKTOP;
assert f.equals("foo", "foo");
assert !f.equals("foo", "FOO");
f = ConfigFile.Format.INI;
assert f.equals("foo", "foo");
assert f.equals("foo", "FOO");
}
@Test(
methods = @TestMethod(name = "escape", parameters = "String")
)
public void test_escape() {
assertNull(ConfigFile.Format.escape(null));
assertEmpty(ConfigFile.Format.escape(""));
assertEquals("\\tFoo\\sBar\\r\\nBaz\\\\", ConfigFile.Format.escape("\tFoo Bar\r\nBaz\\"));
}
@Test(
methods = @TestMethod(name = "unescape", parameters = "String")
)
public void test_unescape() {
assertNull(ConfigFile.Format.unescape(null));
assertEmpty(ConfigFile.Format.unescape(""));
assertEquals("Foo Bar", ConfigFile.Format.unescape("Foo Bar"));
assertEquals("\tFoo Bar\r\nBaz\\", ConfigFile.Format.unescape("\\tFoo\\sBar\\r\\nBaz\\\\"));
assertEquals("\n\n \\\\", ConfigFile.Format.unescape("\\n\\n\\s\\s\\\\\\\\"));
}
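// Inferred from the assertions above (not from ConfigFile documentation):
// escaping maps TAB -> \t, SPACE -> \s, CR -> \r, LF -> \n and doubles
// backslashes; unescape is the inverse and leaves plain strings untouched.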
@Test(
methods = @TestMethod(name = "getComment")
)
public void test_getComment() {
assert ConfigFile.Format.DESKTOP.getComment().equals("#");
assert ConfigFile.Format.INI.getComment().equals(";");
}
@Test(
methods = @TestMethod(name = "getEOL")
)
public void test_getEOL() {
assert ConfigFile.Format.DESKTOP.getEOL().equals("\n");
assert ConfigFile.Format.INI.getEOL().equals("\r\n");
}
@Test(
methods = @TestMethod(name = "getSuffix")
)
public void test_getSuffix() {
assert ConfigFile.Format.DESKTOP.getSuffix().equals(".desktop");
assert ConfigFile.Format.INI.getSuffix().equals(".ini");
}
@Test(
methods = @TestMethod(name = "isCaseSensitive")
)
public void test_isCaseSensitive() {
assert ConfigFile.Format.DESKTOP.isCaseSensitive();
assert !ConfigFile.Format.INI.isCaseSensitive();
}
}
|
Java
|
package org.apache.rave.portal.service.impl;
import org.apache.rave.model.ExcercicesHasTrainingPlan;
import org.apache.rave.model.Serie;
import org.apache.rave.model.TrainingPlan;
import org.apache.rave.portal.repository.ExcercicesHasTrainingPlanRepository;
import org.apache.rave.portal.repository.SerieRepository;
import org.apache.rave.portal.repository.TrainingPlanRepository;
import org.apache.rave.portal.service.TrainingPlanService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.ArrayList;
import java.util.Collection;
/**
* Created by fhernandez on 23/09/14.
*/
@Service
public class DefaultTrainingPlanService implements TrainingPlanService {
private final Logger logger = LoggerFactory.getLogger(DefaultTrainingPlanService.class);
private final TrainingPlanRepository trainingPlanRepository;
private final ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository;
private final SerieRepository serieRepository;
@Autowired
public DefaultTrainingPlanService(TrainingPlanRepository trainingPlanRepository, ExcercicesHasTrainingPlanRepository exercisesHasTrainingPlanRepository, SerieRepository serieRepository) {
this.trainingPlanRepository = trainingPlanRepository;
this.exercisesHasTrainingPlanRepository = exercisesHasTrainingPlanRepository;
this.serieRepository = serieRepository;
}
@Override
@Transactional
public TrainingPlan getById(Long trainingPlanId) {
TrainingPlan trainingPlan = trainingPlanRepository.getById(trainingPlanId);
if (trainingPlan != null) {
// touch the lazy collection so it is initialized inside the transaction
trainingPlan.getExercisesHasTrainingplans().size();
}
return trainingPlan;
}
@Transactional
public TrainingPlan save(TrainingPlan newPlan) {
Collection<ExcercicesHasTrainingPlan> exerciseList = newPlan.getExercisesHasTrainingplans();
try {
if (newPlan.getEntityId() == null) {
newPlan = trainingPlanRepository.save(newPlan);
}
for (ExcercicesHasTrainingPlan exerciseHasTraining : exerciseList) {
Serie serie = serieRepository.save(exerciseHasTraining.getSerie());
exerciseHasTraining.setSerie(serie);
exerciseHasTraining.setSerieId(serie.getEntityId());
exerciseHasTraining.setTrainingplanId(newPlan.getEntityId());
exerciseHasTraining.setTrainingPlan(newPlan);
}
exercisesHasTrainingPlanRepository.saveList(exerciseList);
} catch (Exception e) {
// log with the throwable so the stack trace is preserved
logger.error("Exception saving plan", e);
}
return newPlan;
}
public Collection<TrainingPlan> getByTrainerID(Long trainerId) {
return trainingPlanRepository.getByTrainerID(trainerId);
}
}
|
Java
|
/*
===========================================================================
Copyright 2002-2010 Martin Dvorak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package com.mindcognition.mindraider.ui.swing.trash;
import java.awt.BorderLayout;
import java.awt.GridLayout;
import java.awt.Toolkit;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.awt.event.FocusEvent;
import java.util.HashMap;
import javax.swing.JButton;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.JScrollPane;
import javax.swing.JToolBar;
import javax.swing.JTree;
import javax.swing.event.TreeExpansionEvent;
import javax.swing.event.TreeExpansionListener;
import javax.swing.event.TreeModelEvent;
import javax.swing.event.TreeModelListener;
import javax.swing.event.TreeSelectionEvent;
import javax.swing.event.TreeSelectionListener;
import javax.swing.event.TreeWillExpandListener;
import javax.swing.tree.DefaultMutableTreeNode;
import javax.swing.tree.DefaultTreeModel;
import javax.swing.tree.ExpandVetoException;
import javax.swing.tree.MutableTreeNode;
import javax.swing.tree.TreePath;
import javax.swing.tree.TreeSelectionModel;
import org.apache.commons.lang.ArrayUtils;
import org.apache.log4j.Logger;
import com.emental.mindraider.core.MindRaider;
import com.emental.mindraider.core.rest.Metadata;
import com.emental.mindraider.core.rest.ResourceDescriptor;
import com.emental.mindraider.core.rest.resource.FolderResource;
import com.emental.mindraider.core.rest.resource.OutlineResource;
import com.emental.mindraider.ui.dialogs.ProgressDialogJFrame;
import com.emental.mindraider.ui.gfx.IconsRegistry;
import com.mindcognition.mindraider.application.model.label.LabelCustodianListener;
import com.mindcognition.mindraider.l10n.Messages;
import com.mindcognition.mindraider.ui.swing.dialogs.RestoreNotebookJDialog;
import com.mindcognition.mindraider.ui.swing.explorer.ExplorerJPanel;
import com.mindcognition.mindraider.utils.SwingWorker;
public class TrashJPanel extends JPanel implements
TreeWillExpandListener, TreeExpansionListener, LabelCustodianListener {
private static final Logger logger = Logger.getLogger(TrashJPanel.class);
public static final int LEVEL_ROOT = 0;
public static final int LEVEL_FOLDERS = 1;
public static final int LEVEL_NOTEBOOKS = 2;
/*
* UI components
*/
protected DefaultMutableTreeNode rootNode;
protected DefaultTreeModel treeModel;
protected final JTree tree;
protected JButton undoButton, emptyButton, deleteButton;
private Toolkit toolkit = Toolkit.getDefaultToolkit();
/*
* model
*/
private HashMap treeNodeToResourceUriMap;
/*
* singleton
*/
private static TrashJPanel singleton;
public static TrashJPanel getInstance() {
if (singleton == null) {
singleton = new TrashJPanel();
}
return singleton;
}
private ResourceDescriptor[] discardedNotebooksDescriptors;
/**
* Constructor.
*/
private TrashJPanel() {
treeNodeToResourceUriMap = new HashMap();
rootNode = new DefaultMutableTreeNode(Messages.getString("TrashJPanel.notebookArchive"));
treeModel = new DefaultTreeModel(rootNode);
treeModel.addTreeModelListener(new MyTreeModelListener());
tree = new JTree(treeModel);
tree.setEditable(false);
tree.getSelectionModel().setSelectionMode(
TreeSelectionModel.SINGLE_TREE_SELECTION);
tree.addTreeExpansionListener(this);
tree.addTreeWillExpandListener(this);
tree.setShowsRootHandles(true);
tree.putClientProperty("JTree.lineStyle", "Angled");
// tree renderer
// TODO implement own renderer in order to support tooltips
tree.setCellRenderer(new TrashTreeCellRenderer(IconsRegistry
.getImageIcon("trashFull.png"), IconsRegistry
.getImageIcon("explorerNotebookIcon.png")));
setLayout(new BorderLayout());
// control panel
JToolBar tp = new JToolBar();
tp.setLayout(new GridLayout(1, 6));
undoButton = new JButton("", IconsRegistry
.getImageIcon("trashUndo.png"));
undoButton.setEnabled(false);
undoButton.setToolTipText("Restore Outline");
undoButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
new RestoreNotebookJDialog(
(String)treeNodeToResourceUriMap.get(node),
"Restore Outline",
"Restore",
true);
}
});
tp.add(undoButton);
deleteButton = new JButton("", IconsRegistry
.getImageIcon("explorerDeleteSmall.png"));
deleteButton.setToolTipText("Delete Outline");
deleteButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
int result = JOptionPane.showConfirmDialog(
MindRaider.mainJFrame,
"Do you really want to DELETE this Outline?",
"Delete Outline", JOptionPane.YES_NO_OPTION);
if (result == JOptionPane.YES_OPTION) {
MindRaider.labelCustodian
.deleteOutline((String) treeNodeToResourceUriMap
.get(node));
refresh();
ExplorerJPanel.getInstance().refresh();
}
}
});
tp.add(deleteButton);
emptyButton = new JButton("", IconsRegistry
.getImageIcon("trashEmpty.png"));
emptyButton.setToolTipText(Messages.getString("TrashJPanel.emptyArchive"));
emptyButton.addActionListener(new ActionListener() {
public void actionPerformed(ActionEvent e) {
int result = JOptionPane
.showConfirmDialog(
MindRaider.mainJFrame,
"Do you really want to DELETE all discarded Outlines?",
"Empty Trash", JOptionPane.YES_NO_OPTION);
if (result == JOptionPane.YES_OPTION) {
final SwingWorker worker = new SwingWorker() {
public Object construct() {
ProgressDialogJFrame progressDialogJFrame = new ProgressDialogJFrame(
"Empty Trash",
"<html><br> <b>Deleting:</b> </html>");
try {
ResourceDescriptor[] resourceDescriptors = MindRaider.labelCustodian
.getDiscardedOutlineDescriptors();
if (resourceDescriptors != null) {
for (int i = 0; i < resourceDescriptors.length; i++) {
MindRaider.labelCustodian
.deleteOutline(resourceDescriptors[i]
.getUri());
}
refresh();
}
} finally {
if (progressDialogJFrame != null) {
progressDialogJFrame.dispose();
}
}
return null;
}
};
worker.start();
}
}
});
tp.add(emptyButton);
add(tp, BorderLayout.NORTH);
// add the tree
JScrollPane scrollPane = new JScrollPane(tree);
add(scrollPane);
// build the whole tree
buildTree();
// click handler
tree.addTreeSelectionListener(new TreeSelectionListener() {
public void valueChanged(TreeSelectionEvent e) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) tree
.getLastSelectedPathComponent();
if (node == null) {
return;
}
logger.debug("Tree selection path: "
+ node.getPath()[node.getLevel()]);
enableDisableToolbarButtons(node.getLevel());
}
});
}
/**
* Build tree. This method is called on startup and tree refresh in order to
* reload disc content. Adding/removing of particular nodes during the
* program run is performed on individual nodes.
*/
void buildTree() {
discardedNotebooksDescriptors = MindRaider.labelCustodian
.getDiscardedOutlineDescriptors();
if (!ArrayUtils.isEmpty(discardedNotebooksDescriptors)) {
for (int i = 0; i < discardedNotebooksDescriptors.length; i++) {
addDiscardedNotebookNode(discardedNotebooksDescriptors[i]
.getLabel(), discardedNotebooksDescriptors[i].getUri());
}
// now expand all rows
for (int i = 0; i < tree.getRowCount(); i++) {
tree.expandRow(i);
}
}
tree.setSelectionRow(0);
enableDisableToolbarButtons(0);
}
/**
* Add discarded notebook node.
*
* @param uri
* notebook node.
* @return the node.
*/
public DefaultMutableTreeNode addDiscardedNotebookNode(String label,
String uri) {
DefaultMutableTreeNode parent = null;
Object child = label;
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Call this method in order to update the tree.
*/
public void refresh() {
clear();
buildTree();
}
/**
* Move notebook up in the folder.
*
* @param notebookUri
* @param folderUri
*/
protected boolean moveNotebookUp(String folderUri, String notebookUri) {
logger.debug(" moveNotebookUp: " + folderUri + " " + notebookUri);
if (folderUri != null && notebookUri != null) {
try {
// add notebook to folder
boolean result = MindRaider.labelCustodian.moveNotebookUp(
folderUri, notebookUri);
// TODO PERFORMANCE move it just in the tree instead of refresh
refresh();
return result;
} catch (Exception e1) {
logger.error("moveNotebookUp(String, String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
"Outline Manipulation Error",
"Unable to move outline up: " + e1.getMessage(),
JOptionPane.ERROR_MESSAGE);
return false;
}
}
logger.debug("Outline wont be added URIs are null!");
return false;
}
/**
* Move notebook down in the folder.
*
* @param notebookUri
* @param folderUri
*/
protected boolean moveNotebookDown(String folderUri, String notebookUri) {
logger.debug(" moveNotebookDown: " + folderUri + " " + notebookUri);
if (folderUri != null && notebookUri != null) {
try {
boolean result = MindRaider.labelCustodian.moveNotebookDown(
folderUri, notebookUri);
// TODO PERFORMANCE move it just in the tree instead of refresh
refresh();
return result;
} catch (Exception e1) {
logger.error("moveNotebookDown(String, String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
"Outline Manipulation Error",
"Unable to move outline down: " + e1.getMessage(),
JOptionPane.ERROR_MESSAGE);
return false;
}
}
logger.debug("Outline wont be added URIs are null!");
return false;
}
/**
* Add notebook node to folder node (on new notebook creation).
*
* @param notebookUri
* newly created notebook URI.
*/
public void addNotebookToFolder(String notebookUri) {
logger.debug(" URI of created notebook is: " + notebookUri);
if (notebookUri != null) {
// add notebook to selected folder
TreePath treePath = tree.getSelectionPath();
String folderUri = (String) treeNodeToResourceUriMap.get(treePath
.getLastPathComponent());
logger.debug("Enclosing folder URI is: " + folderUri);
if (folderUri != null) {
try {
// add notebook to folder
MindRaider.labelCustodian.addOutline(folderUri,
notebookUri);
// now add it in the tree
OutlineResource notebookResource = MindRaider.outlineCustodian
.getActiveOutlineResource();
addNotebookNode((DefaultMutableTreeNode) treePath
.getLastPathComponent(), notebookResource.resource
.getMetadata().getUri().toASCIIString(),
notebookResource.getLabel());
} catch (Exception e1) {
logger.error("addNotebookToFolder(String)", e1);
JOptionPane.showMessageDialog(TrashJPanel.this,
"Outline Creation Error",
"Unable to add Outline to folder: "
+ e1.getMessage(),
JOptionPane.ERROR_MESSAGE);
return;
}
}
} else {
logger.debug("Outline won't be added to folder - its URI is null!");
}
}
/**
* Remove all nodes except the root node.
*/
public void clear() {
rootNode.removeAllChildren();
treeModel.reload();
treeNodeToResourceUriMap.clear();
}
/**
* Remove the currently selected node.
*/
public void removeCurrentNode() {
TreePath currentSelection = tree.getSelectionPath();
if (currentSelection != null) {
DefaultMutableTreeNode currentNode = (DefaultMutableTreeNode) (currentSelection
.getLastPathComponent());
MutableTreeNode parent = (MutableTreeNode) (currentNode.getParent());
if (parent != null) {
treeModel.removeNodeFromParent(currentNode);
return;
}
}
// Either there was no selection, or the root was selected.
toolkit.beep();
}
/**
* Add child to the currently selected node.
*/
public DefaultMutableTreeNode addObject(Object child) {
DefaultMutableTreeNode parentNode = null;
TreePath parentPath = tree.getSelectionPath();
if (parentPath == null) {
parentNode = rootNode;
} else {
parentNode = (DefaultMutableTreeNode) (parentPath
.getLastPathComponent());
}
return addObject(parentNode, child, true);
}
public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent,
Object child) {
return addObject(parent, child, false);
}
/**
* Add folder node.
*
* @param uri
* folder URI.
* @return the node.
*/
public DefaultMutableTreeNode addFolderNode(String uri) {
DefaultMutableTreeNode parent = null;
// get label from URI
FolderResource resource = new FolderResource(MindRaider.labelCustodian
.get(uri));
Object child = resource.getLabel();
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Add notebook node.
*
* @param parent
* folder node.
* @param uri
* notebook URI.
* @param label
* notebook label.
* @return the node.
*/
public DefaultMutableTreeNode addNotebookNode(
DefaultMutableTreeNode parent, String uri, String label) {
Object child = label;
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
// store node to map to be able to get URI from node object
treeNodeToResourceUriMap.put(childNode, uri);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
return childNode;
}
/**
* Add an child object to a parent object.
*
* @param parent
* the parent object.
* @param child
* the child object.
* @param shouldBeVisible
* if <code>true</code> the object should be visible.
* @return Returns a <code>DefaultMutableTreeNode</code>
*/
public DefaultMutableTreeNode addObject(DefaultMutableTreeNode parent,
Object child, boolean shouldBeVisible) {
DefaultMutableTreeNode childNode = new DefaultMutableTreeNode(child);
if (parent == null) {
parent = rootNode;
}
treeModel.insertNodeInto(childNode, parent, parent.getChildCount());
// Make sure the user can see the lovely new node.
if (shouldBeVisible) {
tree.scrollPathToVisible(new TreePath(childNode.getPath()));
}
return childNode;
}
/**
* Custom MyTreeModelListener class.
*/
class MyTreeModelListener implements TreeModelListener {
/**
* Logger for this class.
*/
private final Logger logger = Logger
.getLogger(MyTreeModelListener.class);
/**
* @see javax.swing.event.TreeModelListener#treeNodesChanged(javax.swing.event.TreeModelEvent)
*/
public void treeNodesChanged(TreeModelEvent e) {
DefaultMutableTreeNode node;
node = (DefaultMutableTreeNode) (e.getTreePath()
.getLastPathComponent());
/*
* If the event lists children, then the changed node is the child
* of the node we've already gotten. Otherwise, the changed node and
* the specified node are the same.
*/
// ToDo
try {
int index = e.getChildIndices()[0];
node = (DefaultMutableTreeNode) (node.getChildAt(index));
} catch (NullPointerException exc) {
//
}
logger.debug("The user has finished editing the node.");
logger.debug("New value: " + node.getUserObject());
}
public void treeNodesInserted(TreeModelEvent e) {
}
public void treeNodesRemoved(TreeModelEvent e) {
}
public void treeStructureChanged(TreeModelEvent e) {
}
}
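/**
 * @see javax.swing.event.TreeExpansionListener#treeCollapsed(javax.swing.event.TreeExpansionEvent)
 */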
public void treeCollapsed(TreeExpansionEvent e) {
logger.debug("Tree colapsed event..." + e.getPath());
}
/**
* @see javax.swing.event.TreeExpansionListener#treeExpanded(javax.swing.event.TreeExpansionEvent)
*/
public void treeExpanded(TreeExpansionEvent e) {
logger.debug("Tree expanded event..." + e.getPath());
}
/**
* @see javax.swing.event.TreeWillExpandListener#treeWillCollapse(javax.swing.event.TreeExpansionEvent)
*/
public void treeWillCollapse(TreeExpansionEvent e)
throws ExpandVetoException {
logger.debug("Tree will collapse " + e.getPath());
}
/**
* @see javax.swing.event.TreeWillExpandListener#treeWillExpand(javax.swing.event.TreeExpansionEvent)
*/
public void treeWillExpand(TreeExpansionEvent e) throws ExpandVetoException {
logger.debug("Tree will expand " + e.getPath());
/*
* DefaultMutableTreeNode node = (DefaultMutableTreeNode)
* tree.getLastSelectedPathComponent(); if (node == null) { return; }
* logger.debug(""+node.getPath()[node.getLevel()]); // buttons
* disabling switch(node.getLevel()) { case LEVEL_FOLDERS: // disconnect
* childrens from the node Enumeration enumeration=node.children(); //
* delete nodes itself while (enumeration.hasMoreElements()) { Object
* object=enumeration.nextElement();
* treeNodeToResourceUriMap.remove(object);
* treeModel.removeNodeFromParent((MutableTreeNode)object); } // get
* folder URI logger.debug("Expanding folder:
* "+treeNodeToResourceUriMap.get(node)); FolderResource folder =new
* FolderResource(MindRaider.folderCustodian.get((String)treeNodeToResourceUriMap.get(node)));
* String[] notebookUris=folder.getNotebookUris(); if (notebookUris !=
* null) { for (int i= 0; i < notebookUris.length; i++) {
* NotebookResource notebook=new
* NotebookResource(MindRider.notebookCustodian.get(notebookUris[i]));
* addNotebookNode(node,notebook.resource.metadata.uri.toASCIIString(),notebook.getLabel()); } } }
*/
}
/**
 * Handle a newly created label (folder) resource.
 */
public void labelCreated(FolderResource folder) {
Metadata meta = folder.getResource().getMetadata();
logger.debug("Folder created: " + meta.getUri().toASCIIString());
// handle creation of the folder
addFolderNode(meta.getUri().toASCIIString());
}
/**
* @see java.awt.event.FocusListener#focusGained(java.awt.event.FocusEvent)
*/
public void focusGained(FocusEvent arg0) {
// TODO Auto-generated method stub
}
/**
 * Enable or disable the toolbar buttons for the given tree level.
 *
 * @param level
 *            either <code>LEVEL_ROOT</code> or <code>LEVEL_FOLDERS</code>.
 */
protected void enableDisableToolbarButtons(int level) {
// buttons disabling
switch (level) {
case LEVEL_ROOT:
undoButton.setEnabled(false);
deleteButton.setEnabled(false);
emptyButton.setEnabled(true);
break;
case LEVEL_FOLDERS:
undoButton.setEnabled(true);
deleteButton.setEnabled(true);
emptyButton.setEnabled(true);
break;
}
}
private static final long serialVersionUID = 5028293540089775890L;
}
|
Java
|
package com.fpliu.newton.ui.list;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.GridView;
/**
 * Contract for grid-backed list views: exposes the underlying {@link GridView},
 * its item adapter, and per-item view creation.
 *
 * @author 792793182@qq.com 2017-06-30.
 */
public interface IGrid<T, V extends GridView> extends ICommon<T> {
V getGridView();
void setItemAdapter(ItemAdapter<T> itemAdapter);
ItemAdapter<T> getItemAdapter();
void setOnItemClickListener(AdapterView.OnItemClickListener listener);
int getItemViewTypeCount();
int getItemViewType(int position);
View getItemView(int position, View convertView, ViewGroup parent);
void notifyDataSetChanged();
void setNumColumns(int numColumns);
}
|
Java
|
/*******************************************************************************
* Copyright (c) 2012-2013 University of Stuttgart.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and the Apache License 2.0 which both accompany this distribution,
* and are available at http://www.eclipse.org/legal/epl-v10.html
* and http://www.apache.org/licenses/LICENSE-2.0
*
* Contributors:
* Oliver Kopp - initial API and implementation
*******************************************************************************/
/**
* This package contains the REST resources
*
* Mostly, they produce Viewables, to which a JSP and the current resource are
* passed. As the JSP itself handles plain Java objects and not Responses, the
* resources also have methods returning POJOs. This might be ugly design, but
* it was quick to implement.
*
* The package structure is mirrored in src/main/webapp/jsp to ease finding the
* JSPs belonging to a resource.
*
* The resources are <em>not</em> in line with the resource model of the TOSCA
* container. Especially, we do not employ HATEOAS here.
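*
* As an illustrative sketch (not code from this package; the JSP path below is
* made up), a resource method pairing a JSP with the current resource might
* look like:
*
* <pre>
* &#64;GET
* &#64;Produces(MediaType.TEXT_HTML)
* public Viewable getHTML() {
*     return new Viewable("/jsp/myResource.jsp", this);
* }
* </pre>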
*/
package org.eclipse.winery.repository.resources;
|
Java
|
/**
*
Package: MAG - VistA Imaging
WARNING: Per VHA Directive 2004-038, this routine should not be modified.
Date Created: Jul 10, 2012
Site Name: Washington OI Field Office, Silver Spring, MD
Developer: VHAISWWERFEJ
Description:
;; +--------------------------------------------------------------------+
;; Property of the US Government.
;; No permission to copy or redistribute this software is given.
;; Use of unreleased versions of this software requires the user
;; to execute a written test agreement with the VistA Imaging
;; Development Office of the Department of Veterans Affairs,
;; telephone (301) 734-0100.
;;
;; The Food and Drug Administration classifies this software as
;; a Class II medical device. As such, it may not be changed
;; in any way. Modifications to this software may result in an
;; adulterated medical device under 21CFR820, the use of which
;; is considered to be a violation of US Federal Statutes.
;; +--------------------------------------------------------------------+
*/
package gov.va.med.imaging.pathology.rest.translator;
import java.util.Date;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* @author VHAISWWERFEJ
*
*/
public class PathologyRestTranslatorTest
{
@Test
public void testDateTranslation()
{
try
{
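// the input is presumably yyyyMMddHHmm, i.e. July 10, 2012 at 14:35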
Date date = PathologyRestTranslator.translateDate("201207101435");
System.out.println("Date: " + date);
}
catch(Exception ex)
{
ex.printStackTrace();
fail(ex.getMessage());
}
}
}
|
Java
|
package io.quarkus.grpc.examples.hello;
import static io.restassured.RestAssured.get;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.jupiter.api.Test;
import io.quarkus.test.junit.QuarkusTest;
@QuarkusTest
class HelloWorldMutualTlsEndpointTest {
@Test
public void testHelloWorldServiceUsingBlockingStub() {
String response = get("/hello/blocking/neo").asString();
assertThat(response).isEqualTo("Hello neo");
}
@Test
public void testHelloWorldServiceUsingMutinyStub() {
String response = get("/hello/mutiny/neo-mutiny").asString();
assertThat(response).isEqualTo("Hello neo-mutiny");
}
}
|
Java
|
/*
* avdtp_internal.h - avdtp handling
* Copyright (c) 2015-2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <bluetooth/avdtp.h>
/* @brief A2DP roles */
#define A2DP_SRC_ROLE 0x00
#define A2DP_SNK_ROLE 0x01
/* @brief AVDTP Role */
#define BT_AVDTP_INT 0x00
#define BT_AVDTP_ACP 0x01
#define BT_L2CAP_PSM_AVDTP 0x0019
/* AVDTP SIGNAL HEADER - Packet Type*/
#define BT_AVDTP_PACKET_TYPE_SINGLE 0x00
#define BT_AVDTP_PACKET_TYPE_START 0x01
#define BT_AVDTP_PACKET_TYPE_CONTINUE 0x02
#define BT_AVDTP_PACKET_TYPE_END 0x03
/* AVDTP SIGNAL HEADER - MESSAGE TYPE */
#define BT_AVDTP_MSG_TYPE_CMD 0x00
#define BT_AVDTP_MSG_TYPE_GEN_REJECT 0x01
#define BT_AVDTP_MSG_TYPE_ACCEPT 0x02
#define BT_AVDTP_MSG_TYPE_REJECT 0x03
/* @brief AVDTP SIGNAL HEADER - Signal Identifier */
#define BT_AVDTP_DISCOVER 0x01
#define BT_AVDTP_GET_CAPABILITIES 0x02
#define BT_AVDTP_SET_CONFIGURATION 0x03
#define BT_AVDTP_GET_CONFIGURATION 0x04
#define BT_AVDTP_RECONFIGURE 0x05
#define BT_AVDTP_OPEN 0x06
#define BT_AVDTP_START 0x07
#define BT_AVDTP_CLOSE 0x08
#define BT_AVDTP_SUSPEND 0x09
#define BT_AVDTP_ABORT 0x0a
#define BT_AVDTP_SECURITY_CONTROL 0x0b
#define BT_AVDTP_GET_ALL_CAPABILITIES 0x0c
#define BT_AVDTP_DELAYREPORT 0x0d
/* @brief AVDTP STATE */
#define BT_AVDTP_STATE_IDLE 0x01
#define BT_AVDTP_STATE_CONFIGURED 0x02
#define BT_AVDTP_STATE_OPEN 0x03
#define BT_AVDTP_STATE_STREAMING 0x04
#define BT_AVDTP_STATE_CLOSING 0x05
#define BT_AVDTP_STATE_ABORT 0x06
#define BT_AVDTP_STATE_SIG_CONNECTED 0x07
#define BT_AVDTP_STATE_SIG_DISCONNECTED 0x08
#define BT_AVDTP_STATE_INVALID 0x00
/* @brief AVDTP Media TYPE */
#define BT_AVDTP_SERVICE_CAT_MEDIA_TRANSPORT 0x01
#define BT_AVDTP_SERVICE_CAT_REPORTING 0x02
#define BT_AVDTP_SERVICE_CAT_RECOVERY 0x03
#define BT_AVDTP_SERVICE_CAT_CONTENT_PROTECTION 0x04
#define BT_AVDTP_SERVICE_CAT_HDR_COMPRESSION 0x05
#define BT_AVDTP_SERVICE_CAT_MULTIPLEXING 0x06
#define BT_AVDTP_SERVICE_CAT_MEDIA_CODEC 0x07
#define BT_AVDTP_SERVICE_CAT_DELAYREPORTING 0x08
/* AVDTP Error Codes */
#define BT_AVDTP_SUCCESS 0x00
#define BT_AVDTP_ERR_BAD_HDR_FORMAT 0x01
#define BT_AVDTP_ERR_BAD_LENGTH 0x11
#define BT_AVDTP_ERR_BAD_ACP_SEID 0x12
#define BT_AVDTP_ERR_SEP_IN_USE 0x13
#define BT_AVDTP_ERR_SEP_NOT_IN_USE 0x14
#define BT_AVDTP_ERR_BAD_SERV_CATEGORY 0x17
#define BT_AVDTP_ERR_BAD_PAYLOAD_FORMAT 0x18
#define BT_AVDTP_ERR_NOT_SUPPORTED_COMMAND 0x19
#define BT_AVDTP_ERR_INVALID_CAPABILITIES 0x1a
#define BT_AVDTP_ERR_BAD_RECOVERY_TYPE 0x22
#define BT_AVDTP_ERR_BAD_MEDIA_TRANSPORT_FORMAT 0x23
#define BT_AVDTP_ERR_BAD_RECOVERY_FORMAT 0x25
#define BT_AVDTP_ERR_BAD_ROHC_FORMAT 0x26
#define BT_AVDTP_ERR_BAD_CP_FORMAT 0x27
#define BT_AVDTP_ERR_BAD_MULTIPLEXING_FORMAT 0x28
#define BT_AVDTP_ERR_UNSUPPORTED_CONFIGURATION 0x29
#define BT_AVDTP_ERR_BAD_STATE 0x31
#define BT_AVDTP_MIN_MTU 48
#define BT_AVDTP_MAX_MTU CONFIG_BLUETOOTH_L2CAP_IN_MTU
#define BT_AVDTP_MIN_SEID 0x01
#define BT_AVDTP_MAX_SEID 0x3E
/* Helper to calculate needed outgoing buffer size. */
#define BT_AVDTP_BUF_SIZE(mtu) (CONFIG_BLUETOOTH_HCI_SEND_RESERVE + \
sizeof(struct bt_hci_acl_hdr) + \
sizeof(struct bt_l2cap_hdr) + \
BT_AVDTP_SIG_HDR_LEN + (mtu))
struct bt_avdtp_single_sig_hdr {
uint8_t hdr;
uint8_t signal_id;
} __packed;
#define BT_AVDTP_SIG_HDR_LEN sizeof(struct bt_avdtp_single_sig_hdr)
struct bt_avdtp_cfm_cb {
/*
* Discovery_cfm;
* get_capabilities_cfm;
* set_configuration_cfm;
* open_cfm;
* start_cfm;
* suspend_cfm;
* close_cfm;
*/
};
struct bt_avdtp_ind_cb {
/*
* discovery_ind;
* get_capabilities_ind;
* set_configuration_ind;
* open_ind;
* start_ind;
* suspend_ind;
* close_ind;
*/
};
struct bt_avdtp_event_cb {
struct bt_avdtp_ind_cb *ind;
struct bt_avdtp_cfm_cb *cfm;
};
/** @brief Global AVDTP session structure. */
struct bt_avdtp {
struct bt_l2cap_br_chan br_chan;
uint8_t state; /* current state of AVDTP*/
};
/* Initialize AVDTP layer*/
int bt_avdtp_init(void);
/* Application register with AVDTP layer */
int bt_avdtp_register(struct bt_avdtp_event_cb *cb);
/* AVDTP connect */
int bt_avdtp_connect(struct bt_conn *conn, struct bt_avdtp *session);
/* AVDTP disconnect */
int bt_avdtp_disconnect(struct bt_avdtp *session);
/* AVDTP SEP register function */
int bt_avdtp_register_sep(uint8_t media_type, uint8_t role,
struct bt_avdtp_seid_lsep *sep);
|
C
|
/**
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* under the License.
*/
package org.apache.hadoop.hbase.filter;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
@Category(MediumTests.class)
public class TestFuzzyRowAndColumnRangeFilter {
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final Log LOG = LogFactory.getLog(this.getClass());
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
// Nothing to do.
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
// Nothing to do.
}
@Test
public void Test() throws Exception {
String cf = "f";
String table = "TestFuzzyAndColumnRangeFilterClient";
HTable ht = TEST_UTIL.createTable(Bytes.toBytes(table),
Bytes.toBytes(cf), Integer.MAX_VALUE);
// 10 byte row key - (2 bytes 4 bytes 4 bytes)
// 4 byte qualifier
// 4 byte value
for (int i1 = 0; i1 < 2; i1++) {
for (int i2 = 0; i2 < 5; i2++) {
byte[] rk = new byte[10];
ByteBuffer buf = ByteBuffer.wrap(rk);
buf.clear();
buf.putShort((short) 2);
buf.putInt(i1);
buf.putInt(i2);
for (int c = 0; c < 5; c++) {
byte[] cq = new byte[4];
Bytes.putBytes(cq, 0, Bytes.toBytes(c), 0, 4);
Put p = new Put(rk);
p.setDurability(Durability.SKIP_WAL);
p.add(cf.getBytes(), cq, Bytes.toBytes(c));
ht.put(p);
LOG.info("Inserting: rk: " + Bytes.toStringBinary(rk) + " cq: "
+ Bytes.toStringBinary(cq));
}
}
}
TEST_UTIL.flush();
// full qualifier range [0, 4]: 2 matching rows x 5 cells = 10
runTest(ht, 0, 10);
// narrowed qualifier range [1, 4]: 2 matching rows x 4 cells = 8
runTest(ht, 1, 8);
}
private void runTest(HTable hTable, int cqStart, int expectedSize) throws IOException {
// [0, 2, ?, ?, ?, ?, 0, 0, 0, 1]
byte[] fuzzyKey = new byte[10];
ByteBuffer buf = ByteBuffer.wrap(fuzzyKey);
buf.clear();
buf.putShort((short) 2);
for (int i = 0; i < 4; i++)
buf.put((byte)63);
buf.putInt(1);
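// mask semantics: 0 = byte is fixed and must equal fuzzyKey, 1 = byte may be anything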
byte[] mask = new byte[] {0 , 0, 1, 1, 1, 1, 0, 0, 0, 0};
Pair<byte[], byte[]> pair = new Pair<byte[], byte[]>(fuzzyKey, mask);
FuzzyRowFilter fuzzyRowFilter = new FuzzyRowFilter(Lists.newArrayList(pair));
ColumnRangeFilter columnRangeFilter = new ColumnRangeFilter(Bytes.toBytes(cqStart), true
, Bytes.toBytes(4), true);
//regular test
runScanner(hTable, expectedSize, fuzzyRowFilter, columnRangeFilter);
//reverse filter order test
runScanner(hTable, expectedSize, columnRangeFilter, fuzzyRowFilter);
}
private void runScanner(HTable hTable, int expectedSize, Filter... filters) throws IOException {
String cf = "f";
Scan scan = new Scan();
scan.addFamily(cf.getBytes());
FilterList filterList = new FilterList(filters);
scan.setFilter(filterList);
ResultScanner scanner = hTable.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
Result result;
long timeBeforeScan = System.currentTimeMillis();
while ((result = scanner.next()) != null) {
for (Cell kv : result.listCells()) {
LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
+ Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
results.add(kv);
}
}
long scanTime = System.currentTimeMillis() - timeBeforeScan;
scanner.close();
LOG.info("scan time = " + scanTime + "ms");
LOG.info("found " + results.size() + " results");
assertEquals(expectedSize, results.size());
}
}
|
Java
|
/*
* Trap
* An anti-pryer server for better privacy
*
* This file is a part of Trap project
*
* Copyright 2016 Rain Lee <raincious@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package logger
import (
"github.com/raincious/trap/trap/core/types"
"bufio"
"fmt"
"time"
)
type FilePrinter struct {
writer *bufio.Writer
writeCounts uint16
}
func NewFilePrinter(w *bufio.Writer) (*FilePrinter, *types.Throw) {
_, writeErr := w.Write([]byte(""))
if writeErr != nil {
return nil, types.ConvertError(writeErr)
}
return &FilePrinter{
writer: w,
}, nil
}
func (l *FilePrinter) save(w types.String, c types.String,
t time.Time, m types.String) {
_, err := l.writer.WriteString(fmt.Sprintf("<%s> %s [%s]: %s\r\n",
w, c, t.Format(time.StampMilli), m))
if err != nil {
panic(fmt.Errorf("Can't write log file due to error: %s", err))
}
l.writeCounts += 1
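// Batch disk I/O: flush only after more than ten buffered messages.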
if l.writeCounts > 10 {
l.writer.Flush()
l.writeCounts = 0
}
}
func (l *FilePrinter) Info(c types.String, t time.Time, m types.String) {
l.save("INF", c, t, m)
}
func (l *FilePrinter) Debug(c types.String, t time.Time, m types.String) {
l.save("DBG", c, t, m)
}
func (l *FilePrinter) Warning(c types.String, t time.Time, m types.String) {
l.save("WRN", c, t, m)
}
func (l *FilePrinter) Error(c types.String, t time.Time, m types.String) {
l.save("ERR", c, t, m)
}
func (l *FilePrinter) Print(c types.String, t time.Time, m types.String) {
l.save("DEF", c, t, m)
}
|
Go
|
local json = require "cjson"
local http_client = require "kong.tools.http_client"
local spec_helper = require "spec.spec_helpers"
local BASE_URL = spec_helper.API_URL.."/apis/%s/plugins/"
describe("Rate Limiting API", function()
setup(function()
spec_helper.prepare_db()
spec_helper.insert_fixtures {
api = {
{ name = "tests-rate-limiting1", request_host = "test1.com", upstream_url = "http://mockbin.com" }
}
}
spec_helper.start_kong()
local response = http_client.get(spec_helper.API_URL.."/apis/")
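-- substitute the id of the API inserted above into the BASE_URL template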
BASE_URL = string.format(BASE_URL, json.decode(response).data[1].id)
end)
teardown(function()
spec_helper.stop_kong()
end)
describe("POST", function()
it("should not save with empty config", function()
local response, status = http_client.post(BASE_URL, { name = "rate-limiting" })
local body = json.decode(response)
assert.are.equal(400, status)
assert.are.equal("You need to set at least one limit: second, minute, hour, day, month, year", body.message)
end)
it("should save with proper config", function()
local response, status = http_client.post(BASE_URL, { name = "rate-limiting", ["config.second"] = 10 })
local body = json.decode(response)
assert.are.equal(201, status)
assert.are.equal(10, body.config.second)
end)
end)
end)
|
Lua
|
define([
'jquery',
'underscore',
'backbone',
'app'
],
function (
$, _, Backbone, app
) {
var Models = {},
Collections = {},
Views = {};
    Models.Project = Backbone.Model.extend({
        url: function() {
            return app.api('projects?' + this.get('params'));
        }
    });
    // Collections must extend Backbone.Collection, not Backbone.Model.
    Collections.Projects = Backbone.Collection.extend({
        model: Models.Project,
        url: function() {
            return app.api('projects/' + this.get('platform') +
                (this.get('uri') ? '/' + this.get('uri') : ''));
        },
        parse: function(res) {
            return { projects: res };
        }
    });
return {
Models: Models,
Collections: Collections,
Views: Views
};
});
|
JavaScript
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a ResNet-50 model on ImageNet on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import sys
import time
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
# For Cloud environment, add parent directory for imports
sys.path.append(os.path.dirname(os.path.abspath(sys.path[0])))
from official.resnet import imagenet_input # pylint: disable=g-import-not-at-top
from official.resnet import resnet_main
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.python.estimator import estimator
FLAGS = tf.flags.FLAGS
CKPT_PATTERN = r'model\.ckpt-(?P<gs>[0-9]+)\.data'
flags.DEFINE_string(
'data_dir_small', default=None,
help=('The directory where the resized (160x160) ImageNet input data is '
'stored. This is only to be used in conjunction with the '
'resnet_benchmark.py script.'))
flags.DEFINE_bool(
'use_fast_lr', default=False,
help=('Enabling this uses a faster learning rate schedule along with '
'different image sizes in the input pipeline. This is only to be '
'used in conjunction with the resnet_benchmark.py script.'))
# Number of training and evaluation images in the standard ImageNet dataset
NUM_TRAIN_IMAGES = 1281167
NUM_EVAL_IMAGES = 50000
def main(unused_argv):
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.iterations_per_loop,
keep_checkpoint_max=None,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_cores,
per_host_input_for_training=contrib_tpu.InputPipelineConfig.PER_HOST_V2)) # pylint: disable=line-too-long
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = imagenet_input.ImageNetInput(
is_training=True,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval = imagenet_input.ImageNetInput(
is_training=False,
data_dir=FLAGS.data_dir,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
if FLAGS.use_fast_lr:
resnet_main.LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 4), (0.1, 21), (0.01, 35), (0.001, 43)
]
imagenet_train_small = imagenet_input.ImageNetInput(
is_training=True,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_eval_small = imagenet_input.ImageNetInput(
is_training=False,
image_size=128,
data_dir=FLAGS.data_dir_small,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input,
cache=True)
imagenet_train_large = imagenet_input.ImageNetInput(
is_training=True,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
imagenet_eval_large = imagenet_input.ImageNetInput(
is_training=False,
image_size=288,
data_dir=FLAGS.data_dir,
num_parallel_calls=FLAGS.num_parallel_calls,
use_bfloat16=True,
transpose_input=FLAGS.transpose_input)
resnet_classifier = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=resnet_main.resnet_model_fn,
config=config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.mode == 'train':
current_step = estimator._load_global_step_from_checkpoint_dir(FLAGS.model_dir) # pylint: disable=protected-access,line-too-long
batches_per_epoch = NUM_TRAIN_IMAGES / FLAGS.train_batch_size
tf.logging.info('Training for %d steps (%.2f epochs in total). Current'
' step %d.' % (FLAGS.train_steps,
FLAGS.train_steps / batches_per_epoch,
current_step))
start_timestamp = time.time() # This time will include compilation time
# Write a dummy file at the start of training so that we can measure the
# runtime at each checkpoint from the file write time.
tf.gfile.MkDir(FLAGS.model_dir)
if not tf.gfile.Exists(os.path.join(FLAGS.model_dir, 'START')):
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'START'), 'w') as f:
f.write(str(start_timestamp))
if FLAGS.use_fast_lr:
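      # Progressive resizing (as read from the code): 128px inputs for the
      # first 18 epochs, the default size through epoch 41, then 288px
      # inputs until epoch 50 or FLAGS.train_steps, whichever comes first.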
small_steps = int(18 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
normal_steps = int(41 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size)
large_steps = int(min(50 * NUM_TRAIN_IMAGES / FLAGS.train_batch_size,
FLAGS.train_steps))
resnet_classifier.train(
input_fn=imagenet_train_small.input_fn, max_steps=small_steps)
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=normal_steps)
resnet_classifier.train(
input_fn=imagenet_train_large.input_fn,
max_steps=large_steps)
else:
resnet_classifier.train(
input_fn=imagenet_train.input_fn, max_steps=FLAGS.train_steps)
else:
assert FLAGS.mode == 'eval'
start_timestamp = tf.gfile.Stat(
os.path.join(FLAGS.model_dir, 'START')).mtime_nsec
results = []
eval_steps = NUM_EVAL_IMAGES // FLAGS.eval_batch_size
ckpt_steps = set()
all_files = tf.gfile.ListDirectory(FLAGS.model_dir)
for f in all_files:
mat = re.match(CKPT_PATTERN, f)
if mat is not None:
ckpt_steps.add(int(mat.group('gs')))
ckpt_steps = sorted(list(ckpt_steps))
tf.logging.info('Steps to be evaluated: %s' % str(ckpt_steps))
for step in ckpt_steps:
ckpt = os.path.join(FLAGS.model_dir, 'model.ckpt-%d' % step)
batches_per_epoch = NUM_TRAIN_IMAGES // FLAGS.train_batch_size
current_epoch = step // batches_per_epoch
if FLAGS.use_fast_lr:
        if current_epoch < 18:
          eval_input_fn = imagenet_eval_small.input_fn
        elif current_epoch < 41:
          eval_input_fn = imagenet_eval.input_fn
        else:
          eval_input_fn = imagenet_eval_large.input_fn
else:
eval_input_fn = imagenet_eval.input_fn
end_timestamp = tf.gfile.Stat(ckpt + '.index').mtime_nsec
elapsed_hours = (end_timestamp - start_timestamp) / (1e9 * 3600.0)
tf.logging.info('Starting to evaluate.')
eval_start = time.time() # This time will include compilation time
eval_results = resnet_classifier.evaluate(
input_fn=eval_input_fn,
steps=eval_steps,
checkpoint_path=ckpt)
eval_time = int(time.time() - eval_start)
tf.logging.info('Eval results: %s. Elapsed seconds: %d' %
(eval_results, eval_time))
results.append([
current_epoch,
elapsed_hours,
'%.2f' % (eval_results['top_1_accuracy'] * 100),
'%.2f' % (eval_results['top_5_accuracy'] * 100),
])
time.sleep(60)
with tf.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.tsv'), 'wb') as tsv_file: # pylint: disable=line-too-long
writer = csv.writer(tsv_file, delimiter='\t')
writer.writerow(['epoch', 'hours', 'top1Accuracy', 'top5Accuracy'])
writer.writerows(results)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
|
Python
|
<?php
require_once "db_config.php";
session_start();
if(!array_key_exists('id', $_SESSION))
{
    // "Location" takes no space before the colon; stop rendering after the redirect
    header("Location: index.php");
    exit;
}
?>
<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title></title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<link rel="stylesheet" href="css/bootstrap-responsive.min.css">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.2-respond-1.1.0.min.js"></script>
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an <strong>outdated</strong> browser. Please <a href="http://browsehappy.com/">upgrade your browser</a> or <a href="http://www.google.com/chromeframe/?redirect=true">activate Google Chrome Frame</a> to improve your experience.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<!-- <div class="navbar navbar-inverse navbar-fixed-top">
<div class="navbar-inner">
<div class="container">
<a class="btn btn-navbar" data-toggle="collapse" data-target=".nav-collapse">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</a>
<a class="brand" href="#">Project name</a>
<div class="nav-collapse collapse">
<ul class="nav">
<li class="active"><a href="#">Home</a></li>
<li><a href="#about">About</a></li>
<li><a href="#contact">Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="#">Action</a></li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="nav-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
</ul>
<form class="navbar-form pull-right" action='login.php' method='post'>
<input class="span2" type="text" placeholder="user" name='user' id='user'>
<input class="span2" type="password" placeholder="password" name='password' id='password'>
<input type='submit'class="btn" name='Submit' value='Submit' />
</form>
</div><!--/.nav-collapse
</div>
</div>
</div>
-->
<?php require_once "menu_func.php"; ?>
<div class="container">
<!-- Main hero unit for a primary marketing message or call to action -->
<div class="hero-unit">
<h1>Welcome <?php echo $_SESSION['user']; ?></h1>
<p>Shall we edit a little?</p>
</div>
<!-- Example row of columns -->
<div class="row">
<div class="span4">
<h2>Add a Menu</h2>
<p> <form class="navbar-form pull-right" action="addmenu.php" method="post">
<input class="span2" type="text" placeholder="Name" name="menu_name" id="menu_name"><br />
<input class="span2" type="text" placeholder="Link" name="menu_addr" id="menu_addr"><br />
<select name="menu_type" id="menu_type" >
<option value ="top">Menu</option>
<option value ="dropdown">Drop</option>
<option value ="usermenu">User</option>
</select>
<br />
<input type="submit"class="btn" name="Submit" value="Submit" />
</form>
</p>
</div>
<!-- <div class="span4">
<h2>Heading</h2>
<p>Donec id elit non mi porta gravida at eget metus. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Etiam porta sem malesuada magna mollis euismod. Donec sed odio dui. </p>
<p><a class="btn" href="#">View details »</a></p>
</div>
<div class="span4">
<h2>Heading</h2>
<p>Donec sed odio dui. Cras justo odio, dapibus ac facilisis in, egestas eget quam. Vestibulum id ligula porta felis euismod semper. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus.</p>
<p><a class="btn" href="#">View details »</a></p>
</div> -->
</div>
<hr>
<footer>
<p>© Company 2012</p>
</footer>
</div> <!-- /container -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/vendor/jquery-1.9.1.min.js"><\/script>')</script>
<script src="js/vendor/bootstrap.min.js"></script>
<script src="js/main.js"></script>
<script>
var _gaq=[['_setAccount','UA-XXXXX-X'],['_trackPageview']];
(function(d,t){var g=d.createElement(t),s=d.getElementsByTagName(t)[0];
g.src=('https:'==location.protocol?'//ssl':'//www')+'.google-analytics.com/ga.js';
s.parentNode.insertBefore(g,s)}(document,'script'));
</script>
</body>
</html>
|
PHP
|
package pl.matisoft.soy.config;
import com.google.template.soy.jssrc.SoyJsSrcOptions;
import com.google.template.soy.tofu.SoyTofuOptions;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.context.support.ServletContextResource;
import org.springframework.web.servlet.ViewResolver;
import pl.matisoft.soy.ContentNegotiator;
import pl.matisoft.soy.DefaultContentNegotiator;
import pl.matisoft.soy.SoyTemplateViewResolver;
import pl.matisoft.soy.bundle.DefaultSoyMsgBundleResolver;
import pl.matisoft.soy.bundle.SoyMsgBundleResolver;
import pl.matisoft.soy.compile.DefaultTofuCompiler;
import pl.matisoft.soy.compile.TofuCompiler;
import pl.matisoft.soy.data.DefaultToSoyDataConverter;
import pl.matisoft.soy.data.ToSoyDataConverter;
import pl.matisoft.soy.data.adjust.ModelAdjuster;
import pl.matisoft.soy.data.adjust.SpringModelAdjuster;
import pl.matisoft.soy.global.compile.CompileTimeGlobalModelResolver;
import pl.matisoft.soy.global.compile.EmptyCompileTimeGlobalModelResolver;
import pl.matisoft.soy.global.runtime.EmptyGlobalRuntimeModelResolver;
import pl.matisoft.soy.global.runtime.GlobalRuntimeModelResolver;
import pl.matisoft.soy.holder.CompiledTemplatesHolder;
import pl.matisoft.soy.holder.DefaultCompiledTemplatesHolder;
import pl.matisoft.soy.locale.LocaleProvider;
import pl.matisoft.soy.locale.SpringLocaleProvider;
import pl.matisoft.soy.render.DefaultTemplateRenderer;
import pl.matisoft.soy.render.TemplateRenderer;
import pl.matisoft.soy.template.DefaultTemplateFilesResolver;
import pl.matisoft.soy.template.TemplateFilesResolver;
import javax.inject.Inject;
import javax.servlet.ServletContext;
/**
* Created with IntelliJ IDEA.
* User: mati
* Date: 12/11/2013
* Time: 19:55
*/
@Configuration
public class SpringSoyViewBaseConfig {
@Value("${soy.hot.reload.mode:false}")
private boolean hotReloadMode;
@Value("${soy.templates.resolve.recursively:true}")
private boolean recursive;
@Value("${soy.templates.file.extension:soy}")
private String fileExtension;
@Value("${soy.templates.directory:/WEB-INF/templates}")
private String templatesPath;
@Value("${soy.i18n.xliff.path:xliffs/messages}")
private String messagesPath;
@Value("${soy.encoding:utf-8}")
private String encoding;
@Value("${soy.i18n.fallback.to.english:true}")
private boolean fallbackToEnglish;
@Value("${soy.preCompile.templates:false}")
private boolean preCompileTemplates;
@Value("${soy.indexView:index}")
private String indexView;
@Value("${soy.logical.prefix:soy:}")
private String logicalPrefix;
@Value("${soy.resolver.order:2147483647}")
private int order;
@Inject
private ServletContext servletContext;
@Bean
public LocaleProvider soyLocaleProvider() {
return new SpringLocaleProvider();
}
@Bean
public DefaultTemplateFilesResolver soyTemplateFilesResolver() throws Exception {
final DefaultTemplateFilesResolver defaultTemplateFilesResolver = new DefaultTemplateFilesResolver();
defaultTemplateFilesResolver.setHotReloadMode(hotReloadMode);
defaultTemplateFilesResolver.setRecursive(recursive);
defaultTemplateFilesResolver.setFilesExtension(fileExtension);
defaultTemplateFilesResolver.setTemplatesLocation(new ServletContextResource(servletContext, templatesPath));
return defaultTemplateFilesResolver;
}
@Bean
public CompileTimeGlobalModelResolver soyCompileTimeGlobalModelResolver() {
return new EmptyCompileTimeGlobalModelResolver();
}
@Bean
public ToSoyDataConverter soyToSoyDataConverter() {
return new DefaultToSoyDataConverter();
}
@Bean
public SoyJsSrcOptions soyJsSourceOptions() {
return new SoyJsSrcOptions();
}
@Bean
public SoyTofuOptions soyTofuOptions() {
final SoyTofuOptions soyTofuOptions = new SoyTofuOptions();
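// caching is disabled in hot-reload mode so edited templates are recompiled on render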
soyTofuOptions.setUseCaching(!hotReloadMode);
return soyTofuOptions;
}
@Bean
public TofuCompiler soyTofuCompiler(final CompileTimeGlobalModelResolver compileTimeGlobalModelResolver, final SoyJsSrcOptions soyJsSrcOptions, final SoyTofuOptions soyTofuOptions) {
final DefaultTofuCompiler defaultTofuCompiler = new DefaultTofuCompiler();
defaultTofuCompiler.setHotReloadMode(hotReloadMode);
defaultTofuCompiler.setCompileTimeGlobalModelResolver(compileTimeGlobalModelResolver);
defaultTofuCompiler.setSoyJsSrcOptions(soyJsSrcOptions);
defaultTofuCompiler.setSoyTofuOptions(soyTofuOptions);
return defaultTofuCompiler;
}
@Bean
public SoyMsgBundleResolver soyMsgBundleResolver() {
final DefaultSoyMsgBundleResolver defaultSoyMsgBundleResolver = new DefaultSoyMsgBundleResolver();
defaultSoyMsgBundleResolver.setHotReloadMode(hotReloadMode);
defaultSoyMsgBundleResolver.setMessagesPath(messagesPath);
defaultSoyMsgBundleResolver.setFallbackToEnglish(fallbackToEnglish);
return defaultSoyMsgBundleResolver;
}
@Bean
public CompiledTemplatesHolder soyTemplatesHolder(final TemplateFilesResolver templateFilesResolver, final TofuCompiler tofuCompiler) throws Exception {
final DefaultCompiledTemplatesHolder defaultCompiledTemplatesHolder = new DefaultCompiledTemplatesHolder();
defaultCompiledTemplatesHolder.setHotReloadMode(hotReloadMode);
defaultCompiledTemplatesHolder.setPreCompileTemplates(preCompileTemplates);
defaultCompiledTemplatesHolder.setTemplatesFileResolver(templateFilesResolver);
defaultCompiledTemplatesHolder.setTofuCompiler(tofuCompiler);
return defaultCompiledTemplatesHolder;
}
@Bean
public TemplateRenderer soyTemplateRenderer(final ToSoyDataConverter toSoyDataConverter) {
final DefaultTemplateRenderer defaultTemplateRenderer = new DefaultTemplateRenderer();
defaultTemplateRenderer.setHotReloadMode(hotReloadMode);
defaultTemplateRenderer.setToSoyDataConverter(toSoyDataConverter);
return defaultTemplateRenderer;
}
@Bean
public ModelAdjuster soySpringModelAdjuster() {
return new SpringModelAdjuster();
}
@Bean
public GlobalRuntimeModelResolver soyGlobalRuntimeModelResolver() {
return new EmptyGlobalRuntimeModelResolver();
}
@Bean
public ContentNegotiator contentNegotiator() {
return new DefaultContentNegotiator();
}
@Bean
public ViewResolver soyViewResolver(final CompiledTemplatesHolder compiledTemplatesHolder,
final ModelAdjuster modelAdjuster,
final TemplateRenderer templateRenderer,
final LocaleProvider localeProvider,
final GlobalRuntimeModelResolver globalRuntimeModelResolver,
final ContentNegotiator contentNegotiator,
final SoyMsgBundleResolver msgBundleResolver)
throws Exception {
final SoyTemplateViewResolver soyTemplateViewResolver = new SoyTemplateViewResolver();
soyTemplateViewResolver.setSoyMsgBundleResolver(msgBundleResolver);
soyTemplateViewResolver.setCompiledTemplatesHolder(compiledTemplatesHolder);
soyTemplateViewResolver.setEncoding(encoding);
soyTemplateViewResolver.setGlobalRuntimeModelResolver(globalRuntimeModelResolver);
soyTemplateViewResolver.setHotReloadMode(hotReloadMode);
soyTemplateViewResolver.setIndexView(indexView);
soyTemplateViewResolver.setLocaleProvider(localeProvider);
soyTemplateViewResolver.setModelAdjuster(modelAdjuster);
soyTemplateViewResolver.setTemplateRenderer(templateRenderer);
soyTemplateViewResolver.setPrefix(logicalPrefix);
soyTemplateViewResolver.setOrder(order);
soyTemplateViewResolver.setRedirectContextRelative(true);
soyTemplateViewResolver.setRedirectHttp10Compatible(true);
soyTemplateViewResolver.setContentNegotiator(contentNegotiator);
return soyTemplateViewResolver;
}
}
|
Java
|
package userstoreauth.servlets;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import userstoreauth.model.UserVer2;
import userstoreauth.service.UserStoreMb;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
class EditUserTest {
@BeforeEach
void setUp() {
UserStoreMb us = new UserStoreMb();
us.deleteAll();
}
@Test
void editUser() throws ServletException, IOException {
EditUser editUser = new EditUser();
UserStoreMb us = new UserStoreMb();
HttpServletRequest request = mock(HttpServletRequest.class);
HttpServletResponse response = mock(HttpServletResponse.class);
when(request.getParameter("login")).thenReturn("login");
when(request.getParameter("password")).thenReturn("password0");
when(request.getParameter("name")).thenReturn("name0");
when(request.getParameter("email")).thenReturn("email0");
when(request.getParameter("role")).thenReturn("admin");
when(request.getParameter("country")).thenReturn("Россия");
when(request.getParameter("city")).thenReturn("Москва");
UserVer2 user = new UserVer2("login", "password", "name", "email", "Россия", "Москва", Timestamp.valueOf(LocalDateTime.now()), "user");
us.addUser(user);
assertEquals(user, us.getByLogin("login"));
editUser.doPost(request, response);
user.setPassword("password0");
user.setName("name0");
user.setEmail("email0");
user.setRole("admin");
assertEquals(user, us.getByLogin("login"));
}
}
|
Java
|
from socket import inet_ntoa
from struct import pack
def calcDottedNetmask(mask):
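    """Convert a CIDR prefix length to a dotted-quad netmask.

    For example, calcDottedNetmask(24) returns '255.255.255.0'.
    """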
bits = 0
    for i in range(32 - mask, 32):  # range, not xrange, so this also runs on Python 3
bits |= (1 << i)
packed_value = pack('!I', bits)
addr = inet_ntoa(packed_value)
return addr
|
Python
|
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
#ifndef itkLaplacianImageFilter_h
#define itkLaplacianImageFilter_h
#include "itkImageToImageFilter.h"
namespace itk
{
/**
* \class LaplacianImageFilter
*
* This filter computes the Laplacian of a scalar-valued image. The Laplacian
* is an isotropic measure of the 2nd spatial derivative of an image. The
* Laplacian of an image highlights regions of rapid intensity change and is
* therefore often used for edge detection. Often, the Laplacian is applied to
* an image that has first been smoothed with a Gaussian filter in order to
* reduce its sensitivity to noise.
*
* \par
* The Laplacian at each pixel location is computed by convolution with the
* itk::LaplacianOperator.
*
* \par Inputs and Outputs
* The input to this filter is a scalar-valued itk::Image of arbitrary
* dimension. The output is a scalar-valued itk::Image.
*
* \warning The pixel type of the input and output images must be of real type
* (float or double). ConceptChecking is used here to enforce the input pixel
* type. You will get a compilation error if the pixel type of the input and
* output images is not float or double.
*
* \sa Image
* \sa Neighborhood
* \sa NeighborhoodOperator
* \sa NeighborhoodIterator
* \sa LaplacianOperator
*
* \ingroup ImageFeatureExtraction
* \ingroup ITKImageFeature
*
* \wiki
* \wikiexample{ImageProcessing/LaplacianImageFilter,Compute the Laplacian of an image}
* \endwiki
*/
template< typename TInputImage, typename TOutputImage >
class LaplacianImageFilter:
public ImageToImageFilter< TInputImage, TOutputImage >
{
public:
/** Standard "Self" & Superclass typedef. */
typedef LaplacianImageFilter Self;
typedef ImageToImageFilter< TInputImage, TOutputImage > Superclass;
/** Extract some information from the image types. Dimensionality
* of the two images is assumed to be the same. */
typedef typename TOutputImage::PixelType OutputPixelType;
typedef typename TOutputImage::InternalPixelType OutputInternalPixelType;
typedef typename TInputImage::PixelType InputPixelType;
typedef typename TInputImage::InternalPixelType InputInternalPixelType;
itkStaticConstMacro(InputImageDimension, unsigned int,
TInputImage::ImageDimension);
itkStaticConstMacro(ImageDimension, unsigned int,
TOutputImage::ImageDimension);
/** Image typedef support. */
typedef TInputImage InputImageType;
typedef TOutputImage OutputImageType;
typedef typename InputImageType::Pointer InputImagePointer;
/** Smart pointer typedef support. */
typedef SmartPointer< Self > Pointer;
typedef SmartPointer< const Self > ConstPointer;
/** Run-time type information (and related methods) */
itkTypeMacro(LaplacianImageFilter, ImageToImageFilter);
/** Method for creation through the object factory. */
itkNewMacro(Self);
/** LaplacianImageFilter needs a larger input requested region than
* the output requested region (larger in the direction of the
* derivative). As such, LaplacianImageFilter needs to provide an
* implementation for GenerateInputRequestedRegion() in order to
* inform the pipeline execution model.
*
* \sa ImageToImageFilter::GenerateInputRequestedRegion() */
virtual void GenerateInputRequestedRegion() ITK_OVERRIDE;
/** Enable/Disable using the image spacing information in
* calculations. Use this option if you want derivatives in
* physical space. Default is UseImageSpacingOn. */
itkBooleanMacro( UseImageSpacing );
/** Set/Get whether or not the filter will use the spacing of the input
image in its calculations */
itkSetMacro(UseImageSpacing, bool);
itkGetConstMacro(UseImageSpacing, bool);
#ifdef ITK_USE_CONCEPT_CHECKING
// Begin concept checking
itkConceptMacro( SameDimensionCheck,
( Concept::SameDimension< InputImageDimension, ImageDimension > ) );
itkConceptMacro( InputPixelTypeIsFloatingPointCheck,
( Concept::IsFloatingPoint< InputPixelType > ) );
itkConceptMacro( OutputPixelTypeIsFloatingPointCheck,
( Concept::IsFloatingPoint< OutputPixelType > ) );
// End concept checking
#endif
protected:
LaplacianImageFilter()
{
m_UseImageSpacing = true;
}
virtual ~LaplacianImageFilter() {}
/** Standard pipeline method. While this class does not implement a
* ThreadedGenerateData(), its GenerateData() delegates all
* calculations to an NeighborhoodOperatorImageFilter. Since the
* NeighborhoodOperatorImageFilter is multithreaded, this filter is
* multithreaded by default. */
void GenerateData() ITK_OVERRIDE;
void PrintSelf(std::ostream &, Indent) const ITK_OVERRIDE;
private:
ITK_DISALLOW_COPY_AND_ASSIGN(LaplacianImageFilter);
bool m_UseImageSpacing;
};
} // end namespace itk
#ifndef ITK_MANUAL_INSTANTIATION
#include "itkLaplacianImageFilter.hxx"
#endif
#endif
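// Hedged usage sketch (not part of this header), assuming a 2-D float image;
// input wiring is omitted:
//
//   typedef itk::Image< float, 2 > ImageType;
//   itk::LaplacianImageFilter< ImageType, ImageType >::Pointer filter =
//     itk::LaplacianImageFilter< ImageType, ImageType >::New();
//   filter->SetInput( inputImage );
//   filter->UseImageSpacingOn();
//   filter->Update();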
|
C++
|
import pytest
import salt.engines
from tests.support.mock import MagicMock, patch
def test_engine_module_name():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
assert engine.name == "foobar"
def test_engine_title_set():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
with pytest.raises(KeyError):
# The method does not exist so a KeyError will be raised.
engine.run()
mm.assert_called_with("foobar")
|
Python
|
//-------------------------------------------------------------------------------------
// ExportXmlParser.cpp
//
// Simple callback non-validating XML parser implementation.
//
// Microsoft XNA Developer Connection.
// Copyright © Microsoft Corporation. All rights reserved.
//-------------------------------------------------------------------------------------
#include "stdafx.h"
#include "ExportXmlParser.h"
namespace ATG
{
//-------------------------------------------------------------------------------------
// Name: XMLParser::XMLParser
//-------------------------------------------------------------------------------------
XMLParser::XMLParser()
{
m_pWritePtr = m_pWriteBuf;
m_pReadPtr = m_pReadBuf;
m_pISAXCallback = NULL;
m_hFile = INVALID_HANDLE_VALUE;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::~XMLParser
//-------------------------------------------------------------------------------------
XMLParser::~XMLParser()
{
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::FillBuffer
// Desc: Reads a block from the current open file
//-------------------------------------------------------------------------------------
VOID XMLParser::FillBuffer()
{
DWORD NChars;
m_pReadPtr = m_pReadBuf;
if( m_hFile == NULL )
{
if( m_uInXMLBufferCharsLeft > XML_READ_BUFFER_SIZE )
NChars = XML_READ_BUFFER_SIZE;
else
NChars = m_uInXMLBufferCharsLeft;
CopyMemory( m_pReadBuf, m_pInXMLBuffer, NChars );
m_uInXMLBufferCharsLeft -= NChars;
m_pInXMLBuffer += NChars;
}
else
{
ReadFile( m_hFile, m_pReadBuf, XML_READ_BUFFER_SIZE, &NChars, NULL );
}
m_dwCharsConsumed += NChars;
__int64 iProgress = ( (__int64)m_dwCharsConsumed * 1000 ) / (__int64)m_dwCharsTotal;
m_pISAXCallback->SetParseProgress( (DWORD)iProgress );
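// Pad with two NUL bytes so a UTF-16 read also sees a complete terminator.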
m_pReadBuf[ NChars ] = '\0';
m_pReadBuf[ NChars + 1] = '\0';
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::SkipNextAdvance
// Desc: Puts the last character read back on the input stream
//-------------------------------------------------------------------------------------
VOID XMLParser::SkipNextAdvance()
{
m_bSkipNextAdvance = TRUE;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ConsumeSpace
// Desc: Skips spaces in the current stream
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ConsumeSpace()
{
HRESULT hr;
// Skip spaces
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
while ( ( m_Ch == ' ' ) || ( m_Ch == '\t' ) ||
( m_Ch == '\n' ) || ( m_Ch == '\r' ) )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
SkipNextAdvance();
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ConvertEscape
// Desc: Copies and converts an escape sequence into m_pWriteBuf
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ConvertEscape()
{
HRESULT hr;
WCHAR wVal = 0;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
// all escape sequences start with &, so ignore the first character
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == '#' ) // character as hex or decimal
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == 'x' ) // hex number
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
while ( m_Ch != ';' )
{
wVal *= 16;
if ( ( m_Ch >= '0' ) && ( m_Ch <= '9' ) )
{
wVal += m_Ch - '0';
}
else if ( ( m_Ch >= 'a' ) && ( m_Ch <= 'f' ) )
{
wVal += m_Ch - 'a' + 10;
}
else if ( ( m_Ch >= 'A' ) && ( m_Ch <= 'F' ) )
{
wVal += m_Ch - 'A' + 10;
}
else
{
Error( E_INVALID_XML_SYNTAX, "Expected hex digit as part of &#x escape sequence" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
}
else // decimal number
{
while ( m_Ch != ';' )
{
wVal *= 10;
if ( ( m_Ch >= '0' ) && ( m_Ch <= '9' ) )
{
wVal += m_Ch - '0';
}
else
{
Error( E_INVALID_XML_SYNTAX, "Expected decimal digit as part of &# escape sequence" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
}
// copy character into the buffer
m_Ch = wVal;
return S_OK;
}
// must be an entity reference
WCHAR *pEntityRefVal = m_pWritePtr;
UINT EntityRefLen;
SkipNextAdvance();
if( FAILED( hr = AdvanceName() ) )
return hr;
EntityRefLen = (UINT)( m_pWritePtr - pEntityRefVal );
m_pWritePtr = pEntityRefVal;
if ( EntityRefLen == 0 )
{
Error( E_INVALID_XML_SYNTAX, "Expecting entity name after &" );
return E_INVALID_XML_SYNTAX;
}
if( !wcsncmp( pEntityRefVal, L"lt", EntityRefLen ) )
wVal = '<';
else if( !wcsncmp( pEntityRefVal, L"gt", EntityRefLen ) )
wVal = '>';
else if( !wcsncmp( pEntityRefVal, L"amp", EntityRefLen ) )
wVal = '&';
else if( !wcsncmp( pEntityRefVal, L"apos", EntityRefLen ) )
wVal = '\'';
else if( !wcsncmp( pEntityRefVal, L"quot", EntityRefLen ) )
wVal = '"';
else
{
Error( E_INVALID_XML_SYNTAX, "Unrecognized entity name after & - (should be lt, gt, amp, apos, or quot)" );
return E_INVALID_XML_SYNTAX; // return false if unrecognized token sequence
}
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != ';' )
{
Error( E_INVALID_XML_SYNTAX, "Expected terminating ; for entity reference" );
return E_INVALID_XML_SYNTAX; // malformed reference - needs terminating ;
}
m_Ch = wVal;
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceAttrVal
// Desc: Copies an attribute value into m_pWrite buf, skipping surrounding quotes
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceAttrVal()
{
HRESULT hr;
WCHAR wQuoteChar;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( ( m_Ch != '"' ) && ( m_Ch != '\'' ) )
{
Error( E_INVALID_XML_SYNTAX, "Attribute values must be enclosed in quotes" );
return E_INVALID_XML_SYNTAX;
}
wQuoteChar = m_Ch;
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
else if( m_Ch == wQuoteChar )
break;
else if( m_Ch == '&' )
{
SkipNextAdvance();
if( FAILED( hr = ConvertEscape() ) )
return hr;
}
else if( m_Ch == '<' )
{
Error( E_INVALID_XML_SYNTAX, "Illegal character '<' in element tag" );
return E_INVALID_XML_SYNTAX;
}
// copy character into the buffer
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
Error( E_INVALID_XML_SYNTAX, "Total element tag size may not be more than %d characters", XML_WRITE_BUFFER_SIZE );
return E_INVALID_XML_SYNTAX;
}
*m_pWritePtr = m_Ch;
m_pWritePtr++;
}
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceName
// Desc: Copies a name into m_pWriteBuf - returns S_OK on success or an error
//       HRESULT on failure. Ignores leading whitespace. Currently does not
//       support Unicode names.
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceName()
{
HRESULT hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( ( ( m_Ch < 'A' ) || ( m_Ch > 'Z' ) ) &&
( ( m_Ch < 'a' ) || ( m_Ch > 'z' ) ) &&
( m_Ch != '_' ) && ( m_Ch != ':' ) )
{
Error( E_INVALID_XML_SYNTAX, "Names must start with an alphabetic character or _ or :" );
return E_INVALID_XML_SYNTAX;
}
while( ( ( m_Ch >= 'A' ) && ( m_Ch <= 'Z' ) ) ||
( ( m_Ch >= 'a' ) && ( m_Ch <= 'z' ) ) ||
( ( m_Ch >= '0' ) && ( m_Ch <= '9' ) ) ||
( m_Ch == '_' ) || ( m_Ch == ':' ) ||
( m_Ch == '-' ) || ( m_Ch == '.' ) )
{
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
Error( E_INVALID_XML_SYNTAX, "Total element tag size may not be more than %d characters", XML_WRITE_BUFFER_SIZE );
return E_INVALID_XML_SYNTAX;
}
*m_pWritePtr = m_Ch;
m_pWritePtr++;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
SkipNextAdvance();
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceCharacter
// Desc: Copies the character at *m_pReadPtr to m_Ch
// handling difference in UTF16 / UTF8, and big/little endian
// and getting another chunk of the file if needed
// Returns S_OK if a character was read. At EOF, returns E_FAIL when
// bOkToFail is TRUE and E_INVALID_XML_SYNTAX otherwise.
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceCharacter( BOOL bOkToFail )
{
if( m_bSkipNextAdvance )
{
m_bSkipNextAdvance = FALSE;
return S_OK;
}
// If we hit EOF in the middle of a character,
// it's ok-- we'll just have a corrupt last character
// (the buffer is padded with double NULLs )
if( *m_pReadPtr == '\0' )
{
// Read more from the file
FillBuffer();
// We are at EOF if it is still NULL
if( *m_pReadPtr == '\0' )
{
if( !bOkToFail )
{
Error( E_INVALID_XML_SYNTAX, "Unexpected EOF while parsing XML file" );
return E_INVALID_XML_SYNTAX;
}
else
{
return E_FAIL;
}
}
}
if( m_bUnicode == FALSE )
{
m_Ch = *((BYTE *)m_pReadPtr); // unsigned read so 8-bit chars above 0x7F don't sign-extend
m_pReadPtr++;
}
else // if( m_bUnicode == TRUE )
{
m_Ch = *((WCHAR *)m_pReadPtr);
if( m_bReverseBytes )
{
m_Ch = ( m_Ch << 8 ) + ( m_Ch >> 8 );
}
m_pReadPtr += 2;
}
if( m_Ch == '\n' )
{
m_pISAXCallback->m_LineNum++;
m_pISAXCallback->m_LinePos = 0;
}
else if( m_Ch != '\r' )
m_pISAXCallback->m_LinePos++;
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceElement
// Desc: Builds <element> data, calls callback
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceElement()
{
HRESULT hr;
// write ptr at the beginning of the buffer
m_pWritePtr = m_pWriteBuf;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
// if first character wasn't '<', we wouldn't be here
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch == '!' )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == '-' )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '-' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '-' after '<!-'" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = AdvanceComment() ) )
return hr;
return S_OK;
}
if( m_Ch != '[' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
// Match the remaining "CDATA[" characters one at a time
static CONST WCHAR strCDATA[] = { 'C', 'D', 'A', 'T', 'A', '[' };
for( UINT i = 0; i < 6; i++ )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != strCDATA[ i ] )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '<![CDATA['" );
return E_INVALID_XML_SYNTAX;
}
}
if( FAILED( hr = AdvanceCDATA() ) )
return hr;
}
else if( m_Ch == '/' )
{
WCHAR *pEntityRefVal = m_pWritePtr;
if( FAILED( hr = AdvanceName() ) )
return hr;
if( FAILED( m_pISAXCallback->ElementEnd( pEntityRefVal,
(UINT) ( m_pWritePtr - pEntityRefVal ) ) ) )
return E_ABORT;
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '>' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '>' after name for closing entity reference" );
return E_INVALID_XML_SYNTAX;
}
}
else if( m_Ch == '?' )
{
// Just skip the <?...?> header tag; it only matters for identifying the character set, which has already been done
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if ( m_Ch == '>' )
return S_OK;
}
}
else
{
XMLAttribute Attributes[ XML_MAX_ATTRIBUTES_PER_ELEMENT ];
UINT NumAttrs;
WCHAR *pEntityRefVal = m_pWritePtr;
UINT EntityRefLen;
NumAttrs = 0;
SkipNextAdvance();
// Entity tag
if( FAILED( hr = AdvanceName() ) )
return hr;
EntityRefLen = (UINT)( m_pWritePtr - pEntityRefVal );
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
// read attributes
while( ( m_Ch != '>' ) && ( m_Ch != '/' ) )
{
SkipNextAdvance();
if ( NumAttrs >= XML_MAX_ATTRIBUTES_PER_ELEMENT )
{
Error( E_INVALID_XML_SYNTAX, "Elements may not have more than %d attributes", XML_MAX_ATTRIBUTES_PER_ELEMENT );
return E_INVALID_XML_SYNTAX;
}
Attributes[ NumAttrs ].strName = m_pWritePtr;
// Attribute name
if( FAILED( hr = AdvanceName() ) )
return hr;
Attributes[ NumAttrs ].NameLen = (UINT)( m_pWritePtr - Attributes[ NumAttrs ].strName );
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '=' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '=' character after attribute name" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( hr = ConsumeSpace() ) )
return hr;
Attributes[ NumAttrs ].strValue = m_pWritePtr;
if( FAILED( hr = AdvanceAttrVal() ) )
return hr;
Attributes[ NumAttrs ].ValueLen = (UINT)( m_pWritePtr -
Attributes[ NumAttrs ].strValue );
++NumAttrs;
if( FAILED( hr = ConsumeSpace() ) )
return hr;
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
}
if( m_Ch == '/' )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if( m_Ch != '>' )
{
Error( E_INVALID_XML_SYNTAX, "Expecting '>' after '/' in element tag" );
return E_INVALID_XML_SYNTAX;
}
if( FAILED( m_pISAXCallback->ElementBegin( pEntityRefVal, EntityRefLen,
Attributes, NumAttrs ) ) )
return E_ABORT;
if( FAILED( m_pISAXCallback->ElementEnd( pEntityRefVal, EntityRefLen ) ) )
return E_ABORT;
}
else
{
if( FAILED( m_pISAXCallback->ElementBegin( pEntityRefVal, EntityRefLen,
Attributes, NumAttrs ) ) )
return E_ABORT;
}
}
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceCDATA
// Desc: Read a CDATA section
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceCDATA()
{
HRESULT hr;
WORD wStage = 0;
if( FAILED( m_pISAXCallback->CDATABegin() ) )
return E_ABORT;
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
*m_pWritePtr = m_Ch;
m_pWritePtr++;
if( ( m_Ch == ']' ) && ( wStage == 0 ) )
wStage = 1;
else if( ( m_Ch == ']' ) && ( wStage == 1 ) )
wStage = 2;
else if( ( m_Ch == ']' ) && ( wStage == 2 ) )
wStage = 2; // stay armed on a run of ']' so that "]]]>" still terminates
else if( ( m_Ch == '>' ) && ( wStage == 2 ) )
{
m_pWritePtr -= 3; // drop the "]]>" terminator from the output
break;
}
else
wStage = 0;
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
if( FAILED( m_pISAXCallback->CDATAData( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), TRUE ) ) )
return E_ABORT;
m_pWritePtr = m_pWriteBuf;
}
}
if( FAILED( m_pISAXCallback->CDATAData( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), FALSE ) ) )
return E_ABORT;
m_pWritePtr = m_pWriteBuf;
if( FAILED( m_pISAXCallback->CDATAEnd() ) )
return E_ABORT;
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::AdvanceComment
// Desc: Skips over a comment
//-------------------------------------------------------------------------------------
HRESULT XMLParser::AdvanceComment()
{
HRESULT hr;
WORD wStage;
wStage = 0;
for( ;; )
{
if( FAILED( hr = AdvanceCharacter() ) )
return hr;
if (( m_Ch == '-' ) && ( wStage == 0 ))
wStage = 1;
else if (( m_Ch == '-' ) && ( wStage == 1 ))
wStage = 2;
else if (( m_Ch == '-' ) && ( wStage == 2 ))
wStage = 2; // stay armed on a run of '-' so that "--->" still terminates
else if (( m_Ch == '>' ) && ( wStage == 2 ))
break;
else
wStage = 0;
}
return S_OK;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::RegisterSAXCallbackInterface
// Desc: Registers callback interface
//-------------------------------------------------------------------------------------
VOID XMLParser::RegisterSAXCallbackInterface( ISAXCallback *pISAXCallback )
{
m_pISAXCallback = pISAXCallback;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::GetSAXCallbackInterface
// Desc: Returns current callback interface
//-------------------------------------------------------------------------------------
ISAXCallback* XMLParser::GetSAXCallbackInterface()
{
return m_pISAXCallback;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::MainParseLoop
// Desc: Main Loop to Parse Data - source agnostic
//-------------------------------------------------------------------------------------
HRESULT XMLParser::MainParseLoop()
{
BOOL bWhiteSpaceOnly = TRUE;
HRESULT hr = S_OK;
if( FAILED( m_pISAXCallback->StartDocument() ) )
return E_ABORT;
m_pWritePtr = m_pWriteBuf;
FillBuffer();
if ( *((WCHAR *) m_pReadBuf ) == 0xFEFF )
{
m_bUnicode = TRUE;
m_bReverseBytes = FALSE;
m_pReadPtr += 2;
}
else if ( *((WCHAR *) m_pReadBuf ) == 0xFFFE )
{
m_bUnicode = TRUE;
m_bReverseBytes = TRUE;
m_pReadPtr += 2;
}
else if ( *((WCHAR *) m_pReadBuf ) == 0x003C )
{
m_bUnicode = TRUE;
m_bReverseBytes = FALSE;
}
else if ( *((WCHAR *) m_pReadBuf ) == 0x3C00 )
{
m_bUnicode = TRUE;
m_bReverseBytes = TRUE;
}
else if ( m_pReadBuf[ 0 ] == 0x3C )
{
m_bUnicode = FALSE;
m_bReverseBytes = FALSE;
}
else
{
Error( E_INVALID_XML_SYNTAX, "Unrecognized encoding (parser does not support UTF-8 language encodings)" );
return E_INVALID_XML_SYNTAX;
}
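// The two-byte probes above classify the stream: 0xFEFF is a UTF-16 BOM in
// native byte order and 0xFFFE a byte-swapped BOM; 0x003C is '<' in
// native-order UTF-16 and 0x3C00 a byte-swapped '<'; a single leading 0x3C
// byte is '<' in a plain 8-bit encoding.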
for( ;; )
{
if( FAILED( AdvanceCharacter( TRUE ) ) )
{
if ( ( (UINT) ( m_pWritePtr - m_pWriteBuf ) != 0 ) && ( !bWhiteSpaceOnly ) )
{
if( FAILED( m_pISAXCallback->ElementContent( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), FALSE ) ) )
return E_ABORT;
bWhiteSpaceOnly = TRUE;
}
if( FAILED( m_pISAXCallback->EndDocument() ) )
return E_ABORT;
return S_OK;
}
if( m_Ch == '<' )
{
if( ( (UINT) ( m_pWritePtr - m_pWriteBuf ) != 0 ) && ( !bWhiteSpaceOnly ) )
{
if( FAILED( m_pISAXCallback->ElementContent( m_pWriteBuf, (UINT)( m_pWritePtr - m_pWriteBuf ), FALSE ) ) )
return E_ABORT;
bWhiteSpaceOnly = TRUE;
}
SkipNextAdvance();
m_pWritePtr = m_pWriteBuf;
if( FAILED( hr = AdvanceElement() ) )
return hr;
m_pWritePtr = m_pWriteBuf;
}
else
{
if( m_Ch == '&' )
{
SkipNextAdvance();
if( FAILED( hr = ConvertEscape() ) )
return hr;
}
if( bWhiteSpaceOnly && ( m_Ch != ' ' ) && ( m_Ch != '\n' ) && ( m_Ch != '\r' ) &&
( m_Ch != '\t' ) )
{
bWhiteSpaceOnly = FALSE;
}
*m_pWritePtr = m_Ch;
m_pWritePtr++;
if( m_pWritePtr - m_pWriteBuf >= XML_WRITE_BUFFER_SIZE )
{
if( !bWhiteSpaceOnly )
{
if( FAILED( m_pISAXCallback->ElementContent( m_pWriteBuf,
( UINT ) ( m_pWritePtr - m_pWriteBuf ),
TRUE ) ) )
{
return E_ABORT;
}
}
m_pWritePtr = m_pWriteBuf;
bWhiteSpaceOnly = TRUE;
}
}
}
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ParseXMLFile
// Desc: Builds element data
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ParseXMLFile( CONST CHAR *strFilename )
{
HRESULT hr;
if( m_pISAXCallback == NULL )
return E_NOINTERFACE;
m_pISAXCallback->m_LineNum = 1;
m_pISAXCallback->m_LinePos = 0;
m_pISAXCallback->m_strFilename = strFilename; // save this off only while we parse the file
m_bSkipNextAdvance = FALSE;
m_pReadPtr = m_pReadBuf;
m_pReadBuf[ 0 ] = '\0';
m_pReadBuf[ 1 ] = '\0';
m_pInXMLBuffer = NULL;
m_uInXMLBufferCharsLeft = 0;
m_hFile = CreateFile( strFilename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_FLAG_SEQUENTIAL_SCAN, NULL );
if( m_hFile == INVALID_HANDLE_VALUE )
{
Error( E_COULD_NOT_OPEN_FILE, "Error opening file" );
hr = E_COULD_NOT_OPEN_FILE;
}
else
{
LARGE_INTEGER iFileSize;
GetFileSizeEx( m_hFile, &iFileSize );
m_dwCharsTotal = (DWORD)iFileSize.QuadPart;
m_dwCharsConsumed = 0;
hr = MainParseLoop();
}
// Close the file
if( m_hFile != INVALID_HANDLE_VALUE )
CloseHandle( m_hFile );
m_hFile = INVALID_HANDLE_VALUE;
// we no longer own strFilename, so un-set it
m_pISAXCallback->m_strFilename = NULL;
return hr;
}
//-------------------------------------------------------------------------------------
// Name: XMLParser::ParseXMLBuffer
// Desc: Parses XML data from an in-memory buffer
//-------------------------------------------------------------------------------------
HRESULT XMLParser::ParseXMLBuffer( CONST CHAR *strBuffer, UINT uBufferSize )
{
HRESULT hr;
if( m_pISAXCallback == NULL )
return E_NOINTERFACE;
m_pISAXCallback->m_LineNum = 1;
m_pISAXCallback->m_LinePos = 0;
m_pISAXCallback->m_strFilename = ""; // save this off only while we parse the file
m_bSkipNextAdvance = FALSE;
m_pReadPtr = m_pReadBuf;
m_pReadBuf[ 0 ] = '\0';
m_pReadBuf[ 1 ] = '\0';
m_hFile = NULL;
m_pInXMLBuffer = strBuffer;
m_uInXMLBufferCharsLeft = uBufferSize;
m_dwCharsTotal = m_uInXMLBufferCharsLeft;
m_dwCharsConsumed = 0;
hr = MainParseLoop();
// we no longer own strFilename, so un-set it
m_pISAXCallback->m_strFilename = NULL;
return hr;
}
//-------------------------------------------------------------------------------------
// XMLParser::Error()
// Logs an error through the callback interface
//-------------------------------------------------------------------------------------
VOID XMLParser::Error( HRESULT hErr, CONST CHAR* strFormat, ... )
{
CONST INT MAX_OUTPUT_STR = 160;
CHAR strBuffer[ MAX_OUTPUT_STR ];
va_list pArglist;
va_start( pArglist, strFormat );
vsprintf_s( strBuffer, strFormat, pArglist );
m_pISAXCallback->Error( hErr, strBuffer );
va_end( pArglist );
}
} // namespace ATG
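//-------------------------------------------------------------------------------------
// Usage sketch (illustrative only; it assumes the ISAXCallback hook signatures
// implied by the call sites above -- the real interface is declared in the
// accompanying ATG header):
//
// class EchoCallback : public ATG::ISAXCallback
// {
// public:
//     HRESULT ElementBegin( CONST WCHAR* strName, UINT NameLen,
//                           CONST ATG::XMLAttribute* pAttributes, UINT NumAttributes )
//     { wprintf( L"<%.*s>\n", (int)NameLen, strName ); return S_OK; }
//     HRESULT ElementEnd( CONST WCHAR* strName, UINT NameLen )
//     { wprintf( L"</%.*s>\n", (int)NameLen, strName ); return S_OK; }
//     // ... remaining hooks (StartDocument, ElementContent, CDATA*, Error) return S_OK
// };
//
// ATG::XMLParser Parser;
// EchoCallback Callback;
// Parser.RegisterSAXCallbackInterface( &Callback );
// CONST CHAR* strXml = "<root a='1'>hi</root>";
// Parser.ParseXMLBuffer( strXml, (UINT)strlen( strXml ) );
//-------------------------------------------------------------------------------------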
|
Java
|
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.execution.testframework.sm.runner;
import com.intellij.execution.testframework.sm.SMTestRunnerConnectionUtil;
import com.intellij.execution.testframework.sm.runner.events.*;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Key;
import com.intellij.util.containers.ContainerUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
import java.util.*;
/**
* This class fires events to SMTRunnerEventsListener on the event dispatch thread.
*
* @author Roman Chernyatchik
*/
public class GeneralToSMTRunnerEventsConvertor extends GeneralTestEventsProcessor {
private final Map<String, SMTestProxy> myRunningTestsFullNameToProxy = ContainerUtil.newConcurrentMap();
private final TestSuiteStack mySuitesStack;
private final Map<String, List<SMTestProxy>> myCurrentChildren = new HashMap<>();
private boolean myIsTestingFinished;
public GeneralToSMTRunnerEventsConvertor(Project project, @NotNull SMTestProxy.SMRootTestProxy testsRootNode,
@NotNull String testFrameworkName) {
super(project, testFrameworkName, testsRootNode);
mySuitesStack = new TestSuiteStack(testFrameworkName);
}
@Override
protected SMTestProxy createProxy(String testName, String locationHint, String metaInfo, String id, String parentNodeId) {
SMTestProxy proxy = super.createProxy(testName, locationHint, metaInfo, id, parentNodeId);
SMTestProxy currentSuite = getCurrentSuite();
currentSuite.addChild(proxy);
return proxy;
}
@Override
protected SMTestProxy createSuite(String suiteName, String locationHint, String metaInfo, String id, String parentNodeId) {
SMTestProxy newSuite = super.createSuite(suiteName, locationHint, metaInfo, id, parentNodeId);
final SMTestProxy parentSuite = getCurrentSuite();
parentSuite.addChild(newSuite);
mySuitesStack.pushSuite(newSuite);
return newSuite;
}
@Override
public void onSuiteTreeEnded(String suiteName) {
myBuildTreeRunnables.add(() -> mySuitesStack.popSuite(suiteName));
super.onSuiteTreeEnded(suiteName);
}
@Override
public void onStartTesting() {
mySuitesStack.pushSuite(myTestsRootProxy);
myTestsRootProxy.setStarted();
//fire
fireOnTestingStarted(myTestsRootProxy);
}
@Override
public void onTestsReporterAttached() {
fireOnTestsReporterAttached(myTestsRootProxy);
}
@Override
public void onFinishTesting() {
fireOnBeforeTestingFinished(myTestsRootProxy);
if (myIsTestingFinished) {
// has been already invoked!
return;
}
myIsTestingFinished = true;
// We don't know whether process was destroyed by user
// or it finished after all tests have been run
// Lets assume, if at finish all suites except root suite are passed
// then all is ok otherwise process was terminated by user
if (!isTreeComplete(myRunningTestsFullNameToProxy.keySet(), myTestsRootProxy)) {
myTestsRootProxy.setTerminated();
myRunningTestsFullNameToProxy.clear();
}
mySuitesStack.clear();
myTestsRootProxy.setFinished();
myCurrentChildren.clear();
//fire events
fireOnTestingFinished(myTestsRootProxy);
super.onFinishTesting();
}
@Override
public void setPrinterProvider(@NotNull TestProxyPrinterProvider printerProvider) {
}
@Override
public void onTestStarted(@NotNull final TestStartedEvent testStartedEvent) {
final String testName = testStartedEvent.getName();
final String locationUrl = testStartedEvent.getLocationUrl();
final boolean isConfig = testStartedEvent.isConfig();
final String fullName = getFullTestName(testName);
if (myRunningTestsFullNameToProxy.containsKey(fullName)) {
//Duplicated event
logProblem("Test [" + fullName + "] has been already started");
if (SMTestRunnerConnectionUtil.isInDebugMode()) {
return;
}
}
SMTestProxy parentSuite = getCurrentSuite();
SMTestProxy testProxy = findChild(parentSuite, locationUrl != null ? locationUrl : fullName, false);
if (testProxy == null) {
// creates test
testProxy = new SMTestProxy(testName, false, locationUrl, testStartedEvent.getMetainfo(), false);
testProxy.setConfig(isConfig);
if (myTreeBuildBeforeStart) testProxy.setTreeBuildBeforeStart();
if (myLocator != null) {
testProxy.setLocator(myLocator);
}
parentSuite.addChild(testProxy);
}
// adds to running tests map
myRunningTestsFullNameToProxy.put(fullName, testProxy);
//Progress started
testProxy.setStarted();
//fire events
fireOnTestStarted(testProxy);
}
@Override
public void onSuiteStarted(@NotNull final TestSuiteStartedEvent suiteStartedEvent) {
final String suiteName = suiteStartedEvent.getName();
final String locationUrl = suiteStartedEvent.getLocationUrl();
SMTestProxy parentSuite = getCurrentSuite();
SMTestProxy newSuite = findChild(parentSuite, locationUrl != null ? locationUrl : suiteName, true);
if (newSuite == null) {
//new suite
newSuite = new SMTestProxy(suiteName, true, locationUrl, suiteStartedEvent.getMetainfo(), parentSuite.isPreservePresentableName());
if (myTreeBuildBeforeStart) {
newSuite.setTreeBuildBeforeStart();
}
if (myLocator != null) {
newSuite.setLocator(myLocator);
}
parentSuite.addChild(newSuite);
}
initCurrentChildren(newSuite, true);
mySuitesStack.pushSuite(newSuite);
//Progress started
newSuite.setSuiteStarted();
//fire event
fireOnSuiteStarted(newSuite);
}
private void initCurrentChildren(SMTestProxy newSuite, boolean preferSuite) {
if (myTreeBuildBeforeStart) {
for (SMTestProxy proxy : newSuite.getChildren()) {
if (!proxy.isFinal() || preferSuite && proxy.isSuite()) {
String url = proxy.getLocationUrl();
if (url != null) {
myCurrentChildren.computeIfAbsent(url, l -> new ArrayList<>()).add(proxy);
}
myCurrentChildren.computeIfAbsent(proxy.getName(), l -> new ArrayList<>()).add(proxy);
}
}
}
}
private SMTestProxy findChild(SMTestProxy parentSuite, String fullName, boolean preferSuite) {
if (myTreeBuildBeforeStart) {
Set<SMTestProxy> acceptedProxies = new LinkedHashSet<>();
Collection<? extends SMTestProxy> children = myCurrentChildren.get(fullName);
if (children == null) {
initCurrentChildren(parentSuite, preferSuite);
children = myCurrentChildren.get(fullName);
}
if (children != null) { // null when the child was started a second time
for (SMTestProxy proxy : children) {
if (!proxy.isFinal() || preferSuite && proxy.isSuite()) {
acceptedProxies.add(proxy);
}
}
if (!acceptedProxies.isEmpty()) {
return acceptedProxies.stream()
.filter(proxy -> proxy.isSuite() == preferSuite && proxy.getParent() == parentSuite)
.findFirst()
.orElse(acceptedProxies.iterator().next());
}
}
}
return null;
}
@Override
public void onTestFinished(@NotNull final TestFinishedEvent testFinishedEvent) {
final String testName = testFinishedEvent.getName();
final Long duration = testFinishedEvent.getDuration();
final String fullTestName = getFullTestName(testName);
final SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestFinished event: name = {" + testName + "}. " +
cannotFindFullTestNameMsg(fullTestName));
return;
}
testProxy.setDuration(duration != null ? duration : 0);
testProxy.setFrameworkOutputFile(testFinishedEvent.getOutputFile());
testProxy.setFinished();
myRunningTestsFullNameToProxy.remove(fullTestName);
clearCurrentChildren(fullTestName, testProxy);
//fire events
fireOnTestFinished(testProxy);
}
private void clearCurrentChildren(String fullTestName, SMTestProxy testProxy) {
myCurrentChildren.remove(fullTestName);
String url = testProxy.getLocationUrl();
if (url != null) {
myCurrentChildren.remove(url);
}
}
@Override
public void onSuiteFinished(@NotNull final TestSuiteFinishedEvent suiteFinishedEvent) {
//fire events
final String suiteName = suiteFinishedEvent.getName();
final SMTestProxy mySuite = mySuitesStack.popSuite(suiteName);
if (mySuite != null) {
mySuite.setFinished();
myCurrentChildren.remove(suiteName);
String locationUrl = mySuite.getLocationUrl();
if (locationUrl != null) {
myCurrentChildren.remove(locationUrl);
}
//fire events
fireOnSuiteFinished(mySuite);
}
}
@Override
public void onUncapturedOutput(@NotNull final String text, final Key outputType) {
final SMTestProxy currentProxy = findCurrentTestOrSuite();
currentProxy.addOutput(text, outputType);
}
@Override
public void onError(@NotNull final String localizedMessage,
@Nullable final String stackTrace,
final boolean isCritical) {
final SMTestProxy currentProxy = findCurrentTestOrSuite();
currentProxy.addError(localizedMessage, stackTrace, isCritical);
}
@Override
public void onTestFailure(@NotNull final TestFailedEvent testFailedEvent) {
final String testName = testFailedEvent.getName();
if (testName == null) {
logProblem("No test name specified in " + testFailedEvent);
return;
}
final String localizedMessage = testFailedEvent.getLocalizedFailureMessage();
final String stackTrace = testFailedEvent.getStacktrace();
final boolean isTestError = testFailedEvent.isTestError();
final String comparisonFailureActualText = testFailedEvent.getComparisonFailureActualText();
final String comparisonFailureExpectedText = testFailedEvent.getComparisonFailureExpectedText();
final boolean inDebugMode = SMTestRunnerConnectionUtil.isInDebugMode();
final String fullTestName = getFullTestName(testName);
SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestFailure event: name = {" + testName + "}" +
", message = {" + localizedMessage + "}" +
", stackTrace = {" + stackTrace + "}. " +
cannotFindFullTestNameMsg(fullTestName));
if (inDebugMode) {
return;
}
else {
// if hasn't been already reported
// 1. report
onTestStarted(new TestStartedEvent(testName, null));
// 2. add failure
testProxy = getProxyByFullTestName(fullTestName);
}
}
if (testProxy == null) {
return;
}
if (comparisonFailureActualText != null && comparisonFailureExpectedText != null) {
testProxy.setTestComparisonFailed(localizedMessage, stackTrace, comparisonFailureActualText, comparisonFailureExpectedText,
testFailedEvent);
}
else if (comparisonFailureActualText == null && comparisonFailureExpectedText == null) {
testProxy.setTestFailed(localizedMessage, stackTrace, isTestError);
}
else {
testProxy.setTestFailed(localizedMessage, stackTrace, isTestError);
logProblem("Comparison failure actual and expected texts must either both be null or both be non-null.\n"
+ "Expected:\n"
+ comparisonFailureExpectedText + "\n"
+ "Actual:\n"
+ comparisonFailureActualText);
}
// fire event
fireOnTestFailed(testProxy);
}
@Override
public void onTestIgnored(@NotNull final TestIgnoredEvent testIgnoredEvent) {
final String testName = testIgnoredEvent.getName();
if (testName == null) {
logProblem("TestIgnored event: no name");
return;
}
String ignoreComment = testIgnoredEvent.getIgnoreComment();
final String stackTrace = testIgnoredEvent.getStacktrace();
final String fullTestName = getFullTestName(testName);
SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
final boolean debugMode = SMTestRunnerConnectionUtil.isInDebugMode();
logProblem("Test wasn't started! " +
"TestIgnored event: name = {" + testName + "}, " +
"message = {" + ignoreComment + "}. " +
cannotFindFullTestNameMsg(fullTestName));
if (debugMode) {
return;
}
else {
// try to fix
// 1. report test opened
onTestStarted(new TestStartedEvent(testName, null));
// 2. report failure
testProxy = getProxyByFullTestName(fullTestName);
}
}
if (testProxy == null) {
return;
}
testProxy.setTestIgnored(ignoreComment, stackTrace);
// fire event
fireOnTestIgnored(testProxy);
}
@Override
public void onTestOutput(@NotNull final TestOutputEvent testOutputEvent) {
final String testName = testOutputEvent.getName();
final String text = testOutputEvent.getText();
final Key outputType = testOutputEvent.getOutputType();
final String fullTestName = getFullTestName(testName);
final SMTestProxy testProxy = getProxyByFullTestName(fullTestName);
if (testProxy == null) {
logProblem("Test wasn't started! TestOutput event: name = {" + testName + "}, " +
"outputType = " + outputType + ", " +
"text = {" + text + "}. " +
cannotFindFullTestNameMsg(fullTestName));
return;
}
testProxy.addOutput(text, outputType);
}
@Override
public void onTestsCountInSuite(final int count) {
fireOnTestsCountInSuite(count);
}
@NotNull
protected final SMTestProxy getCurrentSuite() {
final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite();
if (currentSuite != null) {
return currentSuite;
}
// The current suite shouldn't be null; otherwise the test runner isn't correct,
// or maybe we are in debug mode
logProblem("Current suite is undefined. Root suite will be used.");
return myTestsRootProxy;
}
protected String getFullTestName(final String testName) {
// Test name should be unique
return testName;
}
protected int getRunningTestsQuantity() {
return myRunningTestsFullNameToProxy.size();
}
@Nullable
protected SMTestProxy getProxyByFullTestName(final String fullTestName) {
return myRunningTestsFullNameToProxy.get(fullTestName);
}
@TestOnly
protected void clearInternalSuitesStack() {
mySuitesStack.clear();
}
private String cannotFindFullTestNameMsg(String fullTestName) {
return "Cant find running test for ["
+ fullTestName
+ "]. Current running tests: {"
+ dumpRunningTestsNames() + "}";
}
private StringBuilder dumpRunningTestsNames() {
final Set<String> names = myRunningTestsFullNameToProxy.keySet();
final StringBuilder namesDump = new StringBuilder();
for (String name : names) {
namesDump.append('[').append(name).append(']').append(',');
}
return namesDump;
}
/*
* Remove listeners, etc
*/
@Override
public void dispose() {
super.dispose();
if (!myRunningTestsFullNameToProxy.isEmpty()) {
final Application application = ApplicationManager.getApplication();
if (!application.isHeadlessEnvironment() && !application.isUnitTestMode()) {
logProblem("Not all events were processed! " + dumpRunningTestsNames());
}
}
myRunningTestsFullNameToProxy.clear();
mySuitesStack.clear();
}
private SMTestProxy findCurrentTestOrSuite() {
//if we can locate the test we will send output to it, otherwise to the current test suite
SMTestProxy currentProxy = null;
Iterator<SMTestProxy> iterator = myRunningTestsFullNameToProxy.values().iterator();
if (iterator.hasNext()) {
//current test
currentProxy = iterator.next();
if (iterator.hasNext()) { //if multiple tests are running, send the output to the suite instead
currentProxy = null;
}
}
if (currentProxy == null) {
//current suite
// ProcessHandler can fire an output-available event before the processStarted event
final SMTestProxy currentSuite = mySuitesStack.getCurrentSuite();
currentProxy = currentSuite != null ? currentSuite : myTestsRootProxy;
}
return currentProxy;
}
}
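// Typical event sequence fed to this convertor (a sketch; the suite/finished event
// constructors shown here are assumptions, while TestStartedEvent(name, locationUrl)
// matches the usage above):
//
//   convertor.onStartTesting();
//   convertor.onSuiteStarted(new TestSuiteStartedEvent("MySuite", null));
//   convertor.onTestStarted(new TestStartedEvent("testFoo", null));
//   convertor.onTestFinished(new TestFinishedEvent("testFoo", 42L));
//   convertor.onSuiteFinished(new TestSuiteFinishedEvent("MySuite"));
//   convertor.onFinishTesting();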
|
Java
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Data;
using System.Data.SQLite;
using System.Data.Common;
using System.IO;
namespace GatherAll
{
class Cls_SqliteMng
{
//string m_DBName = "";
//string connStr = "";
//Creates a database file, saved under the HyData folder in the current directory
public void CreateDB(string dbName)
{
// string databaseFileName = System.Environment.CurrentDirectory + @"/HyData/" + dbName;
SQLiteConnection.CreateFile(dbName);
}
//Executes a SQL statement
//Create a table: ExecuteSql("create table HyTest(TestID TEXT)");
//Insert some data: ExecuteSql("insert into HyTest(TestID) values('1001')");
public void ExecuteSql(string sqlStr, string strConStr)
{
//connStr = connStr1 + m_DBName + connStr;
using (DbConnection conn = new SQLiteConnection(strConStr))
{
conn.Open();
DbCommand comm = conn.CreateCommand();
comm.CommandText = sqlStr;
comm.CommandType = CommandType.Text;
comm.ExecuteNonQuery();
}
}
////Executes a query and returns a DataSet
//private DataSet ExecDataSet(string sqlStr)
//{
// //connStr = "";
// //connStr = connStr1 + m_DBName + connStr;
// using (SQLiteConnection conn = new SQLiteConnection(connStr)) // takes a connection string, not the SQL text
// {
// conn.Open();
// SQLiteCommand cmd = conn.CreateCommand();
// cmd.CommandText = sqlStr;
// cmd.CommandType = CommandType.Text;
// SQLiteDataAdapter da = new SQLiteDataAdapter(cmd);
// DataSet ds = new DataSet();
// da.Fill(ds);
// return ds;
// }
//}
}
}
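// Usage sketch (connection-string format per System.Data.SQLite; the file name is arbitrary):
//
// var mng = new Cls_SqliteMng();
// mng.CreateDB("HyTest.db");
// string connStr = "Data Source=HyTest.db;Version=3;";
// mng.ExecuteSql("create table HyTest(TestID TEXT)", connStr);
// mng.ExecuteSql("insert into HyTest(TestID) values('1001')", connStr);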
|
Java
|
package br.copacabana;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import javax.cache.Cache;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.ModelAndView;
import br.com.copacabana.cb.entities.Address;
import br.com.copacabana.cb.entities.Client;
import br.com.copacabana.cb.entities.MealOrder;
import br.com.copacabana.cb.entities.OrderedPlate;
import br.com.copacabana.cb.entities.Plate;
import br.com.copacabana.cb.entities.Restaurant;
import br.com.copacabana.cb.entities.TurnType;
import br.com.copacabana.cb.entities.WorkingHours.DayOfWeek;
import br.copacabana.order.paypal.PayPalProperties.PayPalConfKeys;
import br.copacabana.spring.AddressManager;
import br.copacabana.spring.ClientManager;
import br.copacabana.spring.ConfigurationManager;
import br.copacabana.spring.PlateManager;
import br.copacabana.spring.RestaurantManager;
import br.copacabana.usecase.control.UserActionManager;
import br.copacabana.util.TimeController;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
/**
* @author Rafael Coutinho
*/
public class PlaceOrderController extends JsonViewController {
private String formView;
private String successView;
@Override
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception {
Map<String, Object> model = new HashMap<String, Object>();
model.put("mode", "view");
try {
Cache cache = CacheController.getCache();
if (cache.get(PayPalConfKeys.pppFixedRate.name()) == null) {
ConfigurationManager cm = new ConfigurationManager();
cache.put(PayPalConfKeys.pppFixedRate.name(), cm.getConfigurationValue(PayPalConfKeys.pppFixedRate.name()));
cache.put(PayPalConfKeys.pppPercentageValue.name(), cm.getConfigurationValue(PayPalConfKeys.pppPercentageValue.name()));
}
if (!Authentication.isUserLoggedIn(request.getSession())) {
String orderData = request.getParameter("orderData");
request.getSession().setAttribute("orderData", orderData);
model.put("forwardUrl", "/continueOrder.jsp");
UserActionManager.startOrderNotLogged(orderData, request.getSession().getId());
return new ModelAndView(getFormView(), model);
} else {
String orderData = "";
JsonObject user = Authentication.getLoggedUser(request.getSession());
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
if (request.getParameter("orderData") == null) {
orderData = (String) request.getSession().getAttribute("orderData");
} else {
orderData = request.getParameter("orderData");
}
log.log(Level.INFO, "OrderJSon: {0}", orderData);
JsonParser pa = new JsonParser();
JsonObject orderDataJson = (JsonObject) pa.parse(orderData);
ClientManager cman = new ClientManager();
Client c = cman.find(KeyFactory.stringToKey(loggedUserId), Client.class);
MealOrder mo = getMealOrder(c, orderDataJson);
request.getSession().setAttribute("clientPhone", "");
DateSerializer dateSerializer = new DateSerializer(request);
DateDeSerializer dateDeSerializer = new DateDeSerializer(request);
GsonBuilder gsonBuilder = GsonBuilderFactory.getInstance();
// or: new GsonBuilder().setPrettyPrinting().serializeNulls().excludeFieldsWithoutExposeAnnotation();
gsonBuilder.registerTypeAdapter(Date.class, dateSerializer);
gsonBuilder.registerTypeAdapter(Date.class, dateDeSerializer);
gsonBuilder.registerTypeAdapter(Key.class, new KeyDeSerializer());
gsonBuilder.registerTypeAdapter(Key.class, new KeySerializer());
Gson gson = gsonBuilder.create();
model.putAll(updateModelData(mo, c, gson));
String json = gson.toJson(mo);
json = GsonBuilderFactory.escapeString(json);
request.getSession().setAttribute("orderData", json);
UserActionManager.startOrder(json, loggedUserId, request.getSession().getId());
return new ModelAndView(getSuccessView(), model);
}
} catch (Exception e) {
log.log(Level.SEVERE, "Failed to place order.");
try {
String orderData = "";
log.log(Level.SEVERE, "Checking logged user.");
JsonObject user = Authentication.getLoggedUser(request.getSession());
if (user == null) {
log.log(Level.SEVERE, "user is not logged in.");
}
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
log.log(Level.SEVERE, "logged user id {0}", loggedUserId);
if (request.getParameter("orderData") == null) {
log.log(Level.SEVERE, "Order is not in request, checking session");
orderData = (String) request.getSession().getAttribute("orderData");
} else {
log.log(Level.SEVERE, "Order is in request");
orderData = request.getParameter("orderData");
}
if (orderData == null) {
log.log(Level.SEVERE, "Order was null!");
}
log.log(Level.SEVERE, "Order is order :" + orderData);
log.log(Level.SEVERE, "Exception was {0}.", e);
log.log(Level.SEVERE, "Error was {0}.", e.getMessage());
UserActionManager.registerMajorError(request, e, loggedUserId, request.getSession().getId(), "placing order");
} catch (Exception ex) {
log.log(Level.SEVERE, "Failed during loggin of error was {0}.", e);
UserActionManager.registerMajorError(request, e, "placing order 2");
}
throw e;
}
}
public static Map<String, Object> updateModelData(MealOrder mo, Client c, Gson gson) {
Map<String, Object> model = new HashMap<String, Object>();
RestaurantManager rman = new RestaurantManager();
Restaurant r = rman.getRestaurant(mo.getRestaurant());
Boolean b = r.getOnlyForRetrieval();
if (b != null && true == b) {
model.put("onlyForRetrieval", Boolean.TRUE);
} else {
model.put("onlyForRetrieval", Boolean.FALSE);
}
model.put("restaurantAddressKey", KeyFactory.keyToString(r.getAddress()));
model.put("clientCpf", c.getCpf());
model.put("level", c.getLevel().ordinal());
JsonObject json = new JsonObject();
ConfigurationManager cm = new ConfigurationManager();
String hasSpecificLogic = cm.getConfigurationValue("hasSpecificLogic");
model.put("noTakeAwayOrders", "false");
if (hasSpecificLogic != null && hasSpecificLogic.endsWith("true")) {
json = getSteakHouseSpecificData(mo, c, gson);
getMakisSpecificLogic(mo, c, gson, json);
getPapagaiosSpecificLogic(mo, c, gson, json);
getPizzadoroSpecificLogic(mo, c, gson, json);
if (noTakeAwayOrders(mo)) {
model.put("noTakeAwayOrders", "true");
}
}
model.put("hasSpecificLogic", json.toString());
if (json.get("javascript") != null && json.get("javascript").getAsString().length() > 0) {
model.put("hasSpecificLogicJavascript", json.get("javascript").getAsString());
}
Address restAddress = new AddressManager().getAddress(r.getAddress());
model.put("restaurantAddress", gson.toJson(restAddress));
return model;
}
private static boolean noTakeAwayOrders(MealOrder mo) {
ConfigurationManager cm = new ConfigurationManager();
String ids = cm.getConfigurationValue("no.takeaway.ids");
String restId = KeyFactory.keyToString(mo.getRestaurant());
// Guard against a missing configuration entry
return ids != null && ids.contains(restId);
}
private static void getPapagaiosSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("papagaios.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/papagaios.js"));
}
}
}
private static void getPizzadoroSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("pizzadoro.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/pizzadoro.js"));
}
}
}
private static void getMakisSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
try {
ConfigurationManager cm = new ConfigurationManager();
PlateManager pm = new PlateManager();
String makisIdStr = cm.getConfigurationValue("makis.Id");
if (makisIdStr != null && makisIdStr.length() > 0) {
Key makis = KeyFactory.stringToKey(makisIdStr);
if (makis != null && makis.equals(mo.getRestaurant())) {
String packageId = cm.getConfigurationValue("makis.package.id");
if (packageId != null && packageId.length() > 0) {
json.add("makisPackageCostId", new JsonPrimitive(packageId));
json.add("makisMsg", new JsonPrimitive(cm.getConfigurationValue("makis.msg")));
boolean isIncluded = false;
Key packageKey = KeyFactory.stringToKey(packageId);
for (Iterator<OrderedPlate> iterator = mo.getPlates().iterator(); iterator.hasNext();) {
OrderedPlate plate = iterator.next();
if (Boolean.FALSE.equals(plate.getIsFraction()) && plate.getPlate().equals(packageKey)) {
isIncluded = true;
break;
}
}
if (isIncluded == false) {
Plate packagePlate = pm.get(packageKey);
OrderedPlate oplate = new OrderedPlate();
oplate.setName(packagePlate.getName());
oplate.setPrice(packagePlate.getPrice());
oplate.setPriceInCents(packagePlate.getPriceInCents());
oplate.setQty(1);
oplate.setPlate(packageKey);
mo.getPlates().add(oplate);
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "failed to add makis specific logic", e);
}
}
private static JsonObject getSteakHouseSpecificData(MealOrder mo, Client c, Gson gson) {
JsonObject json = new JsonObject();
json.add("freeDelivery", new JsonPrimitive("false"));
try {
ConfigurationManager cm = new ConfigurationManager();
String steakIdStr = cm.getConfigurationValue("steakHouse.Id");
if (steakIdStr != null && steakIdStr.length() > 0) {
Key steak = KeyFactory.stringToKey(steakIdStr);
if (steak.equals(mo.getRestaurant())) {
if (!TimeController.getDayOfWeek().equals(DayOfWeek.SATURDAY) && !TimeController.getDayOfWeek().equals(DayOfWeek.SUNDAY)) {
if (TimeController.getCurrentTurn().equals(TurnType.LUNCH)) {
String foodCatsStr = cm.getConfigurationValue("steakHouse.FoodCats");
if (foodCatsStr != null && foodCatsStr.length() > 0) {
String[] foodCatsArray = foodCatsStr.split("\\|");
Set<Key> foodCats = new HashSet<Key>();
for (int i = 0; i < foodCatsArray.length; i++) {
if (foodCatsArray[i].length() > 0) {
foodCats.add(KeyFactory.stringToKey(foodCatsArray[i]));
}
}
List<OrderedPlate> plates = mo.getPlates();
PlateManager pm = new PlateManager();
for (Iterator<OrderedPlate> iterator = plates.iterator(); iterator.hasNext();) {
OrderedPlate orderedPlate = iterator.next();
Plate p = null;
if (Boolean.TRUE.equals(orderedPlate.getIsFraction())) {
p = pm.getPlate(orderedPlate.getFractionPlates().iterator().next());
} else {
p = pm.getPlate(orderedPlate.getPlate());
}
if (!foodCats.contains(p.getFoodCategory())) {
json.add("freeDelivery", new JsonPrimitive("false"));
return json;
}
}
json.add("freeDelivery", new JsonPrimitive("true"));
json.add("msg", new JsonPrimitive(cm.getConfigurationValue("steakHouse.msg")));
}
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "Could not set up things for SteakHouse", e);
}
return json;
}
public MealOrder getMealOrder(Client c, JsonObject sessionOrderData) {
MealOrder mo = new MealOrder();
mo.setClient(c);
if (c.getContact() != null) {
mo.setClientPhone(c.getContact().getPhone());
}
mo.setAddress(getAddress(sessionOrderData, c));
mo.setObservation(getObservation(sessionOrderData));
mo.setRestaurant(getRestKey(sessionOrderData));
mo.setPlates(getPlates(sessionOrderData));
return mo;
}
private Key getAddress(JsonObject sessionOrderData, Client c) {
try {
if (sessionOrderData.get("address") == null) {
if (c.getMainAddress() != null) {
return c.getMainAddress();
} else {
return null;
}
} else {
if (!sessionOrderData.get("address").isJsonNull()) {
return KeyFactory.stringToKey(sessionOrderData.get("address").getAsString());
} else {
return null;
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "the session address attribute was {0}", sessionOrderData.get("address"));
log.log(Level.SEVERE, "Error while fetching the client's address or the session address", e);
return null;
}
}
public List<OrderedPlate> getPlates(JsonObject sessionOrderData) {
List<OrderedPlate> orderedPlates = new ArrayList<OrderedPlate>();
JsonArray array = sessionOrderData.get("plates").getAsJsonArray();
for (int i = 0; i < array.size(); i++) {
JsonObject pjson = array.get(i).getAsJsonObject();
orderedPlates.add(getOrdered(pjson));
}
return orderedPlates;
}
private OrderedPlate getOrdered(JsonObject pjson) {
OrderedPlate oplate = new OrderedPlate();
oplate.setName(pjson.get("name").getAsString());
oplate.setPrice(pjson.get("price").getAsDouble());
oplate.setPriceInCents((int) Math.round(pjson.get("price").getAsDouble() * 100.0)); // round instead of truncating (e.g. 25.9 * 100 is 2589.999...)
oplate.setQty(pjson.get("qty").getAsInt());
if (pjson.get("isFraction").getAsBoolean() == true) {
oplate.setIsFraction(Boolean.TRUE);
Set<Key> fractionPlates = new HashSet<Key>();
JsonArray fractionKeys = pjson.get("fractionKeys").getAsJsonArray();
for (int i = 0; i < fractionKeys.size(); i++) {
Key fractionKey = KeyFactory.stringToKey(fractionKeys.get(i).getAsString());
fractionPlates.add(fractionKey);
}
oplate.setFractionPlates(fractionPlates);
return oplate;
} else {
String pkey = "";
if (pjson.get("plate").isJsonObject()) {
pkey = pjson.get("plate").getAsJsonObject().get("id").getAsString();
} else {
pkey = pjson.get("plate").getAsString();
}
oplate.setPlate(KeyFactory.stringToKey(pkey));
return oplate;
}
}
public Key getRestKey(JsonObject sessionOrderData) {
String restKey;
if (sessionOrderData.get("restaurant") != null) {
if (sessionOrderData.get("restaurant").isJsonObject()) {
restKey = sessionOrderData.get("restaurant").getAsJsonObject().get("id").getAsString();
} else {
restKey = sessionOrderData.get("restaurant").getAsString();
}
} else {
restKey = sessionOrderData.get("plates").getAsJsonArray().get(0).getAsJsonObject().get("plate").getAsJsonObject().get("value").getAsJsonObject().get("restaurant").getAsString();
}
return KeyFactory.stringToKey(restKey);
}
public String getObservation(JsonObject sessionOrderData) {
return sessionOrderData.get("observation").getAsString();
}
public String getFormView() {
return formView;
}
public void setFormView(String formView) {
this.formView = formView;
}
public String getSuccessView() {
return successView;
}
public void setSuccessView(String successView) {
this.successView = successView;
}
}
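// Shape of the orderData JSON consumed above, reconstructed from the accessors in
// getMealOrder/getPlates/getRestKey/getObservation (field values are illustrative):
//
// {
//   "address": "<datastore key string>",           // optional; falls back to the client's main address
//   "restaurant": "<key string or {\"id\": ...}>", // optional; derived from the first plate when absent
//   "observation": "no onions",
//   "plates": [
//     { "name": "Pizza", "price": 25.9, "qty": 1, "isFraction": false, "plate": "<key>" },
//     { "name": "Half and half", "price": 30.0, "qty": 1, "isFraction": true, "fractionKeys": ["<key>", "<key>"] }
//   ]
// }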
|
Java
|
current_dir = File.dirname(__FILE__)
log_level :info
log_location STDOUT
node_name "user"
client_key "#{ENV['HOME']}/.ssh/user.pem"
validation_client_name "user-validator"
validation_key "#{current_dir}/validator.pem"
chef_server_url "https://api.opscode.com/organizations/user-organization"
cache_type 'BasicFile'
cache_options(:path => "#{ENV['HOME']}/.chef/checksums" )
cookbook_path "#{current_dir}/../cookbooks"
# required to extract the right interface for knife ssh
knife[:ssh_attribute] = "ipaddress"
knife[:joyent_username] = ENV['SDC_USERNAME'] || 'user'
knife[:joyent_keyname] = ENV['SDC_CLI_KEY_ID'] || 'keyname'
knife[:joyent_keyfile] = ENV['SDC_CLI_IDENTITY'] || "#{ENV['HOME']}/.ssh/id_rsa"
knife[:joyent_api_url] = 'https://us-sw-1.api.joyentcloud.com/'
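# Example (assumes the knife-joyent plugin is installed and nodes are registered):
#   knife ssh 'name:*' 'uptime'
# With ssh_attribute set above, knife ssh connects via each node's ipaddress
# without needing an explicit --attribute flag.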
|
Java
|
using System.Threading;
using System.Threading.Tasks;
using MediatR;
namespace CoreDocker.Core.Framework.CommandQuery
{
public class MediatorCommander : ICommander
{
private readonly IMediator _mediator;
public MediatorCommander(IMediator mediator)
{
_mediator = mediator;
}
#region Implementation of ICommander
public async Task Notify<T>(T notificationRequest, CancellationToken cancellationToken) where T : CommandNotificationBase
{
await _mediator.Publish(notificationRequest, cancellationToken); // forward the token instead of dropping it
}
public async Task<CommandResult> Execute<T>(T commandRequest, CancellationToken cancellationToken) where T : CommandRequestBase
{
return await _mediator.Send(commandRequest, cancellationToken);
}
#endregion
}
}
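// Usage sketch (MyCommand/MyNotification are hypothetical subclasses of
// CommandRequestBase/CommandNotificationBase; the IMediator comes from DI):
//
// ICommander commander = new MediatorCommander(mediator);
// CommandResult result = await commander.Execute(new MyCommand(), cancellationToken);
// await commander.Notify(new MyNotification(), cancellationToken);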
|
Java
|
package hska.iwi.eShopMaster.model.businessLogic.manager.impl;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import hska.iwi.eShopMaster.model.businessLogic.manager.CategoryManager;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.Category;
import hska.iwi.eShopMaster.model.businessLogic.manager.entity.User;
import java.util.List;
import javax.ws.rs.core.MediaType;
import org.apache.log4j.Logger;
public class CategoryManagerImpl implements CategoryManager {
private final static String BASIS_URL_CATEGORY = "http://localhost:8081/api/catalog/category/";
private final Logger logger = Logger.getLogger(CategoryManagerImpl.class);
private final ObjectMapper parser = new ObjectMapper();
private final User currentUser;
public CategoryManagerImpl(User currentUser) {
this.currentUser = currentUser;
}
@Override
public List<Category> getCategories() {
List<Category> categories = null;
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY);
ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.get(ClientResponse.class);
categories = parser.readValue(response.getEntity(String.class),
new TypeReference<List<Category>>() {}); // keep the element type; plain List.class would yield maps
} catch (Exception ex) {
logger.error(ex);
}
return categories;
}
@Override
public Category getCategory(int id) {
Category category = null;
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY)
.path(String.valueOf(id));
ClientResponse response = webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.get(ClientResponse.class);
category = parser.readValue(response.getEntity(String.class), Category.class);
} catch (Exception ex) {
logger.error(ex);
}
return category;
}
@Override
public void addCategory(String name) {
Category category = new Category(name);
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY);
webResource.type(MediaType.APPLICATION_JSON_TYPE)
.accept(MediaType.APPLICATION_JSON_TYPE)
.header("usr", currentUser.getUsername())
.header("pass", currentUser.getPassword())
.post(ClientResponse.class, parser.writeValueAsString(category));
} catch (Exception ex) {
logger.error(ex);
}
}
@Override
public void delCategoryById(int id) {
try {
Client client = Client.create();
WebResource webResource = client
.resource(BASIS_URL_CATEGORY)
.path(String.valueOf(id));
webResource.accept(MediaType.APPLICATION_JSON_TYPE)
.header("usr", currentUser.getUsername())
.header("pass", currentUser.getPassword())
.delete();
} catch (Exception ex) {
logger.error(ex);
}
}
}
|
Java
|
function f() {
var x=arguments[12];
}
|
Java
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class VolumesActionsTest(base.BaseVolumeTest):
"""Test volume actions"""
create_default_network = True
@classmethod
def resource_setup(cls):
super(VolumesActionsTest, cls).resource_setup()
# Create a test shared volume for attach/detach tests
cls.volume = cls.create_volume()
@decorators.idempotent_id('fff42874-7db5-4487-a8e1-ddda5fb5288d')
@decorators.attr(type='smoke')
@utils.services('compute')
def test_attach_detach_volume_to_instance(self):
"""Test attaching and detaching volume to instance"""
# Create a server
server = self.create_server()
# Volume is attached and detached successfully from an instance
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'in-use')
self.volumes_client.detach_volume(self.volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
@decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
def test_volume_bootable(self):
"""Test setting and retrieving bootable flag of a volume"""
for bool_bootable in [True, False]:
self.volumes_client.set_bootable_volume(self.volume['id'],
bootable=bool_bootable)
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# Get Volume information
# NOTE(masayukig): 'bootable' is "true" or "false" in the current
# cinder implementation. So we need to cast boolean values to str
# and make it lower to compare here.
self.assertEqual(str(bool_bootable).lower(),
fetched_volume['bootable'])
@decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
@utils.services('compute')
def test_get_volume_attachment(self):
"""Test getting volume attachments
Attach a volume to a server, and then retrieve volume's attachments
info.
"""
# Create a server
server = self.create_server()
# Verify that a volume's attachment information is retrieved
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'],
'in-use')
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client,
self.volume['id'], 'available')
self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
volume = self.volumes_client.show_volume(self.volume['id'])['volume']
attachment = volume['attachments'][0]
self.assertEqual('/dev/%s' %
CONF.compute.volume_device_name,
attachment['device'])
self.assertEqual(server['id'], attachment['server_id'])
self.assertEqual(self.volume['id'], attachment['id'])
self.assertEqual(self.volume['id'], attachment['volume_id'])
@decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
@utils.services('image')
def test_volume_upload(self):
"""Test uploading volume to create an image"""
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
# there is no way to delete it from Cinder, so we delete it from Glance
# using the Glance images_client and from Cinder via tearDownClass.
image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')
body = self.volumes_client.upload_volume(
self.volume['id'], image_name=image_name,
disk_format=CONF.volume.disk_format)['os-volume_upload_image']
image_id = body["image_id"]
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.images_client.delete_image,
image_id)
waiters.wait_for_image_status(self.images_client, image_id, 'active')
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
image_info = self.images_client.show_image(image_id)
self.assertEqual(image_name, image_info['name'])
self.assertEqual(CONF.volume.disk_format, image_info['disk_format'])
@decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
"""Test reserving and unreserving volume"""
# Mark volume as reserved.
self.volumes_client.reserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('attaching', body['status'])
# Unmark volume as reserved.
self.volumes_client.unreserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('available', body['status'])
@decorators.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
def test_volume_readonly_update(self):
"""Test updating and retrieve volume's readonly flag"""
for readonly in [True, False]:
# Update volume readonly
self.volumes_client.update_volume_readonly(self.volume['id'],
readonly=readonly)
# Get Volume information
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# NOTE(masayukig): 'readonly' is "True" or "False" in the current
# cinder implementation. So we need to cast boolean values to str
# to compare here.
self.assertEqual(str(readonly),
fetched_volume['metadata']['readonly'])
|
Java
|
/** @file
An example program which illustrates adding and manipulating an
HTTP response MIME header:
Usage: response_header_1.so
add read_resp_header hook
get http response header
if 200, then
add mime extension header with count of zero
add mime extension header with date response was received
add "Cache-Control: public" header
else if 304, then
retrieve cached header
get old value of mime header count
increment mime header count
store mime header with new count
@section license License
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed
with this work for additional information regarding copyright
ownership. The ASF licenses this file to you under the Apache
License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <time.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include "ts/ts.h"
#include "ts/ink_defs.h"
#define PLUGIN_NAME "response_header_1"
static int init_buffer_status;
static char *mimehdr1_name;
static char *mimehdr2_name;
static char *mimehdr1_value;
static TSMBuffer hdr_bufp;
static TSMLoc hdr_loc;
static TSMLoc field_loc;
static TSMLoc value_loc;
static void
modify_header(TSHttpTxn txnp)
{
TSMBuffer resp_bufp;
TSMBuffer cached_bufp;
TSMLoc resp_loc;
TSMLoc cached_loc;
TSHttpStatus resp_status;
TSMLoc new_field_loc;
TSMLoc cached_field_loc;
time_t recvd_time;
const char *chkptr;
int chklength;
int num_refreshes = 0;
if (!init_buffer_status) {
return; /* caller reenables */
}
if (TSHttpTxnServerRespGet(txnp, &resp_bufp, &resp_loc) != TS_SUCCESS) {
TSError("[%s] Couldn't retrieve server response header", PLUGIN_NAME);
return; /* caller reenables */
}
/* TSqa06246/TSqa06144 */
resp_status = TSHttpHdrStatusGet(resp_bufp, resp_loc);
if (TS_HTTP_STATUS_OK == resp_status) {
TSDebug(PLUGIN_NAME, "Processing 200 OK");
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
TSDebug(PLUGIN_NAME, "Created new resp field with loc %p", new_field_loc);
/* copy name/values created at init
* ( "x-num-served-from-cache" ) : ( "0" )
*/
TSMimeHdrFieldCopy(resp_bufp, resp_loc, new_field_loc, hdr_bufp, hdr_loc, field_loc);
/* Attach the newly created field to the response header */
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
/* Cache-Control: Public */
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
TSDebug(PLUGIN_NAME, "Created new resp field with loc %p", new_field_loc);
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
TSMimeHdrFieldNameSet(resp_bufp, resp_loc, new_field_loc, TS_MIME_FIELD_CACHE_CONTROL, TS_MIME_LEN_CACHE_CONTROL);
TSMimeHdrFieldValueStringInsert(resp_bufp, resp_loc, new_field_loc, -1, TS_HTTP_VALUE_PUBLIC, TS_HTTP_LEN_PUBLIC);
/*
* mimehdr2_name = TSstrdup( "x-date-200-recvd" ) : CurrentDateTime
*/
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
TSDebug(PLUGIN_NAME, "Created new resp field with loc %p", new_field_loc);
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
TSMimeHdrFieldNameSet(resp_bufp, resp_loc, new_field_loc, mimehdr2_name, strlen(mimehdr2_name));
recvd_time = time(NULL);
TSMimeHdrFieldValueDateInsert(resp_bufp, resp_loc, new_field_loc, recvd_time);
TSHandleMLocRelease(resp_bufp, resp_loc, new_field_loc);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
} else if (TS_HTTP_STATUS_NOT_MODIFIED == resp_status) {
TSDebug(PLUGIN_NAME, "Processing 304 Not Modified");
/* N.B.: Protect writes to data (hash on URL + mutex: (ies)) */
/* Get the cached HTTP header */
if (TSHttpTxnCachedRespGet(txnp, &cached_bufp, &cached_loc) != TS_SUCCESS) {
TSError("[%s] STATUS 304, TSHttpTxnCachedRespGet():", PLUGIN_NAME);
TSError("[%s] Couldn't retrieve cached response header", PLUGIN_NAME);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
return; /* Caller reenables */
}
/* Get the cached MIME field name for this HTTP header */
cached_field_loc = TSMimeHdrFieldFind(cached_bufp, cached_loc, (const char *)mimehdr1_name, strlen(mimehdr1_name));
if (TS_NULL_MLOC == cached_field_loc) {
TSError("[%s] Can't find header %s in cached document", PLUGIN_NAME, mimehdr1_name);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
TSHandleMLocRelease(cached_bufp, TS_NULL_MLOC, cached_loc);
return; /* Caller reenables */
}
/* Get the cached MIME value for this name in this HTTP header */
chkptr = TSMimeHdrFieldValueStringGet(cached_bufp, cached_loc, cached_field_loc, -1, &chklength);
if (NULL == chkptr || !chklength) {
TSError("[%s] Could not find value for cached MIME field name %s", PLUGIN_NAME, mimehdr1_name);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
TSHandleMLocRelease(cached_bufp, TS_NULL_MLOC, cached_loc);
TSHandleMLocRelease(cached_bufp, cached_loc, cached_field_loc);
return; /* Caller reenables */
}
TSDebug(PLUGIN_NAME, "Header field value is %s, with length %d", chkptr, chklength);
/* Get the cached MIME value for this name in this HTTP header */
/*
TSMimeHdrFieldValueUintGet(cached_bufp, cached_loc, cached_field_loc, 0, &num_refreshes);
TSDebug(PLUGIN_NAME,
"Cached header shows %d refreshes so far", num_refreshes );
num_refreshes++ ;
*/
/* txn origin server response for this transaction stored
* in resp_bufp, resp_loc
*
* Create a new MIME field/value. Cached value has been incremented.
* Insert new MIME field/value into the server response buffer,
* allow HTTP processing to continue. This will update
* (indirectly invalidates) the cached HTTP headers MIME field.
* It is apparently not necessary to update all of the MIME fields
* in the in-process response in order to have the cached response
* become invalid.
*/
TSMimeHdrFieldCreate(resp_bufp, resp_loc, &new_field_loc); /* Probably should check for errors */
/* mimehdr1_name : TSstrdup( "x-num-served-from-cache" ) ; */
TSMimeHdrFieldAppend(resp_bufp, resp_loc, new_field_loc);
TSMimeHdrFieldNameSet(resp_bufp, resp_loc, new_field_loc, mimehdr1_name, strlen(mimehdr1_name));
TSMimeHdrFieldValueUintInsert(resp_bufp, resp_loc, new_field_loc, -1, num_refreshes);
TSHandleMLocRelease(resp_bufp, resp_loc, new_field_loc);
TSHandleMLocRelease(cached_bufp, cached_loc, cached_field_loc);
TSHandleMLocRelease(cached_bufp, TS_NULL_MLOC, cached_loc);
TSHandleMLocRelease(resp_bufp, TS_NULL_MLOC, resp_loc);
} else {
TSDebug(PLUGIN_NAME, "other response code %d", resp_status);
}
/*
* Additional 200/304 processing can go here, if so desired.
*/
/* Caller reenables */
}
static int
modify_response_header_plugin(TSCont contp ATS_UNUSED, TSEvent event, void *edata)
{
TSHttpTxn txnp = (TSHttpTxn)edata;
switch (event) {
case TS_EVENT_HTTP_READ_RESPONSE_HDR:
TSDebug(PLUGIN_NAME, "Called back with TS_EVENT_HTTP_READ_RESPONSE_HDR");
modify_header(txnp);
TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
/* fall through */
default:
break;
}
return 0;
}
void
TSPluginInit(int argc, const char *argv[])
{
TSMLoc chk_field_loc;
TSPluginRegistrationInfo info;
info.plugin_name = PLUGIN_NAME;
info.vendor_name = "Apache Software Foundation";
info.support_email = "dev@trafficserver.apache.org";
if (TSPluginRegister(&info) != TS_SUCCESS) {
TSError("[%s] Plugin registration failed", PLUGIN_NAME);
}
init_buffer_status = 0;
if (argc > 1) {
TSError("[%s] usage: %s", PLUGIN_NAME, argv[0]);
TSError("[%s] warning: too many args %d", PLUGIN_NAME, argc);
TSError("[%s] warning: ignoring unused arguments beginning with %s", PLUGIN_NAME, argv[1]);
}
/*
* The following code sets up an "init buffer" containing an extension header
* and its initial value. This will be the same for all requests, so we try
* to be efficient and do all of the work here rather than on a per-transaction
* basis.
*/
hdr_bufp = TSMBufferCreate();
TSMimeHdrCreate(hdr_bufp, &hdr_loc);
mimehdr1_name = TSstrdup("x-num-served-from-cache");
mimehdr1_value = TSstrdup("0");
/* Create name here and set DateTime value when o.s.
* response 200 is received
*/
mimehdr2_name = TSstrdup("x-date-200-recvd");
TSDebug(PLUGIN_NAME, "Inserting header %s with value %s into init buffer", mimehdr1_name, mimehdr1_value);
TSMimeHdrFieldCreate(hdr_bufp, hdr_loc, &field_loc); /* Probably should check for errors */
TSMimeHdrFieldAppend(hdr_bufp, hdr_loc, field_loc);
TSMimeHdrFieldNameSet(hdr_bufp, hdr_loc, field_loc, mimehdr1_name, strlen(mimehdr1_name));
TSMimeHdrFieldValueStringInsert(hdr_bufp, hdr_loc, field_loc, -1, mimehdr1_value, strlen(mimehdr1_value));
TSDebug(PLUGIN_NAME, "init buffer hdr, field and value locs are %p, %p and %p", hdr_loc, field_loc, value_loc);
init_buffer_status = 1;
TSHttpHookAdd(TS_HTTP_READ_RESPONSE_HDR_HOOK, TSContCreate(modify_response_header_plugin, NULL));
/*
* The following code demonstrates how to extract the field_loc from the header.
* In this plugin, the init buffer and thus field_loc never changes. Code
* similar to this may be used to extract header fields from any buffer.
*/
if (TS_NULL_MLOC == (chk_field_loc = TSMimeHdrFieldGet(hdr_bufp, hdr_loc, 0))) {
TSError("[%s] Couldn't retrieve header field from init buffer", PLUGIN_NAME);
TSError("[%s] Marking init buffer as corrupt; no more plugin processing", PLUGIN_NAME);
init_buffer_status = 0;
/* bail out here and reenable transaction */
} else {
if (field_loc != chk_field_loc) {
TSError("[%s] Retrieved buffer field loc is %p when it should be %p", PLUGIN_NAME, chk_field_loc, field_loc);
}
}
}
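/* Not part of the original example: to try the plugin out, the usual Traffic
* Server approach is to list the compiled object in plugin.config, e.g.
* response_header_1.so (matching the "Usage" line in the file header). */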
|
C
|
import App from '../containers/App';
import { PageNotFound } from '../components';
import homeRoute from '../features/home/route';
import taggrRoute from '../features/taggr/route';
const routes = [{
path: '/',
component: App,
childRoutes: [
homeRoute,
taggrRoute,
{ path: '*', name: 'Page not found', component: PageNotFound },
],
}];
// Handle isIndex property of route config:
// 1. remove the first child with isIndex=true from childRoutes
// 2. assign it to the indexRoute property of the parent.
function handleIndexRoute(route) {
if (!route.childRoutes || !route.childRoutes.length) {
return;
}
route.childRoutes = route.childRoutes.filter(child => { // eslint-disable-line
if (child.isIndex) {
/* istanbul ignore next */
if (process.env.NODE_ENV === 'dev' && route.indexRoute) {
console.error('More than one index route: ', route);
}
/* istanbul ignore else */
if (!route.indexRoute) {
delete child.path; // eslint-disable-line
route.indexRoute = child; // eslint-disable-line
return false;
}
}
return true;
});
route.childRoutes.forEach(handleIndexRoute);
}
routes.forEach(handleIndexRoute);
export default routes;
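// Usage note (an assumption based on the Rekit convention this file follows):
// the exported config is typically handed to react-router, e.g.
// <Router routes={routes} />.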
|
JavaScript
|
/**
* Created by txs on 2016/10/17.
*/
public class Student {
String name;
int grade;
@Override
public String toString() {
String temp = "";
temp += "name: " + name + "\n";
temp += "grade: " + grade + "\n";
return temp;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj instanceof Student) {
Student temp = (Student) obj;
return this.name.equals(temp.name)
&& this.grade == temp.grade;
}
return false;
}
// equals() is overridden, so hashCode() is overridden as well to keep the
// equals/hashCode contract: equal Students must report the same hash code.
@Override
public int hashCode() {
return 31 * name.hashCode() + grade;
}
}
|
Java
|
# Arduino Debug
## Install
### 1. install Arduino IDE (free)
www.arduino.cc > download
### 2. install Microsoft Visual Studio 2015 Community Edition (free)
* get vs2015.com_chs.iso
* select C++ from the available VS2015 setup options.
>It requires Visual Studio C++ to be installed before Arduino projects can be created or opened.
### 3. install plugin
https://visualstudiogallery.msdn.microsoft.com/069a905d-387d-4415-bc37-665a5ac9caba
## Use
### Visual Studio 2015 Community
1. Open File (Ctrl + O) and select the Arduino src file.
2. Add a breakpoint by clicking the line number.
3. Add watch expressions by adding actions like {x} {y} (see the sketch below).
4. Run the debugger.
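For reference, here is a minimal sketch to step through with this setup; the variables `x` and `y` are assumptions chosen to match the watch expressions above:
```cpp
// Minimal illustration sketch (not part of the plugin docs).
int x = 0;
int y = 0;

void setup() {
  Serial.begin(9600); // open the serial port for output
}

void loop() {
  x++;           // breakpoint candidate: watch {x}
  y = x * 2;     // watch {y}
  Serial.println(y);
  delay(500);
}
```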
|
Markdown
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Class io.permazen.util.NavigableSetPager (Permazen 4.1.9-SNAPSHOT API)</title>
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class io.permazen.util.NavigableSetPager (Permazen 4.1.9-SNAPSHOT API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../io/permazen/util/NavigableSetPager.html" title="class in io.permazen.util">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?io/permazen/util/class-use/NavigableSetPager.html" target="_top">Frames</a></li>
<li><a href="NavigableSetPager.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class io.permazen.util.NavigableSetPager" class="title">Uses of Class<br>io.permazen.util.NavigableSetPager</h2>
</div>
<div class="classUseContainer">No usage of io.permazen.util.NavigableSetPager</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../io/permazen/util/NavigableSetPager.html" title="class in io.permazen.util">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../index-all.html">Index</a></li>
<li><a href="../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../index.html?io/permazen/util/class-use/NavigableSetPager.html" target="_top">Frames</a></li>
<li><a href="NavigableSetPager.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2022. All rights reserved.</small></p>
</body>
</html>
|
HTML
|
using System;
using System.IO;
using System.Reflection;
using Moq;
using NUnit.Framework;
using Weald.Service;
namespace Weald.Tests
{
[TestFixture]
public class VisualSvnServerInfoProviderTests
{
private string _tempFilePath;
[SetUp]
public void SetUp()
{
_tempFilePath = Path.GetTempFileName();
}
[TearDown]
public void TearDown()
{
try
{
File.Delete(_tempFilePath);
}
catch
{
}
}
[Test]
public void NonExistentServiceExecutableMeansNothingWorks()
{
var mockServerPathProvider = new Mock<IProvideVisualSvnServerPaths>();
mockServerPathProvider.Setup(x => x.ServiceExePath)
.Returns(Guid.NewGuid().ToString());
var mockWebConfigProvider = new Mock<IProvideWebConfiguration>();
mockWebConfigProvider.Setup(x => x.GetValue("SvnServerAlias"))
.Returns("Foo");
var serverInfo = new VisualSvnServerInfoProvider(mockServerPathProvider.Object, mockWebConfigProvider.Object);
Assert.IsFalse(serverInfo.IsVisualSvnServerInstalled);
Assert.IsNullOrEmpty(serverInfo.RepoStoragePath);
Assert.IsNullOrEmpty(serverInfo.SvnLookExePath);
}
[Test]
public void CanGetNormalizedRepoStoragePath()
{
File.WriteAllLines(_tempFilePath, new[] { "FOO", "#BAR", " SVNParentPath \"E:/svn/repos/\"", " BAZ" });
var mockServerPathProvider = new Mock<IProvideVisualSvnServerPaths>();
mockServerPathProvider.Setup(x => x.ServiceExePath)
.Returns(Assembly.GetExecutingAssembly().Location);
mockServerPathProvider.Setup(x => x.ServiceConfigFilePath)
.Returns(_tempFilePath);
mockServerPathProvider.Setup(x => x.ServiceBinDirectory)
.Returns("C:\\Foo");
var mockWebConfigProvider = new Mock<IProvideWebConfiguration>();
mockWebConfigProvider.Setup(x => x.GetValue("SvnServerAlias"))
.Returns("Foo");
var serverInfo = new VisualSvnServerInfoProvider(mockServerPathProvider.Object, mockWebConfigProvider.Object);
Assert.IsTrue(serverInfo.IsVisualSvnServerInstalled);
Assert.IsNotNullOrEmpty(serverInfo.RepoStoragePath);
Assert.AreEqual(@"e:\svn\repos", serverInfo.RepoStoragePath.ToLowerInvariant());
}
}
}
|
C#
|
# Arenaria drypidea Boiss. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
Markdown
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_45) on Mon Mar 03 10:44:38 EST 2014 -->
<title>Uses of Interface org.hibernate.event.spi.PreUpdateEventListener (Hibernate JavaDocs)</title>
<meta name="date" content="2014-03-03">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Interface org.hibernate.event.spi.PreUpdateEventListener (Hibernate JavaDocs)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/hibernate/event/spi/class-use/PreUpdateEventListener.html" target="_top">Frames</a></li>
<li><a href="PreUpdateEventListener.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Interface org.hibernate.event.spi.PreUpdateEventListener" class="title">Uses of Interface<br>org.hibernate.event.spi.PreUpdateEventListener</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#org.hibernate.cfg.beanvalidation">org.hibernate.cfg.beanvalidation</a></td>
<td class="colLast"> </td>
</tr>
<tr class="rowColor">
<td class="colFirst"><a href="#org.hibernate.event.spi">org.hibernate.event.spi</a></td>
<td class="colLast"> </td>
</tr>
<tr class="altColor">
<td class="colFirst"><a href="#org.hibernate.secure.internal">org.hibernate.secure.internal</a></td>
<td class="colLast"> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="org.hibernate.cfg.beanvalidation">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a> in <a href="../../../../../org/hibernate/cfg/beanvalidation/package-summary.html">org.hibernate.cfg.beanvalidation</a></h3>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing classes, and an explanation">
<caption><span>Classes in <a href="../../../../../org/hibernate/cfg/beanvalidation/package-summary.html">org.hibernate.cfg.beanvalidation</a> that implement <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Class and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>class </code></td>
<td class="colLast"><code><strong><a href="../../../../../org/hibernate/cfg/beanvalidation/BeanValidationEventListener.html" title="class in org.hibernate.cfg.beanvalidation">BeanValidationEventListener</a></strong></code>
<div class="block"><div class="paragraph"></div>
</td>
</tr>
</tbody>
</table>
</li>
<li class="blockList"><a name="org.hibernate.event.spi">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a> in <a href="../../../../../org/hibernate/event/spi/package-summary.html">org.hibernate.event.spi</a></h3>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing fields, and an explanation">
<caption><span>Fields in <a href="../../../../../org/hibernate/event/spi/package-summary.html">org.hibernate.event.spi</a> with type parameters of type <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Field and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>static <a href="../../../../../org/hibernate/event/spi/EventType.html" title="class in org.hibernate.event.spi">EventType</a><<a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a>></code></td>
<td class="colLast"><span class="strong">EventType.</span><code><strong><a href="../../../../../org/hibernate/event/spi/EventType.html#PRE_UPDATE">PRE_UPDATE</a></strong></code> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList"><a name="org.hibernate.secure.internal">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a> in <a href="../../../../../org/hibernate/secure/internal/package-summary.html">org.hibernate.secure.internal</a></h3>
<table border="0" cellpadding="3" cellspacing="0" summary="Use table, listing classes, and an explanation">
<caption><span>Classes in <a href="../../../../../org/hibernate/secure/internal/package-summary.html">org.hibernate.secure.internal</a> that implement <a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">PreUpdateEventListener</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Class and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code>class </code></td>
<td class="colLast"><code><strong><a href="../../../../../org/hibernate/secure/internal/JaccPreUpdateEventListener.html" title="class in org.hibernate.secure.internal">JaccPreUpdateEventListener</a></strong></code>
<div class="block"><div class="paragraph"></div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../org/hibernate/event/spi/PreUpdateEventListener.html" title="interface in org.hibernate.event.spi">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/hibernate/event/spi/class-use/PreUpdateEventListener.html" target="_top">Frames</a></li>
<li><a href="PreUpdateEventListener.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2001-2014 <a href="http://redhat.com">Red Hat, Inc.</a> All Rights Reserved.</small></p>
</body>
</html>
|
HTML
|
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.openapi.vcs;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.execution.ui.ConsoleViewContentType;
import com.intellij.util.containers.ContainerUtil;
import consulo.util.lang.Pair;
import consulo.util.lang.StringUtil;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Collections;
import java.util.List;
public final class VcsConsoleLine {
private final List<Pair<String, ConsoleViewContentType>> myChunks;
private VcsConsoleLine(@Nonnull List<Pair<String, ConsoleViewContentType>> chunks) {
myChunks = chunks;
}
public void print(@Nonnull ConsoleView console) {
ConsoleViewContentType lastType = ConsoleViewContentType.NORMAL_OUTPUT;
for (Pair<String, ConsoleViewContentType> chunk : myChunks) {
console.print(chunk.first, chunk.second);
lastType = chunk.second;
}
console.print("\n", lastType);
}
@Nullable
public static VcsConsoleLine create(@Nullable String message, @Nonnull ConsoleViewContentType contentType) {
return create(Collections.singletonList(Pair.create(message, contentType)));
}
@Nullable
public static VcsConsoleLine create(@Nonnull List<Pair<String, ConsoleViewContentType>> lineChunks) {
List<Pair<String, ConsoleViewContentType>> chunks = ContainerUtil.filter(lineChunks, it -> !StringUtil.isEmptyOrSpaces(it.first));
if (chunks.isEmpty()) return null;
return new VcsConsoleLine(chunks);
}
}
|
Java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.brixcms.web.nodepage;
import org.apache.wicket.IRequestTarget;
import org.apache.wicket.Page;
import org.apache.wicket.PageParameters;
import org.apache.wicket.RequestCycle;
import org.apache.wicket.model.IModel;
import org.apache.wicket.request.target.component.IPageRequestTarget;
import org.apache.wicket.util.lang.Objects;
import org.apache.wicket.util.string.StringValue;
import org.brixcms.exception.BrixException;
import org.brixcms.jcr.wrapper.BrixNode;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
public class BrixPageParameters implements Serializable {
// ------------------------------ FIELDS ------------------------------
private static final long serialVersionUID = 1L;
private List<String> indexedParameters = null;
private List<QueryStringParameter> queryStringParameters = null;
// -------------------------- STATIC METHODS --------------------------
public static boolean equals(BrixPageParameters p1, BrixPageParameters p2) {
if (Objects.equal(p1, p2)) {
return true;
}
if (p1 == null && p2.getIndexedParamsCount() == 0 && p2.getQueryParamKeys().isEmpty()) {
return true;
}
if (p2 == null && p1.getIndexedParamsCount() == 0 && p1.getQueryParamKeys().isEmpty()) {
return true;
}
return false;
}
public int getIndexedParamsCount() {
return indexedParameters != null ? indexedParameters.size() : 0;
}
public static BrixPageParameters getCurrent() {
IRequestTarget target = RequestCycle.get().getRequestTarget();
// this is required for getting current page parameters from the page constructor
// (the actual page instance is not constructed yet).
if (target instanceof PageParametersRequestTarget) {
return ((PageParametersRequestTarget) target).getPageParameters();
} else {
return getCurrentPage().getBrixPageParameters();
}
}
// --------------------------- CONSTRUCTORS ---------------------------
public BrixPageParameters() {
}
public BrixPageParameters(PageParameters params) {
if (params != null) {
for (String name : params.keySet()) {
addQueryParam(name, params.get(name));
}
}
}
public void addQueryParam(String name, Object value) {
addQueryParam(name, value, -1);
}
public BrixPageParameters(BrixPageParameters copy) {
if (copy == null) {
throw new IllegalArgumentException("Copy argument may not be null.");
}
if (copy.indexedParameters != null)
this.indexedParameters = new ArrayList<String>(copy.indexedParameters);
if (copy.queryStringParameters != null)
this.queryStringParameters = new ArrayList<QueryStringParameter>(
copy.queryStringParameters);
}
// ------------------------ CANONICAL METHODS ------------------------
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof BrixPageParameters == false) {
return false;
}
BrixPageParameters rhs = (BrixPageParameters) obj;
if (!Objects.equal(indexedParameters, rhs.indexedParameters)) {
return false;
}
if (queryStringParameters == null || rhs.queryStringParameters == null) {
return rhs.queryStringParameters == queryStringParameters;
}
if (queryStringParameters.size() != rhs.queryStringParameters.size()) {
return false;
}
for (String key : getQueryParamKeys()) {
List<StringValue> values1 = getQueryParams(key);
Set<String> v1 = new TreeSet<String>();
List<StringValue> values2 = rhs.getQueryParams(key);
Set<String> v2 = new TreeSet<String>();
for (StringValue sv : values1) {
v1.add(sv.toString());
}
for (StringValue sv : values2) {
v2.add(sv.toString());
}
if (v1.equals(v2) == false) {
return false;
}
}
return true;
}
public Set<String> getQueryParamKeys() {
if (queryStringParameters == null || queryStringParameters.isEmpty()) {
return Collections.emptySet();
}
Set<String> set = new TreeSet<String>();
for (QueryStringParameter entry : queryStringParameters) {
set.add(entry.key);
}
return Collections.unmodifiableSet(set);
}
public List<StringValue> getQueryParams(String name) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (queryStringParameters != null) {
List<StringValue> result = new ArrayList<StringValue>();
for (QueryStringParameter entry : queryStringParameters) {
if (entry.key.equals(name)) {
result.add(StringValue.valueOf(entry.value));
}
}
return Collections.unmodifiableList(result);
} else {
return Collections.emptyList();
}
}
// -------------------------- OTHER METHODS --------------------------
public void addQueryParam(String name, Object value, int index) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (value == null) {
throw new IllegalArgumentException("Parameter value may not be null.");
}
if (queryStringParameters == null)
queryStringParameters = new ArrayList<QueryStringParameter>(1);
QueryStringParameter entry = new QueryStringParameter(name, value.toString());
if (index == -1)
queryStringParameters.add(entry);
else
queryStringParameters.add(index, entry);
}
void assign(BrixPageParameters other) {
if (this != other) {
this.indexedParameters = other.indexedParameters;
this.queryStringParameters = other.queryStringParameters;
}
}
public void clearIndexedParams() {
this.indexedParameters = null;
}
public void clearQueryParams() {
this.queryStringParameters = null;
}
public StringValue getIndexedParam(int index) {
if (indexedParameters != null) {
if (index >= 0 && index < indexedParameters.size()) {
String value = indexedParameters.get(index);
return StringValue.valueOf(value);
}
}
return StringValue.valueOf((String) null);
}
public StringValue getQueryParam(String name) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (queryStringParameters != null) {
for (QueryStringParameter entry : queryStringParameters) {
if (entry.key.equals(name)) {
return StringValue.valueOf(entry.value);
}
}
}
return StringValue.valueOf((String) null);
}
public List<QueryStringParameter> getQueryStringParams() {
if (queryStringParameters == null) {
return Collections.emptyList();
} else {
return Collections.unmodifiableList(new ArrayList<QueryStringParameter>(
queryStringParameters));
}
}
public void removeIndexedParam(int index) {
if (indexedParameters != null) {
if (index >= 0 && index < indexedParameters.size()) {
indexedParameters.remove(index);
}
}
}
public void setIndexedParam(int index, Object object) {
if (indexedParameters == null)
indexedParameters = new ArrayList<String>(index);
for (int i = indexedParameters.size(); i <= index; ++i) {
indexedParameters.add(null);
}
String value = object != null ? object.toString() : null;
indexedParameters.set(index, value);
}
public void setQueryParam(String name, Object value) {
setQueryParam(name, value, -1);
}
public void setQueryParam(String name, Object value, int index) {
removeQueryParam(name);
if (value != null) {
addQueryParam(name, value, index);
}
}
public void removeQueryParam(String name) {
if (name == null) {
throw new IllegalArgumentException("Parameter name may not be null.");
}
if (queryStringParameters != null) {
for (Iterator<QueryStringParameter> i = queryStringParameters.iterator(); i.hasNext();) {
QueryStringParameter e = i.next();
if (e.key.equals(name)) {
i.remove();
}
}
}
}
public String toCallbackURL() {
return urlFor(getCurrentPage());
}
/**
* Constructs a url to the specified page, appending these page parameters
*
* @param page target page
* @return url
*/
public String urlFor(BrixNodeWebPage page) {
IRequestTarget target = new BrixNodeRequestTarget(page, this);
return RequestCycle.get().urlFor(target).toString();
}
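// Hypothetical usage sketch (not from the original source), assuming "page"
// refers to an existing BrixNodeWebPage instance:
// BrixPageParameters params = new BrixPageParameters();
// params.setQueryParam("q", "books");
// String url = params.urlFor(page);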
static BrixNodeWebPage getCurrentPage() {
IRequestTarget target = RequestCycle.get().getRequestTarget();
BrixNodeWebPage page = null;
if (target != null && target instanceof IPageRequestTarget) {
Page p = ((IPageRequestTarget) target).getPage();
if (p instanceof BrixNodeWebPage) {
page = (BrixNodeWebPage) p;
}
}
if (page == null) {
throw new BrixException(
"Couldn't obtain the BrixNodeWebPage instance from RequestTarget.");
}
return page;
}
/**
* Constructs a url to the page behind the specified node, appending these page parameters
*
* @param node node model
* @return url
*/
public String urlFor(IModel<BrixNode> node) {
IRequestTarget target = new BrixNodeRequestTarget(node, this);
return RequestCycle.get().urlFor(target).toString();
}
// -------------------------- INNER CLASSES --------------------------
public static class QueryStringParameter implements Serializable {
private static final long serialVersionUID = 1L;
private final String key;
private final String value;
public QueryStringParameter(String key, String value) {
this.key = key;
this.value = value;
}
public String getKey() {
return key;
}
public String getValue() {
return value;
}
}
}
|
Java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.test.recovery;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.configuration.AkkaOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.runtime.akka.AkkaUtils;
import org.apache.flink.runtime.client.JobStatusMessage;
import org.apache.flink.runtime.highavailability.HighAvailabilityServices;
import org.apache.flink.runtime.highavailability.HighAvailabilityServicesUtils;
import org.apache.flink.runtime.jobmanager.JobManager;
import org.apache.flink.runtime.jobmanager.MemoryArchivist;
import org.apache.flink.runtime.messages.JobManagerMessages;
import org.apache.flink.runtime.metrics.NoOpMetricRegistry;
import org.apache.flink.runtime.testingUtils.TestingUtils;
import org.apache.flink.runtime.testutils.CommonTestUtils;
import org.apache.flink.util.NetUtils;
import org.apache.flink.util.TestLogger;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.pattern.Patterns;
import akka.util.Timeout;
import org.junit.Test;
import java.io.File;
import java.io.StringWriter;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import scala.Option;
import scala.Some;
import scala.Tuple2;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.duration.FiniteDuration;
import static org.apache.flink.runtime.testutils.CommonTestUtils.getCurrentClasspath;
import static org.apache.flink.runtime.testutils.CommonTestUtils.getJavaCommandPath;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* This test makes sure that jobs are canceled properly in cases where
* the task manager went down and did not respond to cancel messages.
*/
@SuppressWarnings("serial")
public class ProcessFailureCancelingITCase extends TestLogger {
@Test
public void testCancelingOnProcessFailure() throws Exception {
final StringWriter processOutput = new StringWriter();
ActorSystem jmActorSystem = null;
Process taskManagerProcess = null;
HighAvailabilityServices highAvailabilityServices = null;
try {
// check that we run this test only if the java command
// is available on this machine
String javaCommand = getJavaCommandPath();
if (javaCommand == null) {
System.out.println("---- Skipping Process Failure test : Could not find java executable ----");
return;
}
// create a logging file for the process
File tempLogFile = File.createTempFile(getClass().getSimpleName() + "-", "-log4j.properties");
tempLogFile.deleteOnExit();
CommonTestUtils.printLog4jDebugConfig(tempLogFile);
// find a free port to start the JobManager
final int jobManagerPort = NetUtils.getAvailablePort();
// start a JobManager
Tuple2<String, Object> localAddress = new Tuple2<String, Object>("localhost", jobManagerPort);
Configuration jmConfig = new Configuration();
jmConfig.setString(AkkaOptions.WATCH_HEARTBEAT_INTERVAL, "5 s");
jmConfig.setString(AkkaOptions.WATCH_HEARTBEAT_PAUSE, "2000 s");
jmConfig.setInteger(AkkaOptions.WATCH_THRESHOLD, 10);
jmConfig.setString(AkkaOptions.ASK_TIMEOUT, "100 s");
jmConfig.setString(JobManagerOptions.ADDRESS, localAddress._1());
jmConfig.setInteger(JobManagerOptions.PORT, jobManagerPort);
highAvailabilityServices = HighAvailabilityServicesUtils.createHighAvailabilityServices(
jmConfig,
TestingUtils.defaultExecutor(),
HighAvailabilityServicesUtils.AddressResolution.NO_ADDRESS_RESOLUTION);
jmActorSystem = AkkaUtils.createActorSystem(jmConfig, new Some<>(localAddress));
ActorRef jmActor = JobManager.startJobManagerActors(
jmConfig,
jmActorSystem,
TestingUtils.defaultExecutor(),
TestingUtils.defaultExecutor(),
highAvailabilityServices,
new NoOpMetricRegistry(),
Option.empty(),
JobManager.class,
MemoryArchivist.class)._1();
// the TaskManager java command
String[] command = new String[] {
javaCommand,
"-Dlog.level=DEBUG",
"-Dlog4j.configuration=file:" + tempLogFile.getAbsolutePath(),
"-Xms80m", "-Xmx80m",
"-classpath", getCurrentClasspath(),
AbstractTaskManagerProcessFailureRecoveryTest.TaskManagerProcessEntryPoint.class.getName(),
String.valueOf(jobManagerPort)
};
// start the TaskManager process
taskManagerProcess = new ProcessBuilder(command).start();
new CommonTestUtils.PipeForwarder(taskManagerProcess.getErrorStream(), processOutput);
// we wait for the JobManager to have the TaskManager available
// since some of the CI environments are very hostile, we need to give this a lot of time (2 minutes)
waitUntilNumTaskManagersAreRegistered(jmActor, 1, 120000);
final Throwable[] errorRef = new Throwable[1];
// start the test program, which infinitely blocks
Runnable programRunner = new Runnable() {
@Override
public void run() {
try {
ExecutionEnvironment env = ExecutionEnvironment.createRemoteEnvironment("localhost", jobManagerPort);
env.setParallelism(2);
env.setRestartStrategy(RestartStrategies.noRestart());
env.getConfig().disableSysoutLogging();
env.generateSequence(0, Long.MAX_VALUE)
.map(new MapFunction<Long, Long>() {
@Override
public Long map(Long value) throws Exception {
synchronized (this) {
wait();
}
return 0L;
}
})
.output(new DiscardingOutputFormat<Long>());
env.execute();
}
catch (Throwable t) {
errorRef[0] = t;
}
}
};
Thread programThread = new Thread(programRunner);
// kill the TaskManager
taskManagerProcess.destroy();
taskManagerProcess = null;
// immediately submit the job. this should hit the case
// where the JobManager still thinks it has the TaskManager and tries to send it tasks
programThread.start();
// try to cancel the job
cancelRunningJob(jmActor);
// we should see a failure within reasonable time (10s is the ask timeout).
// since the CI environment is often slow, we conservatively give it up to 2 minutes,
// to fail, which is much lower than the failure time given by the heartbeats ( > 2000s)
programThread.join(120000);
assertFalse("The program did not cancel in time (2 minutes)", programThread.isAlive());
Throwable error = errorRef[0];
assertNotNull("The program did not fail properly", error);
assertTrue(error instanceof ProgramInvocationException);
// all seems well :-)
}
catch (Exception e) {
printProcessLog("TaskManager", processOutput.toString());
throw e;
}
catch (Error e) {
printProcessLog("TaskManager 1", processOutput.toString());
throw e;
}
finally {
if (taskManagerProcess != null) {
taskManagerProcess.destroy();
}
if (jmActorSystem != null) {
jmActorSystem.shutdown();
}
if (highAvailabilityServices != null) {
highAvailabilityServices.closeAndCleanupAllData();
}
}
}
private void cancelRunningJob(ActorRef jobManager) throws Exception {
final FiniteDuration askTimeout = new FiniteDuration(10, TimeUnit.SECONDS);
// try at most for 30 seconds
final long deadline = System.currentTimeMillis() + 30000;
JobID jobId = null;
do {
Future<Object> response = Patterns.ask(jobManager,
JobManagerMessages.getRequestRunningJobsStatus(), new Timeout(askTimeout));
Object result;
try {
result = Await.result(response, askTimeout);
}
catch (Exception e) {
throw new Exception("Could not retrieve running jobs from the JobManager.", e);
}
if (result instanceof JobManagerMessages.RunningJobsStatus) {
List<JobStatusMessage> jobs = ((JobManagerMessages.RunningJobsStatus) result).getStatusMessages();
if (jobs.size() == 1) {
jobId = jobs.get(0).getJobId();
break;
}
}
}
while (System.currentTimeMillis() < deadline);
if (jobId == null) {
// we never found it running, must have failed already
return;
}
// tell the JobManager to cancel the job
jobManager.tell(
new JobManagerMessages.LeaderSessionMessage(
HighAvailabilityServices.DEFAULT_LEADER_ID,
new JobManagerMessages.CancelJob(jobId)),
ActorRef.noSender());
}
private void waitUntilNumTaskManagersAreRegistered(ActorRef jobManager, int numExpected, long maxDelay)
throws Exception {
final long deadline = System.currentTimeMillis() + maxDelay;
while (true) {
long remaining = deadline - System.currentTimeMillis();
if (remaining <= 0) {
fail("The TaskManagers did not register within the expected time (" + maxDelay + "msecs)");
}
FiniteDuration timeout = new FiniteDuration(remaining, TimeUnit.MILLISECONDS);
try {
Future<?> result = Patterns.ask(jobManager,
JobManagerMessages.getRequestNumberRegisteredTaskManager(),
new Timeout(timeout));
Integer numTMs = (Integer) Await.result(result, timeout);
if (numTMs == numExpected) {
break;
}
}
catch (TimeoutException e) {
// ignore and retry
}
catch (ClassCastException e) {
fail("Wrong response: " + e.getMessage());
}
}
}
private void printProcessLog(String processName, String log) {
if (log == null || log.length() == 0) {
return;
}
System.out.println("-----------------------------------------");
System.out.println(" BEGIN SPAWNED PROCESS LOG FOR " + processName);
System.out.println("-----------------------------------------");
System.out.println(log);
System.out.println("-----------------------------------------");
System.out.println(" END SPAWNED PROCESS LOG");
System.out.println("-----------------------------------------");
}
}
|
Java
|
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2011.09.09 at 01:22:27 PM CEST
//
package test;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.XmlValue;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <attribute name="content-type" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
* <attribute name="seq" type="{http://www.w3.org/2001/XMLSchema}anySimpleType" />
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"content"
})
@XmlRootElement(name = "fpage")
public class Fpage {
@XmlValue
protected String content;
@XmlAttribute(name = "content-type")
@XmlSchemaType(name = "anySimpleType")
protected String contentType;
@XmlAttribute
@XmlSchemaType(name = "anySimpleType")
protected String seq;
/**
* Gets the value of the content property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getContent() {
return content;
}
/**
* Sets the value of the content property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setContent(String value) {
this.content = value;
}
/**
* Gets the value of the contentType property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getContentType() {
return contentType;
}
/**
* Sets the value of the contentType property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setContentType(String value) {
this.contentType = value;
}
/**
* Gets the value of the seq property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getSeq() {
return seq;
}
/**
* Sets the value of the seq property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setSeq(String value) {
this.seq = value;
}
}
|
Java
|
'use strict';
const Task = require('co-task');
const sql = require('../api/helpers/sql');
module.exports = {
up: function (queryInterface, Sequelize) {
return Task.spawn(function* () {
yield queryInterface.addColumn('ClassicSalads', 'ClassicSaladCatagoryId', Sequelize.INTEGER);
yield sql.foreignKeyUp(queryInterface, 'ClassicSalads', 'ClassicSaladCatagoryId', 'ClassicSaladCatagories', 'id');
});
},
down: function (queryInterface, Sequelize) {
return Task.spawn(function* () {
yield sql.foreignKeyDown(queryInterface, 'ClassicSalads', 'ClassicSaladCatagoryId', 'ClassicSaladCatagories', 'id');
yield queryInterface.removeColumn('ClassicSalads', 'ClassicSaladCatagoryId');
});
}
};
|
JavaScript
|
<div class="navbar-default sidebar" role="navigation">
<div class="sidebar-nav navbar-collapse">
<ul class="nav in" id="side-menu">
<li>
</li>
<li ng-class="{active: collapseVar==key}" ng-repeat="(key, value) in doc.data">
<a href="" ng-click="check(key)"><i class="fa fa-info-circle fa-fw"></i> {{key}}<span class="fa arrow"></span></a>
<ul class="nav nav-second-level" collapse="collapseVar!=key">
<li ng-repeat="(key2, value2) in value" ng-show="key2 != 'info'">
<a href="/#/dashboard/doc/{{docapp.id}}/api/{{value2.info.id}}">{{key2}}</a>
</li>
</ul>
</li>
</ul>
</div>
</div>
|
HTML
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.oozie.action.hadoop;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.oozie.action.ActionExecutorException;
import org.apache.oozie.util.XLog;
import org.jdom.Element;
import org.jdom.Namespace;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
public abstract class ScriptLanguageActionExecutor extends JavaActionExecutor {
public ScriptLanguageActionExecutor(String type) {
super(type);
}
@Override
public List<Class<?>> getLauncherClasses() {
return null;
}
protected boolean shouldAddScriptToCache(){
return true;
}
@Override
protected Configuration setupLauncherConf(Configuration conf, Element actionXml, Path appPath, Context context)
throws ActionExecutorException {
super.setupLauncherConf(conf, actionXml, appPath, context);
if(shouldAddScriptToCache()) {
addScriptToCache(conf, actionXml, appPath, context);
}
return conf;
}
protected void addScriptToCache(Configuration conf, Element actionXml, Path appPath, Context context)
throws ActionExecutorException {
Namespace ns = actionXml.getNamespace();
String script = actionXml.getChild("script", ns).getTextTrim();
String name = new Path(script).getName();
String scriptContent = context.getProtoActionConf().get(this.getScriptName());
Path scriptFile = null;
if (scriptContent != null) { // Create script on filesystem if this is
// an http submission job;
FSDataOutputStream dos = null;
try {
Path actionPath = context.getActionDir();
scriptFile = new Path(actionPath, script);
FileSystem fs = context.getAppFileSystem();
dos = fs.create(scriptFile);
dos.write(scriptContent.getBytes(StandardCharsets.UTF_8));
addToCache(conf, actionPath, script + "#" + name, false);
}
catch (Exception ex) {
throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "FAILED_OPERATION", XLog
.format("Not able to write script file {0} on hdfs", scriptFile), ex);
}
finally {
try {
if (dos != null) {
dos.close();
}
}
catch (IOException ex) {
XLog.getLog(getClass()).error("Error: " + ex.getMessage());
}
}
}
else {
addToCache(conf, appPath, script + "#" + name, false);
}
}
protected abstract String getScriptName();
}
|
Java
|
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package : filebeat
# Version : 7.2.0
# Source repo : https://github.com/elastic/beats.git
# Tested on : RHEL 7.5
# Script License: Apache License Version 2.0
# Maintainer : Edmond Chan <ckchan@hk1.ibm.com>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# install dependencies
yum install -y golang make git
export GOPATH=~/go
mkdir -p ${GOPATH}/src/github.com/elastic
cd ${GOPATH}/src/github.com/elastic
git clone https://github.com/elastic/beats.git
cd $GOPATH/src/github.com/elastic/beats/filebeat
git checkout v7.2.0
make
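# On success the build drops a filebeat binary in the current directory
# (./filebeat); this note is an assumption based on the standard beats Makefile.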
|
Shell
|
// Code generated - DO NOT EDIT.
package topology
import (
"github.com/skydive-project/skydive/graffiti/getter"
"strings"
)
func (obj *NextHop) GetFieldBool(key string) (bool, error) {
return false, getter.ErrFieldNotFound
}
func (obj *NextHop) GetFieldInt64(key string) (int64, error) {
switch key {
case "Priority":
return int64(obj.Priority), nil
case "IfIndex":
return int64(obj.IfIndex), nil
}
return 0, getter.ErrFieldNotFound
}
func (obj *NextHop) GetFieldString(key string) (string, error) {
switch key {
case "IP":
return obj.IP.String(), nil
case "MAC":
return string(obj.MAC), nil
}
return "", getter.ErrFieldNotFound
}
func (obj *NextHop) GetFieldKeys() []string {
return []string{
"Priority",
"IP",
"MAC",
"IfIndex",
}
}
func (obj *NextHop) MatchBool(key string, predicate getter.BoolPredicate) bool {
return false
}
func (obj *NextHop) MatchInt64(key string, predicate getter.Int64Predicate) bool {
if b, err := obj.GetFieldInt64(key); err == nil {
return predicate(b)
}
return false
}
func (obj *NextHop) MatchString(key string, predicate getter.StringPredicate) bool {
if b, err := obj.GetFieldString(key); err == nil {
return predicate(b)
}
return false
}
func (obj *NextHop) GetField(key string) (interface{}, error) {
if s, err := obj.GetFieldString(key); err == nil {
return s, nil
}
if i, err := obj.GetFieldInt64(key); err == nil {
return i, nil
}
return nil, getter.ErrFieldNotFound
}
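// The no-op strings.Index call in init() below likely exists so that the
// unconditional "strings" import in this generated file never trips Go's
// "imported and not used" compile error (assumption).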
func init() {
strings.Index("", ".")
}
|
Go
|
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.lang.ant.config.execution;
import com.intellij.execution.filters.Filter;
import com.intellij.execution.filters.OpenFileHyperlinkInfo;
import com.intellij.execution.filters.TextConsoleBuilder;
import com.intellij.execution.filters.TextConsoleBuilderFactory;
import com.intellij.execution.process.ProcessHandler;
import com.intellij.execution.process.ProcessOutputTypes;
import com.intellij.execution.ui.ConsoleView;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.io.File;
import java.io.OutputStream;
public final class PlainTextView implements AntOutputView {
private final ConsoleView myConsole;
private final Project myProject;
private String myCommandLine;
private final LightProcessHandler myProcessHandler = new LightProcessHandler();
public PlainTextView(Project project) {
myProject = project;
TextConsoleBuilder builder = TextConsoleBuilderFactory.getInstance().createBuilder(project);
builder.addFilter(new AntMessageFilter());
builder.addFilter(new JUnitFilter());
myConsole = builder.getConsole();
myConsole.attachToProcess(myProcessHandler);
}
public void dispose() {
Disposer.dispose(myConsole);
}
@Override
public String getId() {
return "_text_view_";
}
@Override
public JComponent getComponent() {
return myConsole.getComponent();
}
@Override
@Nullable
public Object addMessage(AntMessage message) {
print(message.getText() + "\n", ProcessOutputTypes.STDOUT);
return null;
}
private void print(String text, Key type) {
myProcessHandler.notifyTextAvailable(text, type);
}
public void addMessages(AntMessage[] messages) {
for (AntMessage message : messages) {
addMessage(message);
}
}
@Override
public void addJavacMessage(AntMessage message, String url) {
if (message.getLine() > 0) {
String msg = TreeView.printMessage(message, url);
print(msg, ProcessOutputTypes.STDOUT);
}
print(message.getText(), ProcessOutputTypes.STDOUT);
}
@Override
public void addException(AntMessage exception, boolean showFullTrace) {
String text = exception.getText();
if (!showFullTrace) {
int index = text.indexOf("\r\n");
if (index != -1) {
text = text.substring(0, index) + "\n";
}
}
print(text, ProcessOutputTypes.STDOUT);
}
public void clearAllMessages() {
myConsole.clear();
}
@Override
public void startBuild(AntMessage message) {
print(myCommandLine + "\n", ProcessOutputTypes.SYSTEM);
addMessage(message);
}
@Override
public void buildFailed(AntMessage message) {
print(myCommandLine + "\n", ProcessOutputTypes.SYSTEM);
addMessage(message);
}
@Override
public void startTarget(AntMessage message) {
addMessage(message);
}
@Override
public void startTask(AntMessage message) {
addMessage(message);
}
@Override
public void finishBuild(String messageText) {
print("\n" + messageText + "\n", ProcessOutputTypes.SYSTEM);
}
@Override
public void finishTarget() {
}
@Override
public void finishTask() {
}
@Override
@Nullable
public Object getData(@NotNull String dataId) {
return null;
}
public void setBuildCommandLine(String commandLine) {
myCommandLine = commandLine;
}
private final class JUnitFilter implements Filter {
@Override
@Nullable
public Result applyFilter(String line, int entireLength) {
HyperlinkUtil.PlaceInfo placeInfo = HyperlinkUtil.parseJUnitMessage(myProject, line);
if (placeInfo == null) {
return null;
}
int textStartOffset = entireLength - line.length();
int highlightStartOffset = textStartOffset + placeInfo.getLinkStartIndex();
int highlightEndOffset = textStartOffset + placeInfo.getLinkEndIndex() + 1;
OpenFileHyperlinkInfo info = new OpenFileHyperlinkInfo(myProject, placeInfo.getFile(), placeInfo.getLine(), placeInfo.getColumn());
return new Result(highlightStartOffset, highlightEndOffset, info);
}
}
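/**
 * Turns compiler-style output lines of the form "path/to/File.java:123: message"
 * into clickable hyperlinks pointing at the referenced file and line.
 */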
private final class AntMessageFilter implements Filter {
@Override
public Result applyFilter(String line, int entireLength) {
int afterLineNumberIndex = line.indexOf(": "); // end of file_name_and_line_number sequence
if (afterLineNumberIndex == -1) {
return null;
}
String fileAndLineNumber = line.substring(0, afterLineNumberIndex);
int index = fileAndLineNumber.lastIndexOf(':');
if (index == -1) {
return null;
}
final String fileName = fileAndLineNumber.substring(0, index);
String lineNumberStr = fileAndLineNumber.substring(index + 1).trim();
int lineNumber;
try {
lineNumber = Integer.parseInt(lineNumberStr);
}
catch (NumberFormatException e) {
return null;
}
final VirtualFile file = LocalFileSystem.getInstance().findFileByPath(fileName.replace(File.separatorChar, '/'));
if (file == null) {
return null;
}
int textStartOffset = entireLength - line.length();
int highlightEndOffset = textStartOffset + afterLineNumberIndex;
OpenFileHyperlinkInfo info = new OpenFileHyperlinkInfo(myProject, file, lineNumber - 1);
return new Result(textStartOffset, highlightEndOffset, info);
}
}
private static class LightProcessHandler extends ProcessHandler {
@Override
protected void destroyProcessImpl() {
throw new UnsupportedOperationException();
}
@Override
protected void detachProcessImpl() {
throw new UnsupportedOperationException();
}
@Override
public boolean detachIsDefault() {
return false;
}
@Override
@Nullable
public OutputStream getProcessInput() {
return null;
}
}
}
|
C#
|
/*
Copyright 2012-2022 Marco De Salvo
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using RDFSharp.Model;
using RDFSharp.Semantics.OWL;
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace RDFSharp.Semantics.SKOS
{
/// <summary>
/// RDFSKOSConceptScheme represents an instance of skos:ConceptScheme within ontology data.
/// </summary>
public class RDFSKOSConceptScheme : RDFOntologyFact, IEnumerable<RDFSKOSConcept>
{
#region Properties
/// <summary>
/// Count of the concepts composing the scheme
/// </summary>
public long ConceptsCount
=> this.Concepts.Count;
/// <summary>
/// Count of the collections composing the scheme
/// </summary>
public long CollectionsCount
=> this.Collections.Count;
/// <summary>
/// Count of the ordered collections composing the scheme
/// </summary>
public long OrderedCollectionsCount
=> this.OrderedCollections.Count;
/// <summary>
/// Count of the labels composing the scheme
/// </summary>
public long LabelsCount
=> this.Labels.Count;
/// <summary>
/// Gets the enumerator on the concepts of the scheme for iteration
/// </summary>
public IEnumerator<RDFSKOSConcept> ConceptsEnumerator
=> this.Concepts.Values.GetEnumerator();
/// <summary>
/// Gets the enumerator on the collections of the scheme for iteration
/// </summary>
public IEnumerator<RDFSKOSCollection> CollectionsEnumerator
=> this.Collections.Values.GetEnumerator();
/// <summary>
/// Gets the enumerator on the ordered collections of the scheme for iteration
/// </summary>
public IEnumerator<RDFSKOSOrderedCollection> OrderedCollectionsEnumerator
=> this.OrderedCollections.Values.GetEnumerator();
/// <summary>
/// Gets the enumerator on the labels of the scheme for iteration
/// </summary>
public IEnumerator<RDFSKOSLabel> LabelsEnumerator
=> this.Labels.Values.GetEnumerator();
/// <summary>
/// Annotations describing concepts of the scheme
/// </summary>
public RDFSKOSAnnotations Annotations { get; internal set; }
/// <summary>
/// Relations describing concepts of the scheme
/// </summary>
public RDFSKOSRelations Relations { get; internal set; }
/// <summary>
/// Concepts contained in the scheme (encodes the 'skos:inScheme' relation)
/// </summary>
internal Dictionary<long, RDFSKOSConcept> Concepts { get; set; }
/// <summary>
/// Collections contained in the scheme
/// </summary>
internal Dictionary<long, RDFSKOSCollection> Collections { get; set; }
/// <summary>
/// OrderedCollections contained in the scheme
/// </summary>
internal Dictionary<long, RDFSKOSOrderedCollection> OrderedCollections { get; set; }
/// <summary>
/// Labels contained in the scheme
/// </summary>
internal Dictionary<long, RDFSKOSLabel> Labels { get; set; }
#endregion
#region Ctors
/// <summary>
/// Default-ctor to build a conceptScheme with the given name
/// </summary>
public RDFSKOSConceptScheme(RDFResource conceptName) : base(conceptName)
{
this.Concepts = new Dictionary<long, RDFSKOSConcept>();
this.Collections = new Dictionary<long, RDFSKOSCollection>();
this.OrderedCollections = new Dictionary<long, RDFSKOSOrderedCollection>();
this.Labels = new Dictionary<long, RDFSKOSLabel>();
this.Annotations = new RDFSKOSAnnotations();
this.Relations = new RDFSKOSRelations();
}
#endregion
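// Minimal usage sketch (the RDFSKOSConcept ctor is assumed to mirror this
// class and take an RDFResource):
//   var scheme = new RDFSKOSConceptScheme(new RDFResource("ex:myScheme"));
//   scheme.AddConcept(new RDFSKOSConcept(new RDFResource("ex:myConcept")));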
#region Interfaces
/// <summary>
/// Exposes a typed enumerator on the scheme's concepts
/// </summary>
IEnumerator<RDFSKOSConcept> IEnumerable<RDFSKOSConcept>.GetEnumerator() => this.ConceptsEnumerator;
/// <summary>
/// Exposes an untyped enumerator on the scheme's concepts
/// </summary>
IEnumerator IEnumerable.GetEnumerator() => this.ConceptsEnumerator;
#endregion
#region Methods
#region Add
/// <summary>
/// Adds the given concept to the scheme
/// </summary>
public RDFSKOSConceptScheme AddConcept(RDFSKOSConcept concept)
{
if (concept != null)
{
if (!this.Concepts.ContainsKey(concept.PatternMemberID))
this.Concepts.Add(concept.PatternMemberID, concept);
}
return this;
}
/// <summary>
/// Adds the given collection to the scheme
/// </summary>
public RDFSKOSConceptScheme AddCollection(RDFSKOSCollection collection)
{
if (collection != null)
{
if (!this.Collections.ContainsKey(collection.PatternMemberID))
{
this.Collections.Add(collection.PatternMemberID, collection);
//Also add concepts of the collection
foreach (var cn in collection.Concepts.Values)
this.AddConcept(cn);
//Also add the nested collections of the collection
foreach (var cl in collection.Collections.Values)
this.AddCollection(cl);
}
}
return this;
}
/// <summary>
/// Adds the given ordered collection to the scheme
/// </summary>
public RDFSKOSConceptScheme AddOrderedCollection(RDFSKOSOrderedCollection orderedCollection)
{
if (orderedCollection != null)
{
if (!this.OrderedCollections.ContainsKey(orderedCollection.PatternMemberID))
{
this.OrderedCollections.Add(orderedCollection.PatternMemberID, orderedCollection);
//Also add concepts of the ordered collection
foreach (var cn in orderedCollection.Concepts.Values.OrderBy(x => x.Item1))
this.AddConcept(cn.Item2);
}
}
return this;
}
/// <summary>
/// Adds the given label to the scheme
/// </summary>
public RDFSKOSConceptScheme AddLabel(RDFSKOSLabel label)
{
if (label != null)
{
if (!this.Labels.ContainsKey(label.PatternMemberID))
this.Labels.Add(label.PatternMemberID, label);
}
return this;
}
#endregion
#region Remove
/// <summary>
/// Removes the given concept from the scheme
/// </summary>
public RDFSKOSConceptScheme RemoveConcept(RDFSKOSConcept concept)
{
if (concept != null)
{
if (this.Concepts.ContainsKey(concept.PatternMemberID))
this.Concepts.Remove(concept.PatternMemberID);
}
return this;
}
/// <summary>
/// Removes the given collection from the scheme
/// </summary>
public RDFSKOSConceptScheme RemoveCollection(RDFSKOSCollection collection)
{
if (collection != null)
{
if (this.Collections.ContainsKey(collection.PatternMemberID))
this.Collections.Remove(collection.PatternMemberID);
}
return this;
}
/// <summary>
/// Removes the given ordered collection from the scheme
/// </summary>
public RDFSKOSConceptScheme RemoveOrderedCollection(RDFSKOSOrderedCollection orderedCollection)
{
if (orderedCollection != null)
{
if (this.OrderedCollections.ContainsKey(orderedCollection.PatternMemberID))
this.OrderedCollections.Remove(orderedCollection.PatternMemberID);
}
return this;
}
/// <summary>
/// Removes the given label from the scheme
/// </summary>
public RDFSKOSConceptScheme RemoveLabel(RDFSKOSLabel label)
{
if (label != null)
{
if (this.Labels.ContainsKey(label.PatternMemberID))
this.Labels.Remove(label.PatternMemberID);
}
return this;
}
#endregion
#region Select
/// <summary>
/// Selects the concept represented by the given string from the scheme
/// </summary>
public RDFSKOSConcept SelectConcept(string concept)
{
if (concept != null)
{
long conceptID = RDFModelUtilities.CreateHash(concept);
if (this.Concepts.ContainsKey(conceptID))
return this.Concepts[conceptID];
}
return null;
}
/// <summary>
/// Selects the collection represented by the given string from the scheme
/// </summary>
public RDFSKOSCollection SelectCollection(string collection)
{
if (collection != null)
{
long collectionID = RDFModelUtilities.CreateHash(collection);
if (this.Collections.ContainsKey(collectionID))
return this.Collections[collectionID];
}
return null;
}
/// <summary>
/// Selects the ordered collection represented by the given string from the scheme
/// </summary>
public RDFSKOSOrderedCollection SelectOrderedCollection(string orderedCollection)
{
if (orderedCollection != null)
{
long orderedCollectionID = RDFModelUtilities.CreateHash(orderedCollection);
if (this.OrderedCollections.ContainsKey(orderedCollectionID))
return this.OrderedCollections[orderedCollectionID];
}
return null;
}
/// <summary>
/// Selects the label represented by the given string from the scheme
/// </summary>
public RDFSKOSLabel SelectLabel(string label)
{
if (label != null)
{
long labelID = RDFModelUtilities.CreateHash(label);
if (this.Labels.ContainsKey(labelID))
return this.Labels[labelID];
}
return null;
}
#endregion
#region Set
/// <summary>
/// Builds a new intersection scheme from this scheme and a given one
/// </summary>
public RDFSKOSConceptScheme IntersectWith(RDFSKOSConceptScheme conceptScheme)
{
RDFSKOSConceptScheme result = new RDFSKOSConceptScheme(new RDFResource());
if (conceptScheme != null)
{
//Add intersection concepts
foreach (RDFSKOSConcept c in this)
{
if (conceptScheme.Concepts.ContainsKey(c.PatternMemberID))
result.AddConcept(c);
}
//Add intersection collections
foreach (RDFSKOSCollection c in this.Collections.Values)
{
if (conceptScheme.Collections.ContainsKey(c.PatternMemberID))
result.AddCollection(c);
}
//Add intersection ordered collections
foreach (RDFSKOSOrderedCollection o in this.OrderedCollections.Values)
{
if (conceptScheme.OrderedCollections.ContainsKey(o.PatternMemberID))
result.AddOrderedCollection(o);
}
//Add intersection labels
foreach (RDFSKOSLabel l in this.Labels.Values)
{
if (conceptScheme.Labels.ContainsKey(l.PatternMemberID))
result.AddLabel(l);
}
//Add intersection relations
result.Relations.TopConcept = this.Relations.TopConcept.IntersectWith(conceptScheme.Relations.TopConcept);
result.Relations.Broader = this.Relations.Broader.IntersectWith(conceptScheme.Relations.Broader);
result.Relations.BroaderTransitive = this.Relations.BroaderTransitive.IntersectWith(conceptScheme.Relations.BroaderTransitive);
result.Relations.BroadMatch = this.Relations.BroadMatch.IntersectWith(conceptScheme.Relations.BroadMatch);
result.Relations.Narrower = this.Relations.Narrower.IntersectWith(conceptScheme.Relations.Narrower);
result.Relations.NarrowerTransitive = this.Relations.NarrowerTransitive.IntersectWith(conceptScheme.Relations.NarrowerTransitive);
result.Relations.NarrowMatch = this.Relations.NarrowMatch.IntersectWith(conceptScheme.Relations.NarrowMatch);
result.Relations.Related = this.Relations.Related.IntersectWith(conceptScheme.Relations.Related);
result.Relations.RelatedMatch = this.Relations.RelatedMatch.IntersectWith(conceptScheme.Relations.RelatedMatch);
result.Relations.SemanticRelation = this.Relations.SemanticRelation.IntersectWith(conceptScheme.Relations.SemanticRelation);
result.Relations.MappingRelation = this.Relations.MappingRelation.IntersectWith(conceptScheme.Relations.MappingRelation);
result.Relations.CloseMatch = this.Relations.CloseMatch.IntersectWith(conceptScheme.Relations.CloseMatch);
result.Relations.ExactMatch = this.Relations.ExactMatch.IntersectWith(conceptScheme.Relations.ExactMatch);
result.Relations.Notation = this.Relations.Notation.IntersectWith(conceptScheme.Relations.Notation);
result.Relations.PrefLabel = this.Relations.PrefLabel.IntersectWith(conceptScheme.Relations.PrefLabel);
result.Relations.AltLabel = this.Relations.AltLabel.IntersectWith(conceptScheme.Relations.AltLabel);
result.Relations.HiddenLabel = this.Relations.HiddenLabel.IntersectWith(conceptScheme.Relations.HiddenLabel);
result.Relations.LiteralForm = this.Relations.LiteralForm.IntersectWith(conceptScheme.Relations.LiteralForm);
result.Relations.LabelRelation = this.Relations.LabelRelation.IntersectWith(conceptScheme.Relations.LabelRelation);
//Add intersection annotations
result.Annotations.PrefLabel = this.Annotations.PrefLabel.IntersectWith(conceptScheme.Annotations.PrefLabel);
result.Annotations.AltLabel = this.Annotations.AltLabel.IntersectWith(conceptScheme.Annotations.AltLabel);
result.Annotations.HiddenLabel = this.Annotations.HiddenLabel.IntersectWith(conceptScheme.Annotations.HiddenLabel);
result.Annotations.Note = this.Annotations.Note.IntersectWith(conceptScheme.Annotations.Note);
result.Annotations.ChangeNote = this.Annotations.ChangeNote.IntersectWith(conceptScheme.Annotations.ChangeNote);
result.Annotations.EditorialNote = this.Annotations.EditorialNote.IntersectWith(conceptScheme.Annotations.EditorialNote);
result.Annotations.HistoryNote = this.Annotations.HistoryNote.IntersectWith(conceptScheme.Annotations.HistoryNote);
result.Annotations.ScopeNote = this.Annotations.ScopeNote.IntersectWith(conceptScheme.Annotations.ScopeNote);
result.Annotations.Definition = this.Annotations.Definition.IntersectWith(conceptScheme.Annotations.Definition);
result.Annotations.Example = this.Annotations.Example.IntersectWith(conceptScheme.Annotations.Example);
}
return result;
}
/// <summary>
/// Builds a new union scheme from this scheme and a given one
/// </summary>
public RDFSKOSConceptScheme UnionWith(RDFSKOSConceptScheme conceptScheme)
{
RDFSKOSConceptScheme result = new RDFSKOSConceptScheme(new RDFResource());
//Add concepts from this scheme
foreach (RDFSKOSConcept c in this)
result.AddConcept(c);
//Add collections from this scheme
foreach (RDFSKOSCollection c in this.Collections.Values)
result.AddCollection(c);
//Add ordered collections from this scheme
foreach (RDFSKOSOrderedCollection o in this.OrderedCollections.Values)
result.AddOrderedCollection(o);
//Add labels from this scheme
foreach (RDFSKOSLabel l in this.Labels.Values)
result.AddLabel(l);
//Add relations from this scheme
result.Relations.TopConcept = result.Relations.TopConcept.UnionWith(this.Relations.TopConcept);
result.Relations.Broader = result.Relations.Broader.UnionWith(this.Relations.Broader);
result.Relations.BroaderTransitive = result.Relations.BroaderTransitive.UnionWith(this.Relations.BroaderTransitive);
result.Relations.BroadMatch = result.Relations.BroadMatch.UnionWith(this.Relations.BroadMatch);
result.Relations.Narrower = result.Relations.Narrower.UnionWith(this.Relations.Narrower);
result.Relations.NarrowerTransitive = result.Relations.NarrowerTransitive.UnionWith(this.Relations.NarrowerTransitive);
result.Relations.NarrowMatch = result.Relations.NarrowMatch.UnionWith(this.Relations.NarrowMatch);
result.Relations.Related = result.Relations.Related.UnionWith(this.Relations.Related);
result.Relations.RelatedMatch = result.Relations.RelatedMatch.UnionWith(this.Relations.RelatedMatch);
result.Relations.SemanticRelation = result.Relations.SemanticRelation.UnionWith(this.Relations.SemanticRelation);
result.Relations.MappingRelation = result.Relations.MappingRelation.UnionWith(this.Relations.MappingRelation);
result.Relations.CloseMatch = result.Relations.CloseMatch.UnionWith(this.Relations.CloseMatch);
result.Relations.ExactMatch = result.Relations.ExactMatch.UnionWith(this.Relations.ExactMatch);
result.Relations.Notation = result.Relations.Notation.UnionWith(this.Relations.Notation);
result.Relations.PrefLabel = result.Relations.PrefLabel.UnionWith(this.Relations.PrefLabel);
result.Relations.AltLabel = result.Relations.AltLabel.UnionWith(this.Relations.AltLabel);
result.Relations.HiddenLabel = result.Relations.HiddenLabel.UnionWith(this.Relations.HiddenLabel);
result.Relations.LiteralForm = result.Relations.LiteralForm.UnionWith(this.Relations.LiteralForm);
result.Relations.LabelRelation = result.Relations.LabelRelation.UnionWith(this.Relations.LabelRelation);
//Add annotations from this scheme
result.Annotations.PrefLabel = result.Annotations.PrefLabel.UnionWith(this.Annotations.PrefLabel);
result.Annotations.AltLabel = result.Annotations.AltLabel.UnionWith(this.Annotations.AltLabel);
result.Annotations.HiddenLabel = result.Annotations.HiddenLabel.UnionWith(this.Annotations.HiddenLabel);
result.Annotations.Note = result.Annotations.Note.UnionWith(this.Annotations.Note);
result.Annotations.ChangeNote = result.Annotations.ChangeNote.UnionWith(this.Annotations.ChangeNote);
result.Annotations.EditorialNote = result.Annotations.EditorialNote.UnionWith(this.Annotations.EditorialNote);
result.Annotations.HistoryNote = result.Annotations.HistoryNote.UnionWith(this.Annotations.HistoryNote);
result.Annotations.ScopeNote = result.Annotations.ScopeNote.UnionWith(this.Annotations.ScopeNote);
result.Annotations.Definition = result.Annotations.Definition.UnionWith(this.Annotations.Definition);
result.Annotations.Example = result.Annotations.Example.UnionWith(this.Annotations.Example);
//Manage the given scheme
if (conceptScheme != null)
{
//Add concepts from the given scheme
foreach (RDFSKOSConcept c in conceptScheme)
result.AddConcept(c);
//Add collections from the given scheme
foreach (RDFSKOSCollection c in conceptScheme.Collections.Values)
result.AddCollection(c);
//Add ordered collections from the given scheme
foreach (RDFSKOSOrderedCollection o in conceptScheme.OrderedCollections.Values)
result.AddOrderedCollection(o);
//Add labels from the given scheme
foreach (RDFSKOSLabel l in conceptScheme.Labels.Values)
result.AddLabel(l);
//Add relations from the given scheme
result.Relations.TopConcept = result.Relations.TopConcept.UnionWith(conceptScheme.Relations.TopConcept);
result.Relations.Broader = result.Relations.Broader.UnionWith(conceptScheme.Relations.Broader);
result.Relations.BroaderTransitive = result.Relations.BroaderTransitive.UnionWith(conceptScheme.Relations.BroaderTransitive);
result.Relations.BroadMatch = result.Relations.BroadMatch.UnionWith(conceptScheme.Relations.BroadMatch);
result.Relations.Narrower = result.Relations.Narrower.UnionWith(conceptScheme.Relations.Narrower);
result.Relations.NarrowerTransitive = result.Relations.NarrowerTransitive.UnionWith(conceptScheme.Relations.NarrowerTransitive);
result.Relations.NarrowMatch = result.Relations.NarrowMatch.UnionWith(conceptScheme.Relations.NarrowMatch);
result.Relations.Related = result.Relations.Related.UnionWith(conceptScheme.Relations.Related);
result.Relations.RelatedMatch = result.Relations.RelatedMatch.UnionWith(conceptScheme.Relations.RelatedMatch);
result.Relations.SemanticRelation = result.Relations.SemanticRelation.UnionWith(conceptScheme.Relations.SemanticRelation);
result.Relations.MappingRelation = result.Relations.MappingRelation.UnionWith(conceptScheme.Relations.MappingRelation);
result.Relations.CloseMatch = result.Relations.CloseMatch.UnionWith(conceptScheme.Relations.CloseMatch);
result.Relations.ExactMatch = result.Relations.ExactMatch.UnionWith(conceptScheme.Relations.ExactMatch);
result.Relations.Notation = result.Relations.Notation.UnionWith(conceptScheme.Relations.Notation);
result.Relations.PrefLabel = result.Relations.PrefLabel.UnionWith(conceptScheme.Relations.PrefLabel);
result.Relations.AltLabel = result.Relations.AltLabel.UnionWith(conceptScheme.Relations.AltLabel);
result.Relations.HiddenLabel = result.Relations.HiddenLabel.UnionWith(conceptScheme.Relations.HiddenLabel);
result.Relations.LiteralForm = result.Relations.LiteralForm.UnionWith(conceptScheme.Relations.LiteralForm);
result.Relations.LabelRelation = result.Relations.LabelRelation.UnionWith(conceptScheme.Relations.LabelRelation);
//Add annotations from the given scheme
result.Annotations.PrefLabel = result.Annotations.PrefLabel.UnionWith(conceptScheme.Annotations.PrefLabel);
result.Annotations.AltLabel = result.Annotations.AltLabel.UnionWith(conceptScheme.Annotations.AltLabel);
result.Annotations.HiddenLabel = result.Annotations.HiddenLabel.UnionWith(conceptScheme.Annotations.HiddenLabel);
result.Annotations.Note = result.Annotations.Note.UnionWith(conceptScheme.Annotations.Note);
result.Annotations.ChangeNote = result.Annotations.ChangeNote.UnionWith(conceptScheme.Annotations.ChangeNote);
result.Annotations.EditorialNote = result.Annotations.EditorialNote.UnionWith(conceptScheme.Annotations.EditorialNote);
result.Annotations.HistoryNote = result.Annotations.HistoryNote.UnionWith(conceptScheme.Annotations.HistoryNote);
result.Annotations.ScopeNote = result.Annotations.ScopeNote.UnionWith(conceptScheme.Annotations.ScopeNote);
result.Annotations.Definition = result.Annotations.Definition.UnionWith(conceptScheme.Annotations.Definition);
result.Annotations.Example = result.Annotations.Example.UnionWith(conceptScheme.Annotations.Example);
}
return result;
}
/// <summary>
/// Builds a new difference scheme from this scheme and a given one
/// </summary>
public RDFSKOSConceptScheme DifferenceWith(RDFSKOSConceptScheme conceptScheme)
{
RDFSKOSConceptScheme result = new RDFSKOSConceptScheme(new RDFResource());
if (conceptScheme != null)
{
//Add difference concepts
foreach (RDFSKOSConcept c in this)
{
if (!conceptScheme.Concepts.ContainsKey(c.PatternMemberID))
result.AddConcept(c);
}
//Add difference collections
foreach (RDFSKOSCollection c in this.Collections.Values)
{
if (!conceptScheme.Collections.ContainsKey(c.PatternMemberID))
result.AddCollection(c);
}
//Add difference ordered collections
foreach (RDFSKOSOrderedCollection o in this.OrderedCollections.Values)
{
if (!conceptScheme.OrderedCollections.ContainsKey(o.PatternMemberID))
result.AddOrderedCollection(o);
}
//Add difference labels
foreach (RDFSKOSLabel l in this.Labels.Values)
{
if (!conceptScheme.Labels.ContainsKey(l.PatternMemberID))
result.AddLabel(l);
}
//Add difference relations
result.Relations.TopConcept = this.Relations.TopConcept.DifferenceWith(conceptScheme.Relations.TopConcept);
result.Relations.Broader = this.Relations.Broader.DifferenceWith(conceptScheme.Relations.Broader);
result.Relations.BroaderTransitive = this.Relations.BroaderTransitive.DifferenceWith(conceptScheme.Relations.BroaderTransitive);
result.Relations.BroadMatch = this.Relations.BroadMatch.DifferenceWith(conceptScheme.Relations.BroadMatch);
result.Relations.Narrower = this.Relations.Narrower.DifferenceWith(conceptScheme.Relations.Narrower);
result.Relations.NarrowerTransitive = this.Relations.NarrowerTransitive.DifferenceWith(conceptScheme.Relations.NarrowerTransitive);
result.Relations.NarrowMatch = this.Relations.NarrowMatch.DifferenceWith(conceptScheme.Relations.NarrowMatch);
result.Relations.Related = this.Relations.Related.DifferenceWith(conceptScheme.Relations.Related);
result.Relations.RelatedMatch = this.Relations.RelatedMatch.DifferenceWith(conceptScheme.Relations.RelatedMatch);
result.Relations.SemanticRelation = this.Relations.SemanticRelation.DifferenceWith(conceptScheme.Relations.SemanticRelation);
result.Relations.MappingRelation = this.Relations.MappingRelation.DifferenceWith(conceptScheme.Relations.MappingRelation);
result.Relations.CloseMatch = this.Relations.CloseMatch.DifferenceWith(conceptScheme.Relations.CloseMatch);
result.Relations.ExactMatch = this.Relations.ExactMatch.DifferenceWith(conceptScheme.Relations.ExactMatch);
result.Relations.Notation = this.Relations.Notation.DifferenceWith(conceptScheme.Relations.Notation);
result.Relations.PrefLabel = this.Relations.PrefLabel.DifferenceWith(conceptScheme.Relations.PrefLabel);
result.Relations.AltLabel = this.Relations.AltLabel.DifferenceWith(conceptScheme.Relations.AltLabel);
result.Relations.HiddenLabel = this.Relations.HiddenLabel.DifferenceWith(conceptScheme.Relations.HiddenLabel);
result.Relations.LiteralForm = this.Relations.LiteralForm.DifferenceWith(conceptScheme.Relations.LiteralForm);
result.Relations.LabelRelation = this.Relations.LabelRelation.DifferenceWith(conceptScheme.Relations.LabelRelation);
//Add difference annotations
result.Annotations.PrefLabel = this.Annotations.PrefLabel.DifferenceWith(conceptScheme.Annotations.PrefLabel);
result.Annotations.AltLabel = this.Annotations.AltLabel.DifferenceWith(conceptScheme.Annotations.AltLabel);
result.Annotations.HiddenLabel = this.Annotations.HiddenLabel.DifferenceWith(conceptScheme.Annotations.HiddenLabel);
result.Annotations.Note = this.Annotations.Note.DifferenceWith(conceptScheme.Annotations.Note);
result.Annotations.ChangeNote = this.Annotations.ChangeNote.DifferenceWith(conceptScheme.Annotations.ChangeNote);
result.Annotations.EditorialNote = this.Annotations.EditorialNote.DifferenceWith(conceptScheme.Annotations.EditorialNote);
result.Annotations.HistoryNote = this.Annotations.HistoryNote.DifferenceWith(conceptScheme.Annotations.HistoryNote);
result.Annotations.ScopeNote = this.Annotations.ScopeNote.DifferenceWith(conceptScheme.Annotations.ScopeNote);
result.Annotations.Definition = this.Annotations.Definition.DifferenceWith(conceptScheme.Annotations.Definition);
result.Annotations.Example = this.Annotations.Example.DifferenceWith(conceptScheme.Annotations.Example);
}
else
{
//Add concepts from this scheme
foreach (RDFSKOSConcept c in this)
result.AddConcept(c);
//Add collections from this scheme
foreach (RDFSKOSCollection c in this.Collections.Values)
result.AddCollection(c);
//Add ordered collections from this scheme
foreach (RDFSKOSOrderedCollection o in this.OrderedCollections.Values)
result.AddOrderedCollection(o);
//Add labels from this scheme
foreach (RDFSKOSLabel l in this.Labels.Values)
result.AddLabel(l);
//Add relations from this scheme
result.Relations.TopConcept = result.Relations.TopConcept.UnionWith(this.Relations.TopConcept);
result.Relations.Broader = result.Relations.Broader.UnionWith(this.Relations.Broader);
result.Relations.BroaderTransitive = result.Relations.BroaderTransitive.UnionWith(this.Relations.BroaderTransitive);
result.Relations.BroadMatch = result.Relations.BroadMatch.UnionWith(this.Relations.BroadMatch);
result.Relations.Narrower = result.Relations.Narrower.UnionWith(this.Relations.Narrower);
result.Relations.NarrowerTransitive = result.Relations.NarrowerTransitive.UnionWith(this.Relations.NarrowerTransitive);
result.Relations.NarrowMatch = result.Relations.NarrowMatch.UnionWith(this.Relations.NarrowMatch);
result.Relations.Related = result.Relations.Related.UnionWith(this.Relations.Related);
result.Relations.RelatedMatch = result.Relations.RelatedMatch.UnionWith(this.Relations.RelatedMatch);
result.Relations.SemanticRelation = result.Relations.SemanticRelation.UnionWith(this.Relations.SemanticRelation);
result.Relations.MappingRelation = result.Relations.MappingRelation.UnionWith(this.Relations.MappingRelation);
result.Relations.CloseMatch = result.Relations.CloseMatch.UnionWith(this.Relations.CloseMatch);
result.Relations.ExactMatch = result.Relations.ExactMatch.UnionWith(this.Relations.ExactMatch);
result.Relations.Notation = result.Relations.Notation.UnionWith(this.Relations.Notation);
result.Relations.PrefLabel = result.Relations.PrefLabel.UnionWith(this.Relations.PrefLabel);
result.Relations.AltLabel = result.Relations.AltLabel.UnionWith(this.Relations.AltLabel);
result.Relations.HiddenLabel = result.Relations.HiddenLabel.UnionWith(this.Relations.HiddenLabel);
result.Relations.LiteralForm = result.Relations.LiteralForm.UnionWith(this.Relations.LiteralForm);
result.Relations.LabelRelation = result.Relations.LabelRelation.UnionWith(this.Relations.LabelRelation);
//Add annotations from this scheme
result.Annotations.PrefLabel = result.Annotations.PrefLabel.UnionWith(this.Annotations.PrefLabel);
result.Annotations.AltLabel = result.Annotations.AltLabel.UnionWith(this.Annotations.AltLabel);
result.Annotations.HiddenLabel = result.Annotations.HiddenLabel.UnionWith(this.Annotations.HiddenLabel);
result.Annotations.Note = result.Annotations.Note.UnionWith(this.Annotations.Note);
result.Annotations.ChangeNote = result.Annotations.ChangeNote.UnionWith(this.Annotations.ChangeNote);
result.Annotations.EditorialNote = result.Annotations.EditorialNote.UnionWith(this.Annotations.EditorialNote);
result.Annotations.HistoryNote = result.Annotations.HistoryNote.UnionWith(this.Annotations.HistoryNote);
result.Annotations.ScopeNote = result.Annotations.ScopeNote.UnionWith(this.Annotations.ScopeNote);
result.Annotations.Definition = result.Annotations.Definition.UnionWith(this.Annotations.Definition);
result.Annotations.Example = result.Annotations.Example.UnionWith(this.Annotations.Example);
}
return result;
}
#endregion
#region Convert
/// <summary>
/// Gets a graph representation of this scheme, exporting inferences according to the selected behavior
/// </summary>
public RDFGraph ToRDFGraph(RDFSemanticsEnums.RDFOntologyInferenceExportBehavior infexpBehavior)
=> this.ToRDFOntologyData().ToRDFGraph(infexpBehavior);
/// <summary>
/// Asynchronously gets a graph representation of this scheme, exporting inferences according to the selected behavior
/// </summary>
public Task<RDFGraph> ToRDFGraphAsync(RDFSemanticsEnums.RDFOntologyInferenceExportBehavior infexpBehavior)
=> Task.Run(() => ToRDFGraph(infexpBehavior));
/// <summary>
/// Gets an ontology data representation of this scheme
/// </summary>
public RDFOntologyData ToRDFOntologyData()
{
RDFOntologyData result = new RDFOntologyData();
//ConceptScheme
result.AddFact(this);
result.AddClassTypeRelation(this, RDFVocabulary.SKOS.CONCEPT_SCHEME.ToRDFOntologyClass());
//Concepts
foreach (RDFSKOSConcept c in this)
{
result.AddFact(c);
result.AddClassTypeRelation(c, RDFVocabulary.SKOS.CONCEPT.ToRDFOntologyClass());
result.AddAssertionRelation(c, RDFVocabulary.SKOS.IN_SCHEME.ToRDFOntologyObjectProperty(), this);
}
//Collections
foreach (RDFSKOSCollection c in this.Collections.Values)
{
result.AddAssertionRelation(c, RDFVocabulary.SKOS.IN_SCHEME.ToRDFOntologyObjectProperty(), this);
result = result.UnionWith(c.ToRDFOntologyData());
}
//OrderedCollections
foreach (RDFSKOSOrderedCollection o in this.OrderedCollections.Values)
{
result.AddAssertionRelation(o, RDFVocabulary.SKOS.IN_SCHEME.ToRDFOntologyObjectProperty(), this);
result = result.UnionWith(o.ToRDFOntologyData());
}
//Labels
foreach (RDFSKOSLabel l in this.Labels.Values)
{
result.AddFact(l);
result.AddClassTypeRelation(l, RDFVocabulary.SKOS.SKOSXL.LABEL.ToRDFOntologyClass());
result.AddAssertionRelation(l, RDFVocabulary.SKOS.IN_SCHEME.ToRDFOntologyObjectProperty(), this);
}
//Assertions
result.Relations.Assertions = result.Relations.Assertions.UnionWith(this.Relations.TopConcept)
.UnionWith(this.Relations.Broader)
.UnionWith(this.Relations.BroaderTransitive)
.UnionWith(this.Relations.BroadMatch)
.UnionWith(this.Relations.Narrower)
.UnionWith(this.Relations.NarrowerTransitive)
.UnionWith(this.Relations.NarrowMatch)
.UnionWith(this.Relations.Related)
.UnionWith(this.Relations.RelatedMatch)
.UnionWith(this.Relations.SemanticRelation)
.UnionWith(this.Relations.MappingRelation)
.UnionWith(this.Relations.CloseMatch)
.UnionWith(this.Relations.ExactMatch)
.UnionWith(this.Relations.Notation)
.UnionWith(this.Relations.PrefLabel)
.UnionWith(this.Relations.AltLabel)
.UnionWith(this.Relations.HiddenLabel)
.UnionWith(this.Relations.LiteralForm)
.UnionWith(this.Relations.LabelRelation);
//Annotations
result.Annotations.CustomAnnotations = result.Annotations.CustomAnnotations.UnionWith(this.Annotations.PrefLabel)
.UnionWith(this.Annotations.AltLabel)
.UnionWith(this.Annotations.HiddenLabel)
.UnionWith(this.Annotations.Note)
.UnionWith(this.Annotations.ChangeNote)
.UnionWith(this.Annotations.EditorialNote)
.UnionWith(this.Annotations.HistoryNote)
.UnionWith(this.Annotations.ScopeNote)
.UnionWith(this.Annotations.Definition)
.UnionWith(this.Annotations.Example);
return result;
}
/// <summary>
/// Asynchronously gets an ontology data representation of this scheme
/// </summary>
public Task<RDFOntologyData> ToRDFOntologyDataAsync()
=> Task.Run(() => ToRDFOntologyData());
#endregion
#endregion
}
}
|
Ruby
|
# frozen_string_literal: true
module Beta
module Types
module Objects
class PrefectureType < Beta::Types::Objects::Base
implements GraphQL::Types::Relay::Node
global_id_field :id
field :annict_id, Integer, null: false
field :name, String, null: false
end
end
end
end
|
Java
|
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.command.script;
import java.util.Map;
import java.util.Map.Entry;
import javax.script.Bindings;
import javax.script.Invocable;
import javax.script.ScriptContext;
import javax.script.ScriptEngine;
import javax.script.ScriptException;
import com.orientechnologies.orient.core.Orient;
import com.orientechnologies.orient.core.command.OCommandExecutorAbstract;
import com.orientechnologies.orient.core.command.OCommandRequest;
import com.orientechnologies.orient.core.db.record.ODatabaseRecordTx;
import com.orientechnologies.orient.core.metadata.function.OFunction;
/**
* Executes Script Commands.
*
* @see OCommandScript
* @author Luca Garulli
*
*/
public class OCommandExecutorFunction extends OCommandExecutorAbstract {
protected OCommandFunction request;
public OCommandExecutorFunction() {
}
@SuppressWarnings("unchecked")
public OCommandExecutorFunction parse(final OCommandRequest iRequest) {
request = (OCommandFunction) iRequest;
return this;
}
public Object execute(final Map<Object, Object> iArgs) {
return executeInContext(null, iArgs);
}
public Object executeInContext(final Map<String, Object> iContext, final Map<Object, Object> iArgs) {
parserText = request.getText();
final ODatabaseRecordTx db = (ODatabaseRecordTx) getDatabase();
final OFunction f = db.getMetadata().getFunctionLibrary().getFunction(parserText);
final OScriptManager scriptManager = Orient.instance().getScriptManager();
final ScriptEngine scriptEngine = scriptManager.getEngine(f.getLanguage());
final Bindings binding = scriptManager.bind(scriptEngine, db, iContext, iArgs);
try {
scriptEngine.setBindings(binding, ScriptContext.ENGINE_SCOPE);
// COMPILE FUNCTION LIBRARY
scriptEngine.eval(scriptManager.getLibrary(db, f.getLanguage()));
if (scriptEngine instanceof Invocable) {
// INVOKE AS FUNCTION. PARAMS ARE PASSED BY POSITION
final Invocable invocableEngine = (Invocable) scriptEngine;
Object[] args = null;
if (iArgs != null) {
args = new Object[iArgs.size()];
int i = 0;
for (Entry<Object, Object> arg : iArgs.entrySet())
args[i++] = arg.getValue();
}
return invocableEngine.invokeFunction(parserText, args);
} else {
// INVOKE THE CODE SNIPPET
return scriptEngine.eval(invokeFunction(f, iArgs == null ? new Object[0] : iArgs.values().toArray()), binding);
}
} catch (ScriptException e) {
throw new OCommandScriptException("Error on execution of the script", request.getText(), e.getColumnNumber(), e);
} catch (NoSuchMethodException e) {
throw new OCommandScriptException("Error on execution of the script", request.getText(), 0, e);
} finally {
scriptManager.unbind(binding);
}
}
public boolean isIdempotent() {
return false;
}
@Override
protected void throwSyntaxErrorException(String iText) {
throw new OCommandScriptException("Error on execution of the script: " + iText, request.getText(), 0);
}
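/**
 * Builds a literal "name(arg1,arg2);" invocation string for engines that are
 * not {@link Invocable}. Arguments are appended verbatim, with no quoting or
 * escaping, so string arguments are assumed to be already escaped.
 */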
protected String invokeFunction(final OFunction f, Object[] iArgs) {
final StringBuilder code = new StringBuilder();
code.append(f.getName());
code.append('(');
int i = 0;
for (Object a : iArgs) {
if (i++ > 0)
code.append(',');
code.append(a);
}
code.append(");");
return code.toString();
}
}
|
Java
|
import java.util.Scanner;
/**
* @author Oleg Cherednik
* @since 13.07.2018
*/
public class Solution {
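/**
 * Two-pointer scan from both ends. On the first mismatch at (i, j), checks
 * whether skipping s[j] leaves a palindrome: if so, j is the index to remove;
 * otherwise removing s[i] must work, since the input guarantees a one-removal
 * solution. Returns -1 if the string is already a palindrome.
 */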
static int palindromeIndex(String s) {
for (int i = 0, j = s.length() - 1; i < j; i++, j--) {
if (s.charAt(i) == s.charAt(j))
continue;
for (int k = i, m = j - 1; k < m; k++, m--)
if (s.charAt(k) != s.charAt(m))
return i;
return j;
}
return -1;
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) {
int q = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int qItr = 0; qItr < q; qItr++) {
String s = scanner.nextLine();
int result = palindromeIndex(s);
System.out.println(String.valueOf(result));
}
scanner.close();
}
}
|
Java
|
package de.newsarea.homecockpit.connector.facade.registration.util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.net.URL;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
public final class ClassLoaderHelper {
private static Logger log = LoggerFactory.getLogger(ClassLoaderHelper.class);
private ClassLoaderHelper() { }
public static Constructor<?> determineFirstConstructor(Class<?> clazz) {
try {
for(Constructor<?> constructor : clazz.getConstructors()) {
return constructor;
}
} catch (SecurityException e) {
log.error(e.getMessage(), e);
}
return null;
}
public static Constructor<?> determineConstructorByArgumentTypes(Class<?> clazz, Class<?>[] argumentTypes) {
try {
for(Constructor<?> constructor : clazz.getConstructors()) {
if(isAssignableFrom(constructor, argumentTypes)) {
return constructor;
}
}
} catch (SecurityException e) {
log.error(e.getMessage(), e);
}
return null;
}
private static boolean isAssignableFrom(Constructor<?> constructor, Class<?>[] argumentTypes) {
Class<?>[] constructorArgTypes = constructor.getParameterTypes();
if(constructorArgTypes.length != argumentTypes.length) {
return false;
}
// ~
for(int i=0; i < argumentTypes.length; i++) {
if(!argumentTypes[i].isAssignableFrom(constructorArgTypes[i])) {
return false;
}
}
return true;
}
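/**
 * Note: scans only directory-based classpath entries; classes packaged
 * inside JAR files are not discovered by this approach.
 */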
public static List<Class<?>> determineClasses(String packageName) throws ClassNotFoundException, IOException {
ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
assert classLoader != null;
String path = packageName.replace('.', '/');
Enumeration<URL> resources = classLoader.getResources(path);
List<File> dirs = new ArrayList<>();
while (resources.hasMoreElements()) {
URL resource = resources.nextElement();
dirs.add(new File(resource.getFile().replaceAll("%20", " ")));
}
ArrayList<Class<?>> classes = new ArrayList<>();
for (File directory : dirs) {
classes.addAll(findClasses(directory, packageName));
}
return classes;
}
public static List<Class<?>> findClasses(File directory, String packageName) throws ClassNotFoundException {
List<Class<?>> classes = new ArrayList<>();
if (!directory.exists()) {
return classes;
}
File[] files = directory.listFiles();
for (File file : files) {
if (file.isDirectory()) {
assert !file.getName().contains(".");
classes.addAll(findClasses(file, packageName + "." + file.getName()));
} else if (file.getName().endsWith(".class")) {
classes.add(Class.forName(packageName + '.' + file.getName().substring(0, file.getName().length() - 6)));
}
}
return classes;
}
public static Method determineSetterMethod(Class<?> clazz, String name) {
for(Method method : clazz.getMethods()) {
if(method.getName().equalsIgnoreCase("set" + name)) {
return method;
}
}
return null;
}
}
|
Java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Upendra Jariya
* @sponsor Douglas Johnson
* @version 1.0
* @since 2014-11-10
*/
package tools.datasync.utils;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.log4j.Logger;
import tools.datasync.api.utils.HashGenerator;
public class Md5HashGenerator implements HashGenerator {
private static Md5HashGenerator instance = null;
private static Logger LOG = Logger.getLogger(Md5HashGenerator.class
.getName());
private Md5HashGenerator() {
}
public static synchronized Md5HashGenerator getInstance() {
if (instance == null) {
instance = new Md5HashGenerator();
}
return instance;
}
public String generate(String data) {
try {
// Hash the input directly; hashing the digest again would compute md5(md5(data)).
return DigestUtils.md5Hex(data);
} catch (Exception e) {
LOG.warn("Error while generating checksum on value [" + data + "]",
e);
return null;
}
}
public boolean validate(String data, String hash) {
String newHash = generate(data);
return newHash != null && newHash.equals(hash);
}
}
|
Java
|
package com.github.database.rider.core.script;
import org.assertj.core.api.SoftAssertions;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import javax.script.ScriptException;
import static org.assertj.core.api.Java6Assertions.assertThat;
public class ScriptEngineManagerWrapperTest {
@Rule
public ExpectedException exceptionRule = ExpectedException.none();
private ScriptEngineManagerWrapper scriptEngineManager = ScriptEngineManagerWrapper.getInstance();
private SoftAssertions softly = new SoftAssertions();
@Before
public void init() {
softly = new SoftAssertions();
}
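// The "js:"/"groovy:" prefix apparently selects the script engine by name
// (inferred from the tests below, including the 'kotlin' failure case).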
@Test
public void shouldGetJsScriptResult() throws ScriptException {
Object scriptResult = ScriptEngineManagerWrapper.getInstance().getScriptResult("js: 1+1");
assertThat(scriptResult).isEqualTo(2);
}
@Test
public void shouldGetGroovyScriptResult() throws ScriptException {
Object scriptResult = scriptEngineManager.getScriptResult("groovy: 1+1");
assertThat(scriptResult).isEqualTo(2);
}
@Test
public void shouldNotGetScriptResultFromUnknownEngine() throws ScriptException {
exceptionRule.expect(RuntimeException.class);
exceptionRule.expectMessage("Could not find script engine by name 'kotlin'");
scriptEngineManager.getScriptResult("kotlin: 1+1");
}
@Test
public void shouldAssertValueGreaterThanZero() throws ScriptException {
String script = "js:(value > 0)";
softly.assertThat(scriptEngineManager.getScriptAssert(script, 2)).as("js script with value=2").isTrue();
softly.assertThat(scriptEngineManager.getScriptAssert(script, 0)).as("js script with value=0").isFalse();
softly.assertThat(scriptEngineManager.getScriptAssert(script, -1)).as("js script with value=-1").isFalse();
script = "groovy:(value > 0)";
softly.assertThat(scriptEngineManager.getScriptAssert(script, 2)).as("groovy script with value=2").isTrue();
softly.assertThat(scriptEngineManager.getScriptAssert(script, 0)).as("groovy script with value=0").isFalse();
softly.assertThat(scriptEngineManager.getScriptAssert(script, -1)).as("groovy script with value=-1").isFalse();
softly.assertAll();
}
@Test
public void shouldAssertNullValue() throws ScriptException {
SoftAssertions soft = new SoftAssertions();
String script = "js:(value == null)";
soft.assertThat(scriptEngineManager.getScriptAssert(script, null)).as("js script with null value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, 1)).as("js script with non-null value").isFalse();
script = "groovy:(value == null)";
soft.assertThat(scriptEngineManager.getScriptAssert(script, null)).as("groovy script with null value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, 1)).as("groovy script with non-null value").isFalse();
soft.assertAll();
}
@Test
public void shouldAssertContainsValue() throws ScriptException {
SoftAssertions soft = new SoftAssertions();
String script = "js:(value.contains('dbunit'))";
soft.assertThat(scriptEngineManager.getScriptAssert(script, "dbunit rules")).as("js script with 'dbunit rules' value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, "database rider rules")).as("js script 'database rider' value").isFalse();
script = "groovy:(value.contains('dbunit'))";
soft.assertThat(scriptEngineManager.getScriptAssert(script, "dbunit rules")).as("groovy script with 'dbunit rules' value").isTrue();
soft.assertThat(scriptEngineManager.getScriptAssert(script, "database rider rules")).as("groovy script 'database rider' value").isFalse();
soft.assertAll();
}
@Test
public void shouldNotAssertInvalidScript() throws ScriptException {
exceptionRule.expect(ScriptException.class);
exceptionRule.expectMessage("value.includes is not a function");
String script = "js:(value.includes('dbunit'))";
scriptEngineManager.getScriptAssert(script, "dbunit rules");
}
}
|
JavaScript
|
/*global describe, beforeEach, it*/
'use strict';
var assert = require('yeoman-generator').assert;
var helper = require('./helper');
describe('mcap:connections', function () {
beforeEach(function (done) {
var answers = {
name: 'MyApp'
};
// Creates a generator with the default options / arguments
helper.createAppGenerator({
answers: answers
}, done);
});
it('creates expected files', function (done) {
var expectedFiles = [
'connections/sap.json'
];
var expectedContent = {
name: 'SAP',
description: 'SAP API',
type: 'rest',
properties: {
descriptorUrl: 'http://sap.mway.io',
username: 'admin',
password: 'root'
}
};
var answers = {
name: 'SAP',
description: 'SAP API',
descriptorUrl: 'http://sap.mway.io',
username: 'admin',
password: 'root'
};
helper.createSubGenerator('connection', {answers: answers}, function () {
assert.file(expectedFiles);
helper.deepEqual('connections/sap.json', expectedContent);
done();
});
});
});
|
Java
|
/*
* Copyright 2018 Google LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.cloud.tools.jib.api;
import java.util.Objects;
/** Holds credentials (username and password). */
public class Credential {
// If the username is set to <token>, the secret would be a refresh token.
// https://github.com/docker/cli/blob/master/docs/reference/commandline/login.md#credential-helper-protocol
public static final String OAUTH2_TOKEN_USER_NAME = "<token>";
/**
* Gets a {@link Credential} configured with a username and password.
*
* @param username the username
* @param password the password
* @return a new {@link Credential}
*/
public static Credential from(String username, String password) {
return new Credential(username, password);
}
private final String username;
private final String password;
private Credential(String username, String password) {
this.username = username;
this.password = password;
}
/**
* Gets the username.
*
* @return the username
*/
public String getUsername() {
return username;
}
/**
* Gets the password.
*
* @return the password
*/
public String getPassword() {
return password;
}
/**
* Check whether this credential is an OAuth 2.0 refresh token.
*
* @return true if this credential is an OAuth 2.0 refresh token.
*/
public boolean isOAuth2RefreshToken() {
return OAUTH2_TOKEN_USER_NAME.equals(username);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof Credential)) {
return false;
}
Credential otherCredential = (Credential) other;
return username.equals(otherCredential.username) && password.equals(otherCredential.password);
}
@Override
public int hashCode() {
return Objects.hash(username, password);
}
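// Note: renders the password in plain text; avoid logging this value.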
@Override
public String toString() {
return username + ":" + password;
}
}
|
Scala
|
package com.github.agourlay.cornichon.steps.wrapped
import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.steps.regular.assertStep.{ AssertStep, GenericEqualityAssertion }
import com.github.agourlay.cornichon.testHelpers.CommonTestSuite
import munit.FunSuite
import scala.concurrent.duration._
class WithinStepSpec extends FunSuite with CommonTestSuite {
test("controls duration of 'within' wrapped steps") {
val d = 50.millis
val nested = AssertStep(
"possible random value step",
_ => {
Thread.sleep(10)
GenericEqualityAssertion(true, true)
}
) :: Nil
val withinStep = WithinStep(nested, d)
val s = Scenario("scenario with Within", withinStep :: Nil)
val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
assert(res.isSuccess)
}
test("fails if duration of 'within' is exceeded") {
val d = 10.millis
val nested = AssertStep(
"possible random value step",
_ => {
Thread.sleep(20)
GenericEqualityAssertion(true, true)
}
) :: Nil
val withinStep = WithinStep(nested, d)
val s = Scenario("scenario with Within", withinStep :: Nil)
val res = awaitIO(ScenarioRunner.runScenario(Session.newEmpty)(s))
assert(!res.isSuccess)
}
}
|
JavaScript
|
const browserSync = require('../../../');
const utils = require('../utils');
const register = require('../../../dist/plugins/clients').ClientEvents.register;
const assert = require('chai').assert;
describe('Client connection stream', function () {
it('does not have duplicates', function (done) {
browserSync.create({}).subscribe(function (bs) {
const client = utils.getClientSocket(bs);
client.emit(register, utils.getClient('123456'));
client.emit(register, utils.getClient('123456'));
bs.clients$.skip(1)
.take(2)
.toArray()
.subscribe(function (clients) {
assert.equal(clients[0].size, 1);
assert.equal(clients[1].size, 1);
const jsClients1 = clients[0].toList().toJS();
const jsClients2 = clients[1].toList().toJS();
assert.equal(jsClients1[0].id, '123456');
assert.equal(jsClients2[0].id, '123456');
bs.cleanup();
done();
}, function (err) {done(err)});
});
});
it('allows unique clients', function (done) {
browserSync.create({}).subscribe(function (bs) {
const client = utils.getClientSocket(bs);
client.emit(register, utils.getClient('xyz'));
client.emit(register, utils.getClient('zxy'));
bs.clients$.skip(1)
.take(2)
.toArray()
.subscribe(function (clients) {
assert.equal(clients[0].size, 1);
assert.equal(clients[1].size, 2);
const jsClients1 = clients[0].toList().toJS();
const jsClients2 = clients[1].toList().toJS();
assert.equal(jsClients1[0].id, 'xyz');
assert.equal(jsClients2[0].id, 'xyz');
assert.equal(jsClients2[1].id, 'zxy');
bs.cleanup();
done();
}, function(err) { done(err) });
});
});
it('allows unique clients (stress)', function (done) {
browserSync.create({}).subscribe(function (bs) {
for (var i = 1, n = 51; i < n; i += 1) {
utils.getClientSocket(bs).emit(register, utils.getClient('id-' + i));
}
bs.clients$.skip(1)
.take(50)
.toArray()
.subscribe(function (clients) {
assert.equal(clients[49].size, 50);
assert.ok(clients[49].get('id-40'));
assert.equal(clients[49].get('id-40').get('id'), 'id-40');
bs.cleanup();
done();
}, function(err) { done(err) });
});
});
});
|
C#
|
using Lucene.Net.Diagnostics;
using System.Runtime.CompilerServices;
namespace Lucene.Net.Util.Fst
{
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using DataInput = Lucene.Net.Store.DataInput;
using DataOutput = Lucene.Net.Store.DataOutput;
/// <summary>
/// An FST <see cref="Outputs{T}"/> implementation, holding two other outputs.
/// <para/>
/// @lucene.experimental
/// </summary>
public class PairOutputs<A, B> : Outputs<PairOutputs<A, B>.Pair>
{
private readonly Pair NO_OUTPUT;
private readonly Outputs<A> outputs1;
private readonly Outputs<B> outputs2;
/// <summary>
/// Holds a single pair of two outputs. </summary>
public class Pair
{
public A Output1 { get; private set; }
public B Output2 { get; private set; }
// use newPair
internal Pair(A output1, B output2)
{
this.Output1 = output1;
this.Output2 = output2;
}
public override bool Equals(object other)
{
// LUCENENET specific - simplified expression
return ReferenceEquals(other, this) || (other is Pair pair && Output1.Equals(pair.Output1) && Output2.Equals(pair.Output2));
}
public override int GetHashCode()
{
return Output1.GetHashCode() + Output2.GetHashCode();
}
}
public PairOutputs(Outputs<A> outputs1, Outputs<B> outputs2)
{
this.outputs1 = outputs1;
this.outputs2 = outputs2;
NO_OUTPUT = new Pair(outputs1.NoOutput, outputs2.NoOutput);
}
/// <summary>
/// Create a new <see cref="Pair"/> </summary>
public virtual Pair NewPair(A a, B b)
{
if (a.Equals(outputs1.NoOutput))
{
a = outputs1.NoOutput;
}
if (b.Equals(outputs2.NoOutput))
{
b = outputs2.NoOutput;
}
if (a.Equals(outputs1.NoOutput) && b.Equals(outputs2.NoOutput))
{
return NO_OUTPUT;
}
else
{
var p = new Pair(a, b);
if (Debugging.AssertsEnabled) Debugging.Assert(Valid(p));
return p;
}
}
// for assert
private bool Valid(Pair pair)
{
bool noOutput1 = pair.Output1.Equals(outputs1.NoOutput);
bool noOutput2 = pair.Output2.Equals(outputs2.NoOutput);
// Mirror the Java assert: a component that equals NoOutput must be the
// canonical NoOutput instance (reference check, not Equals), which is
// what NewPair guarantees.
if (noOutput1 && !ReferenceEquals(pair.Output1, outputs1.NoOutput))
{
return false;
}
if (noOutput2 && !ReferenceEquals(pair.Output2, outputs2.NoOutput))
{
return false;
}
if (noOutput1 && noOutput2)
{
return pair.Equals(NO_OUTPUT);
}
return true;
}
public override Pair Common(Pair pair1, Pair pair2)
{
if (Debugging.AssertsEnabled)
{
Debugging.Assert(Valid(pair1));
Debugging.Assert(Valid(pair2));
}
return NewPair(outputs1.Common(pair1.Output1, pair2.Output1), outputs2.Common(pair1.Output2, pair2.Output2));
}
public override Pair Subtract(Pair output, Pair inc)
{
if (Debugging.AssertsEnabled)
{
Debugging.Assert(Valid(output));
Debugging.Assert(Valid(inc));
}
return NewPair(outputs1.Subtract(output.Output1, inc.Output1), outputs2.Subtract(output.Output2, inc.Output2));
}
public override Pair Add(Pair prefix, Pair output)
{
if (Debugging.AssertsEnabled)
{
Debugging.Assert(Valid(prefix));
Debugging.Assert(Valid(output));
}
return NewPair(outputs1.Add(prefix.Output1, output.Output1), outputs2.Add(prefix.Output2, output.Output2));
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public override void Write(Pair output, DataOutput writer)
{
if (Debugging.AssertsEnabled) Debugging.Assert(Valid(output));
outputs1.Write(output.Output1, writer);
outputs2.Write(output.Output2, writer);
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public override Pair Read(DataInput @in)
{
A output1 = outputs1.Read(@in);
B output2 = outputs2.Read(@in);
return NewPair(output1, output2);
}
public override Pair NoOutput => NO_OUTPUT;
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public override string OutputToString(Pair output)
{
if (Debugging.AssertsEnabled) Debugging.Assert(Valid(output));
return "<pair:" + outputs1.OutputToString(output.Output1) + "," + outputs2.OutputToString(output.Output2) + ">";
}
public override string ToString()
{
return "PairOutputs<" + outputs1 + "," + outputs2 + ">";
}
}
}
|
Java
|
# Orchis lutea Dulac SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null
|
Java
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_112) on Tue Sep 12 14:31:26 MST 2017 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>Uses of Interface org.wildfly.swarm.config.management.security_realm.LdapAuthorizationSupplier (BOM: * : All 2017.9.5 API)</title>
<meta name="date" content="2017-09-12">
<link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Interface org.wildfly.swarm.config.management.security_realm.LdapAuthorizationSupplier (BOM: * : All 2017.9.5 API)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/wildfly/swarm/config/management/security_realm/LdapAuthorizationSupplier.html" title="interface in org.wildfly.swarm.config.management.security_realm">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">WildFly Swarm API, 2017.9.5</div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/wildfly/swarm/config/management/security_realm/class-use/LdapAuthorizationSupplier.html" target="_top">Frames</a></li>
<li><a href="LdapAuthorizationSupplier.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Interface org.wildfly.swarm.config.management.security_realm.LdapAuthorizationSupplier" class="title">Uses of Interface<br>org.wildfly.swarm.config.management.security_realm.LdapAuthorizationSupplier</h2>
</div>
<div class="classUseContainer">
<ul class="blockList">
<li class="blockList">
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing packages, and an explanation">
<caption><span>Packages that use <a href="../../../../../../../org/wildfly/swarm/config/management/security_realm/LdapAuthorizationSupplier.html" title="interface in org.wildfly.swarm.config.management.security_realm">LdapAuthorizationSupplier</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Package</th>
<th class="colLast" scope="col">Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><a href="#org.wildfly.swarm.config.management">org.wildfly.swarm.config.management</a></td>
<td class="colLast"> </td>
</tr>
</tbody>
</table>
</li>
<li class="blockList">
<ul class="blockList">
<li class="blockList"><a name="org.wildfly.swarm.config.management">
<!-- -->
</a>
<h3>Uses of <a href="../../../../../../../org/wildfly/swarm/config/management/security_realm/LdapAuthorizationSupplier.html" title="interface in org.wildfly.swarm.config.management.security_realm">LdapAuthorizationSupplier</a> in <a href="../../../../../../../org/wildfly/swarm/config/management/package-summary.html">org.wildfly.swarm.config.management</a></h3>
<table class="useSummary" border="0" cellpadding="3" cellspacing="0" summary="Use table, listing methods, and an explanation">
<caption><span>Methods in <a href="../../../../../../../org/wildfly/swarm/config/management/package-summary.html">org.wildfly.swarm.config.management</a> with parameters of type <a href="../../../../../../../org/wildfly/swarm/config/management/security_realm/LdapAuthorizationSupplier.html" title="interface in org.wildfly.swarm.config.management.security_realm">LdapAuthorizationSupplier</a></span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tbody>
<tr class="altColor">
<td class="colFirst"><code><a href="../../../../../../../org/wildfly/swarm/config/management/SecurityRealm.html" title="type parameter in SecurityRealm">T</a></code></td>
<td class="colLast"><span class="typeNameLabel">SecurityRealm.</span><code><span class="memberNameLink"><a href="../../../../../../../org/wildfly/swarm/config/management/SecurityRealm.html#ldapAuthorization-org.wildfly.swarm.config.management.security_realm.LdapAuthorizationSupplier-">ldapAuthorization</a></span>(<a href="../../../../../../../org/wildfly/swarm/config/management/security_realm/LdapAuthorizationSupplier.html" title="interface in org.wildfly.swarm.config.management.security_realm">LdapAuthorizationSupplier</a> supplier)</code>
<div class="block">Configuration to use LDAP as the user repository.</div>
</td>
</tr>
</tbody>
</table>
</li>
</ul>
</li>
</ul>
</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/wildfly/swarm/config/management/security_realm/LdapAuthorizationSupplier.html" title="interface in org.wildfly.swarm.config.management.security_realm">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../../../../../../../overview-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">WildFly Swarm API, 2017.9.5</div>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/wildfly/swarm/config/management/security_realm/class-use/LdapAuthorizationSupplier.html" target="_top">Frames</a></li>
<li><a href="LdapAuthorizationSupplier.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2017 <a href="http://www.jboss.org">JBoss by Red Hat</a>. All rights reserved.</small></p>
</body>
</html>
|
Java
|
#!/usr/bin/env python
"""A flow to run checks for a host."""
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib.checks import checks
from grr.proto import flows_pb2
class CheckFlowArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.CheckFlowArgs
class CheckRunner(flow.GRRFlow):
"""This flow runs checks on a host.
CheckRunner:
- Identifies what checks should be run for a host.
- Identifies the artifacts that need to be collected to perform those checks.
- Orchestrates collection of the host data.
- Routes host data to the relevant checks.
- Returns check data ready for reporting.
"""
friendly_name = "Run Checks"
category = "/Checks/"
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["MapArtifactData"])
def Start(self):
"""."""
client = aff4.FACTORY.Open(self.client_id, token=self.token)
self.state.Register("knowledge_base",
client.Get(client.Schema.KNOWLEDGE_BASE))
self.state.Register("labels", client.GetLabels())
self.state.Register("artifacts_wanted", set())
self.state.Register("artifacts_fetched", set())
self.state.Register("checks_run", [])
self.state.Register("checks_with_findings", [])
self.state.Register("results_store", None)
self.state.Register("host_data", {})
self.CallState(next_state="MapArtifactData")
@flow.StateHandler(next_state=["AddResponses", "RunChecks"])
def MapArtifactData(self, responses):
"""Get processed data, mapped to artifacts."""
self.state.artifacts_wanted = checks.CheckRegistry.SelectArtifacts(
os=self.state.knowledge_base.os)
# Fetch Artifacts and map results to the artifacts that generated them.
# This is an inefficient collection, but necessary because results need to
# be mapped to the originating artifact. An alternative would be to have
# rdfvalues labeled with originating artifact ids.
for artifact_id in self.state.artifacts_wanted:
self.CallFlow("ArtifactCollectorFlow", artifact_list=[artifact_id],
request_data={"artifact_id": artifact_id},
next_state="AddResponses")
self.CallState(next_state="RunChecks")
@flow.StateHandler()
def AddResponses(self, responses):
artifact_id = responses.request_data["artifact_id"]
# TODO(user): Check whether artifact collection succeeded.
self.state.host_data[artifact_id] = list(responses)
@flow.StateHandler(next_state=["Done"])
def RunChecks(self, responses):
if not responses.success:
raise RuntimeError("Checks did not run successfully.")
# Hand host data across to checks. Do this after all data has been collected
# in case some checks require multiple artifacts/results.
for finding in checks.CheckHost(self.state.host_data,
os=self.state.knowledge_base.os):
self.state.checks_run.append(finding.check_id)
if finding.anomaly:
self.state.checks_with_findings.append(finding.check_id)
self.SendReply(finding)
|
Java
|
/* Copyright 2017 - 2022 R. Thomas
* Copyright 2017 - 2022 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <string>
#include <sstream>
#include "LIEF/MachO/hash.hpp"
#include "LIEF/MachO/RelocationObject.hpp"
#include "pyMachO.hpp"
namespace LIEF {
namespace MachO {
template<class T>
using getter_t = T (RelocationObject::*)(void) const;
template<class T>
using setter_t = void (RelocationObject::*)(T);
template<>
void create<RelocationObject>(py::module& m) {
py::class_<RelocationObject, Relocation>(m, "RelocationObject",
R"delim(
Class that represents a relocation presents in the MachO object
file (``.o``). Usually, this kind of relocation is found in the :class:`lief.MachO.Section`.
)delim")
.def_property("value",
static_cast<getter_t<int32_t>>(&RelocationObject::value),
static_cast<setter_t<int32_t>>(&RelocationObject::value),
R"delim(
For **scattered** relocations, the address of the relocatable expression
for the item in the file that needs to be updated if the address is changed.
For relocatable expressions with the difference of two section addresses,
the address from which to subtract (in mathematical terms, the minuend)
is contained in the first relocation entry and the address to subtract (the subtrahend)
is contained in the second relocation entry.
)delim")
.def_property_readonly("is_scattered",
&RelocationObject::is_scattered,
"``True`` if the relocation is a scattered one")
.def("__eq__", &RelocationObject::operator==)
.def("__ne__", &RelocationObject::operator!=)
.def("__hash__",
[] (const RelocationObject& relocation) {
return Hash::hash(relocation);
})
.def("__str__",
[] (const RelocationObject& relocation)
{
std::ostringstream stream;
stream << relocation;
std::string str = stream.str();
return str;
});
}
}
}
|
Java
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_TF_TFL_TRANSLATE_CL_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_TF_TFL_TRANSLATE_CL_H_
// This file contains command-line options aimed to provide the parameters
// required by the TensorFlow Graph(Def) to TF Lite Flatbuffer conversion. It is
// only intended to be included by binaries.
#include <string>
#include "llvm/Support/CommandLine.h"
// The commandline options are defined in LLVM style, so the caller should
// use llvm::InitLLVM to initialize the options.
//
// Please see the implementation file for documentation of details of these
// options.
// TODO(jpienaar): Revise the command line option parsing here.
extern llvm::cl::opt<std::string> input_file_name;
extern llvm::cl::opt<std::string> output_file_name;
extern llvm::cl::opt<bool> use_splatted_constant;
extern llvm::cl::opt<bool> input_mlir;
extern llvm::cl::opt<bool> output_mlir;
extern llvm::cl::list<std::string> extra_opdefs;
extern llvm::cl::opt<bool> emit_quant_adaptor_ops;
extern llvm::cl::opt<std::string> quant_stats_file_name;
#endif // TENSORFLOW_COMPILER_MLIR_LITE_TF_TFL_TRANSLATE_CL_H_
|
Java
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>Ilmurah</title>
<link rel="shortcut icon" href="img/icon.jpg" />
<!-- Bootstrap Core CSS -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<link rel="stylesheet" href="css/main.css">
<!-- Custom Fonts -->
<link href="font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="http://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700,300italic,400italic,700italic" rel="stylesheet" type="text/css">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<script type="text/javascript">
</script>
<div class="container-fluid">
<div class="row">
<a id="menu-toggle" href="#" class="btn btn-dark btn-lg toggle"><i class="fa fa-bars"></i></a>
<div class="col-md-3 sidebar" id="sidebar-wrapper">
<a id="menu-close" href="#" class="btn btn-light btn-lg pull-right toggle-close"><i class="fa fa-times"></i></a>
<ul class="nav nav-stacked nav-pills">
<a href="index.html" class="sidebar-logo"><img src="img/logo.png" class="img-responsive"></a>
<br/><br/>
<li>
<div class="sidebar-searchbar">
<div class="sidebar-searchbar-form">
<input type="text" class="form-control" placeholder="Search beasiswa...">
</div>
<div class="sidebar-searchbar-button">
<a href="search_result.html" class="btn btn-primary">GO</a>
</div>
</div>
</li>
<br/>
<li>
<a href="mahasiswa.html" >Home Page</a>
</li>
<li>
<a href="advancedsearch.html" >Pencarian Beasiswa Detil</a>
</li>
<li><a href="my_profile.html">Profil</a></li>
<li><a href="index.html">Logout</a></li>
</ul>
</div>
<div class="col-md-9 content-container">
<div class="page-content-container">
<!-- About -->
<section id="about" class="about">
<div class="container-fluid meta-originally-container">
<div class="row">
<div class="col-lg-8 col-lg-offset-2">
<div class="row">
<div class="col-md-10 col-md-offset-2">
<h2>Didi Sumardi</h2>
</div>
</div>
<div class="row">
<div class="col-md-2">
<img src="img/profile.jpg" class="img-circle img-responsive" alt="" />
</div>
<div class="col-md-10">
<h3>NIM</h3>
<p>10016002 <span class="label label-success">Tersambung dengan ol.akademik.itb.ac.id</span></p>
<h3>Alamat Lengkap</h3>
<p>Jalan Terama 35 03/04 Bandung</p>
<h3>Alamat Profil Eksternal</h3>
<p>https://www.linkedin.com/in/didi.s</p>
<p>
<a href="edit_my_profile.html" class="btn btn-default">Ubah</a>
</p>
</div>
</div>
</div>
</div>
<!-- /.row -->
</div>
<!-- /.container -->
</section>
</div>
<!-- Footer -->
<footer id="contact" class="ilmurah-footer">
<div class="container-fluid">
<div class="row">
<div class="col-lg-10 col-lg-offset-1 text-center">
<h4><strong>Stopdown Startup</strong></h4>
<br/><br/>
<p>Sources:</p>
<ul class="list-unstyled">
<li><a href="http://www.dreamersroadmap.com/"><p>DREAMer’s Roadmap</p></a>
</li>
<li><a href="http://www.usatoday.com/story/tech/personal/2013/08/09/app-for-finding-college-scholarships/2636505/"><p>Scholly</p></a>
</li>
</ul>
<br>
<hr class="small">
<p class="text-muted">Copyright © Ilmurah 2016</p>
</div>
</div>
</div>
</footer>
</div>
</div>
</div>
<!-- jQuery -->
<script src="js/jquery.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="js/bootstrap.min.js"></script>
<script type="text/javascript">
$("#menu-toggle").hide();
// Closes the sidebar menu
$("#menu-close").click(function(e) {
e.preventDefault();
$("#sidebar-wrapper").toggleClass("active");
$("#menu-toggle").show();
});
// Opens the sidebar menu
$("#menu-toggle").click(function(e) {
e.preventDefault();
$("#sidebar-wrapper").toggleClass("active");
$("#menu-toggle").hide();
});
// Scrolls to the selected menu item on the page
$(function() {
$('a[href*="#"]:not([href="#"])').click(function() {
if (location.pathname.replace(/^\//, '') == this.pathname.replace(/^\//, '') || location.hostname == this.hostname) {
var target = $(this.hash);
target = target.length ? target : $('[name="' + this.hash.slice(1) + '"]');
if (target.length) {
$('html,body').animate({
scrollTop: target.offset().top
}, 1000);
return false;
}
}
});
});
</script>
</body>
</html>
|
Java
|
/*
* Copyright (C) 2016 Mkhytar Mkhoian
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.justplay1.shoppist.interactor.units;
import com.justplay1.shoppist.executor.PostExecutionThread;
import com.justplay1.shoppist.executor.ThreadExecutor;
import com.justplay1.shoppist.models.UnitModel;
import com.justplay1.shoppist.repository.UnitsRepository;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import java.util.Collections;
import java.util.List;
import static com.justplay1.shoppist.ModelUtil.createFakeUnitModel;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
public class UpdateUnitsTest {
private UpdateUnits useCase;
@Mock private ThreadExecutor mockThreadExecutor;
@Mock private PostExecutionThread mockPostExecutionThread;
@Mock private UnitsRepository mockUnitsRepository;
private List<UnitModel> models;
@Before
public void setUp() {
MockitoAnnotations.initMocks(this);
useCase = new UpdateUnits(mockUnitsRepository, mockThreadExecutor, mockPostExecutionThread);
models = Collections.singletonList(createFakeUnitModel());
useCase.init(models);
}
@Test
public void updateUnitsUseCase_HappyCase() {
useCase.buildUseCaseObservable().subscribe();
verify(mockUnitsRepository).update(models);
verifyNoMoreInteractions(mockUnitsRepository);
verifyZeroInteractions(mockThreadExecutor);
verifyZeroInteractions(mockPostExecutionThread);
}
}
|
Java
|
package fi.rivermouth.talous.auth;
import java.util.ArrayList;
import java.util.List;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.authority.SimpleGrantedAuthority;
import fi.rivermouth.talous.domain.User;
public class UserAuthenticationManager implements AuthenticationManager {
@Override
public Authentication authenticate(Authentication authentication) {
List<GrantedAuthority> grantedAuths = new ArrayList<GrantedAuthority>();
grantedAuths.add(new SimpleGrantedAuthority(User.ROLE));
return new UsernamePasswordAuthenticationToken(authentication.getName(), authentication.getCredentials(), grantedAuths);
}
}
|
Java
|
#!/usr/bin/python3
################################################################################
#
# Copyright 2014 Stjepan Henc <sthenc@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import scipy.io.wavfile as wav
import numpy as np
import copy
class Signal:
# Data loaders
def LoadFromFile(self, file):
self.fs, self.s = wav.read(file)
self.sLength, self.nChans = self.s.shape
def LoadWF(self, waveform, fs):
self.s = waveform
self.fs = fs
self.sLength, self.nChans = self.s.shape
def __init__(self, *args):
#signal properties
self.singlePrecision = 0
self.s = np.array([])
self.fs = 44100
self.sLength = 0
self.nChans = 0
self.weightingFunction = np.hamming #FIXME
#STFT properties
self.S = np.array([])
self.windowLength = 60
self.nfft = 0
self.nfftUtil = 0
self.overlapRatio = 0.5
self.framesPositions = np.array([])
self.nFrames = 0
self.weightingWindow = np.array([])
self.overlap = 0
# Windowing properties
self.sWin = np.array([])
self.sWeights = np.array([])
if len(args) == 1:
if type(args[0]) == type(''): # it's a filename
self.LoadFromFile(args[0])
elif type(args[0]) == type(self): # copy data from other signal
self.__dict__ = copy.deepcopy(args[0].__dict__)
elif len(args) == 2: # args[0] is a waveform, args[1] is the sample freq.
self.LoadWF(args[0], args[1])
|
Java
|
USB Armory
==========
This package contains support for using the
[USB Armory](https://inversepath.com/usbarmory.html) hardware with the firmware
transparency demo.
Since the SoC on the hardware already has ROM we can't patch that to root our
trust there, so for now we'll simply use a first-stage EL3 bootloader to
"enforce" the correctness of the proof bundle before chaining to something
(e.g. a full linux image based app) that represents the firmware being made
transparent.
The enforcement code not being in the masked ROM is an obvious shortcoming of
this demo; however, given the interesting array of security hardware on this
board, it should be possible to use some of it as an alternative trust base.
Storage
-------
> :warning: these are scratch notes, not yet reflective of reality, and so may
> change drastically!
We'll use the µSD card slot of the USB Armory for our purposes.
The SD card will contain:
- our "enforcing" [bootloader](./bootloader)
- the firmware being made discoverable
- a [proof bundle](/binary_transparency/firmware/api/update_package.go)
for the firmware which convinces the bootloader that it _is_ discoverable
and therefore ok to launch.
> :info: the USB Armory is built around an NXP i.MX6 SoC. When booting, the ROM
> loader on this SoC expects to find the first-stage bootloader at the
> 1024th byte of the external storage.
> This allows sufficient space beforehand to store a partition table.
The on-disk partition layout will be:
index | name | size | format | notes
------|------------|---------|--------|-----------------------------------------------
1 | boot | 10M | raw | Must cover disk bytes 1024 onwards as we'll directly write the bootloader here.
2 | proof | 512KB | ext4 | EXT4 filesystem for storing a serialised proof bundle
3 | firmware | 64MB+ | ext4 | EXT4 filesystem containing the bootable firmware image, armory boot config, etc.
### Preparing the SD Card
> :warning: When following the instructions below, be *very sure* you know which
> device is your SD card - if performed with an incorrect device, the instructions below
> can cause data loss!
#### Linux
##### Partition & file-systems
First use the `parted -l` command to figure out which device corresponds to your
SD card.
> :tip: you can run the `parted -l` command twice, once with your SD card
> reader plugged in, and once without to help identify the device.
`/dev/my_sdcard` is used as a placeholder below; replace it with the path to
your SD card device.
```bash
sudo parted /dev/my_sdcard
# double (triple!) check we've got the right device:
(parted) print
...
(parted) mklabel msdos
# Create space for the bootloader
(parted) mkpart primary 1KB 10240KB
# Create a partition for the proofs
(parted) mkpart primary ext4 10241KB 10753KB
# Create a partition for the firmware
(parted) mkpart primary ext4 10754KB 100MB
# Check our work:
(parted) unit b
(parted) print
Model: Generic- Micro SD/M2 (scsi)
Disk /dev/sdc: 15931539456B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags:
Number Start End Size Type File system Flags
1 512B 10240511B 10240000B primary lba
2 10240512B 10753023B 512512B primary ext4 lba
3 10753536B 100000255B 89246720B primary ext4 lba
```
Finally, create filesystems on the 2nd and 3rd partitions of our SDCard:
```bash
$ sudo mkfs.ext4 /dev/my_sdcard2 -L proof
$ sudo mkfs.ext4 /dev/my_sdcard3 -L firmware
```
Next you'll build and install the bootloader on the card.
Compiling the bootloader
------------------------
Follow the instructions on the
[tamago-example](https://github.com/f-secure-foundry/tamago-example#Compiling)
site to set up your tool-chain and environment variables.
To compile the bootloader itself, run the following command in the `bootloader`
directory:
```bash
# Note that START_KERNEL corresponds to the offset of the firmware partition,
# and START_PROOF is the offset of the proof partition
make CROSS_COMPILE=arm-none-eabi- TARGET=usbarmory imx BOOT=uSD START_KERNEL=10753536 START_PROOF=10240512 LEN_KERNEL=89246720
```
If successful, this will create a few files - the one we're interested in is
`armory-boot.imx`, the "native format" bootloader code for the NXP SoC.
##### Install bootloader
Finally, we'll write the bootloader we just built onto the SD card in the right
place:
```bash
# Note that we're writing to the raw device here, NOT the boot partition.
$ sudo dd if=armory-boot.imx of=/dev/my_sdcard bs=512 seek=2 conv=fsync,notrunc
```
Firmware images
---------------
Currently, the bootloader can only chain to either a Linux kernel or a
bare-metal ELF unikernel (only tested with tamago-example thus far).
There are some invariants which must hold for this chain to work:
1. The `firmware` partition MUST be located at the precise offset mentioned
above.
2. The `firmware` partition MUST be formatted with ext4.
3. The `firmware` partition MUST contain a `/boot` directory with at least the
following contents:
* `armory-boot.conf` - a JSON file which tells the bootloader which files
to load
* Either:
* to boot a Linux Kernel:
* a valid ARM linux Kernel image
* a valid DTB file
* to boot ELF unikernel:
* a valid ARM bare-metal ELF binary/unikernel
Note that the `armory-boot.conf` file also contains SHA256 hashes of
all files referenced, and these MUST be correct.
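For illustration, here is a minimal sketch of computing those hashes; the
`/mnt/firmware` mount point and the exact file names are assumptions, not part
of the demo:
```bash
# a sketch: compute the SHA256 hashes that armory-boot.conf must reference
# (paths are illustrative; substitute the files in your firmware partition)
$ sha256sum /mnt/firmware/boot/zImage-5.4.51-0-usbarmory \
/mnt/firmware/boot/imx6ulz-usbarmory-default-5.4.51-0.dtb
```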
To aid in the creation of valid firmware images, use the
`[cmd/usbarmory/image_builder/build.sh](/binary_transparency/firmware/cmd/usbarmory/image_builder/build.sh)`
script, e.g.:
```bash
$ ./cmd/usbarmory/image_builder/build.sh -u ./testdata/firmware/usbarmory/example/tamago-example -o /tmp/armory.ext4
/tmp/armory.ext4: Writing to the journal is not supported.
Created image in /tmp/armory.ext4:
-rw-rw-r-- 1 al al 13M Nov 30 10:39 /tmp/armory.ext4
```
This image can be written to the target partition using the following commands:
```bash
# first, log the image
$ go run ./cmd/publisher/ --logtostderr --binary_path /tmp/armory.ext4 --output_path /tmp/update.ota --device="armory"
# then flash the device firmware
$ sudo $(which go) run ./cmd/flash_tool \
--logtostderr \
--device=armory \
--update_file /tmp/update.ota \
--armory_proof_mount_point /path/to/mounted/proof/partition \
--armory_unikernel_dev /dev/my_sdcard3
```
<details>
<summary>Alternative approach using regular shell commands</summary>
Alternatively, if you prefer to see what's going on, you can achieve a similar result with the following commands:
```bash
# write the firmware image directly to the firmware partition
$ sudo dd if=/tmp/armory.ext4 of=/dev/my_sdcard3 bs=1M conv=fsync
# finally, copy over the proof bundle (assumes /dev/my_sdcard2 is mounted on /mnt/proof)
$ jq '.ProofBundle|@base64d|fromjson' /tmp/update.ota > /tmp/bundle.proof
$ sudo mv /tmp/bundle.proof /mnt/proof/bundle.json
```
</details>
### Linux
> :frog: The [Armory Debian Base Image](https://github.com/f-secure-foundry/usbarmory-debian-base_image/releases)
> is a good source for the kernel (zImage) and dtb files.
>
> You can decompress and mount the image to access the files like so:
> ```bash
> # decompress image
> $ xz -d usbarmory-mark-two-usd-debian_buster-base_image-20200714.raw.xz
> # mount image with loopback:
> # note the offset parameter below - the raw file is a complete disk image, this
> # offset is the first byte of the root partition (you can use fdisk or parted
> # on the raw file to view this yourself)
> $ sudo mount -o loop,ro,offset=5242880 /home/al/Downloads/usbarmory-mark-two-usd-debian_buster-base_image-20200714.raw /mnt
> # the files we're interested in are now visible in /mnt/boot:
> $ ls -l /mnt/boot
> total 8148
> -rw-r--r-- 1 root root 99319 Oct 20 17:13 config-5.4.72-0-usbarmory
> lrwxrwxrwx 1 root root 21 Oct 20 17:14 imx6ull-usbarmory.dtb -> imx6ulz-usbarmory.dtb
> -rw-r--r-- 1 root root 19938 Oct 20 17:14 imx6ulz-usbarmory-default-5.4.72-0.dtb
> lrwxrwxrwx 1 root root 38 Oct 20 17:14 imx6ulz-usbarmory.dtb -> imx6ulz-usbarmory-default-5.4.72-0.dtb
> -rw-r--r-- 1 root root 1488951 Oct 20 17:13 System.map-5.4.72-0-usbarmory
> lrwxrwxrwx 1 root root 25 Oct 20 17:14 zImage -> zImage-5.4.72-0-usbarmory
> -rwxr-xr-x 1 root root 6726952 Oct 20 17:13 zImage-5.4.72-0-usbarmory
> ```
An example `armory-boot.conf` file configured to boot a Linux kernel is:
```json
{
"kernel": [
"/boot/zImage-5.4.51-0-usbarmory",
"aceb3514d5ba6ac591a7d5f2cad680e83a9f848d19763563da8024f003e927c7"
],
"dtb": [
"/boot/imx6ulz-usbarmory-default-5.4.51-0.dtb",
"60d4fe465ef60042293f5723bf4a001d8e75f26e517af2b55e6efaef9c0db1f6"
],
"cmdline": "console=ttymxc1,115200 root=/dev/sdc3 rootwait rw"
}
```
TODO(al): Consider wrapping this up into a script.
### ELF unikernel
> :frog: A good sample unikernel is the
> [tamago-example](https://github.com/f-secure-foundry/tamago-example)
> application.
An example `armory-boot.conf` file configured to boot an ELF unikernel is:
```json
{
"unikernel": [
"/boot/tamago-example",
"aceb3514d5ba6ac591a7d5f2cad680e83a9f848d19763563da8024f003e927c7"
]
}
```
Booting
-------
If all is well, booting the USB Armory using the debug accessory will show
console output like so:
```
Terminal ready
armory-boot: starting kernel image@80800000 params@87000000
Booting Linux on physical CPU 0x0
Linux version 5.4.72-0 (usbarmory@f-secure-foundry) (gcc version 7.5.0 (Ubuntu/Linaro 7.5.0-3ubuntu1~18.04)) #1 PREEMPT Tue Oct 20 16:03:37 UTC 2020
CPU: ARMv7 Processor [410fc075] revision 5 (ARMv7), cr=10c53c7d
CPU: div instructions available: patching division code
CPU: PIPT / VIPT nonaliasing data cache, VIPT aliasing instruction cache
OF: fdt: Machine model: F-Secure USB armory Mk II
Memory policy: Data cache writeback
CPU: All CPU(s) started in SVC mode.
Built 1 zonelists, mobility grouping on. Total pages: 130048
Kernel command line: console=ttymxc1,115200 root=/dev/sda3 rootwait rw
Dentry cache hash table entries: 65536 (order: 6, 262144 bytes, linear)
...
```
Firmware Measurement
--------------------
The 'firmware measurement' hash for the USB Armory is defined to be the SHA256
hash of the raw bytes of the `ext4` **filesystem image** stored in the
'firmware' partition of the SD Card.
Note that this _may well_ be a different size than the partition itself.
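For example, a quick way to compute this measurement for the image built
earlier (path taken from the example above) is:
```bash
# a sketch: the measurement is the SHA256 of the raw ext4 image file,
# which may well differ in size from the partition it is written to
$ sha256sum /tmp/armory.ext4
```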
|
Java
|
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <time.h>
#include "../lib/includes.h"
#include "../lib/blacklist.h"
#include "../lib/logger.h"
#include "../lib/xalloc.h"
#include "iterator.h"
#include "aesrand.h"
#include "shard.h"
#include "state.h"
struct iterator {
cycle_t cycle;
uint8_t num_threads;
shard_t *thread_shards;
uint8_t *complete;
pthread_mutex_t mutex;
uint32_t curr_threads;
};
void shard_complete(uint8_t thread_id, void *arg)
{
iterator_t *it = (iterator_t *) arg;
assert(thread_id < it->num_threads);
pthread_mutex_lock(&it->mutex);
it->complete[thread_id] = 1;
it->curr_threads--;
shard_t *s = &it->thread_shards[thread_id];
zsend.sent += s->state.sent;
zsend.blacklisted += s->state.blacklisted;
zsend.whitelisted += s->state.whitelisted;
zsend.sendto_failures += s->state.failures;
uint8_t done = 1;
for (uint8_t i = 0; done && (i < it->num_threads); ++i) {
done = done && it->complete[i];
}
if (done) {
zsend.finish = now();
zsend.complete = 1;
zsend.first_scanned = it->thread_shards[0].state.first_scanned;
}
pthread_mutex_unlock(&it->mutex);
}
iterator_t* iterator_init(uint8_t num_threads, uint8_t shard,
uint8_t num_shards)
{
uint64_t num_addrs = blacklist_count_allowed();
iterator_t *it = xmalloc(sizeof(struct iterator));
const cyclic_group_t *group = get_group(num_addrs);
if (num_addrs > (1LL << 32)) {
zsend.max_index = 0xFFFFFFFF;
} else {
zsend.max_index = (uint32_t) num_addrs;
}
it->cycle = make_cycle(group, zconf.aes);
it->num_threads = num_threads;
it->curr_threads = num_threads;
it->thread_shards = xcalloc(num_threads, sizeof(shard_t));
it->complete = xcalloc(it->num_threads, sizeof(uint8_t));
pthread_mutex_init(&it->mutex, NULL);
for (uint8_t i = 0; i < num_threads; ++i) {
shard_init(&it->thread_shards[i],
shard,
num_shards,
i,
num_threads,
&it->cycle,
shard_complete,
it
);
}
zconf.generator = it->cycle.generator;
return it;
}
uint32_t iterator_get_sent(iterator_t *it)
{
uint32_t sent = 0;
for (uint8_t i = 0; i < it->num_threads; ++i) {
sent += it->thread_shards[i].state.sent;
}
return sent;
}
shard_t* get_shard(iterator_t *it, uint8_t thread_id)
{
assert(thread_id < it->num_threads);
return &it->thread_shards[thread_id];
}
uint32_t iterator_get_curr_send_threads(iterator_t *it)
{
assert(it);
return it->curr_threads;
}
|
Java
|
/*
* Copyright 2012 Rackspace
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "virgo.h"
#include "virgo_paths.h"
#include "virgo_error.h"
#include "virgo__types.h"
#include "virgo__conf.h"
#include "virgo__util.h"
#include <assert.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef _WIN32
#include <limits.h>
#include <unistd.h>
#include <errno.h>
#endif
virgo_error_t*
virgo_conf_service_name(virgo_t *v, const char *name)
{
if (v->service_name) {
free((void*)v->service_name);
}
v->service_name = strdup(name);
return VIRGO_SUCCESS;
}
virgo_error_t*
virgo_conf_lua_bundle_path(virgo_t *v, const char *path)
{
if (v->lua_bundle_path) {
free((void*)v->lua_bundle_path);
}
v->lua_bundle_path = strdup(path);
return VIRGO_SUCCESS;
}
virgo_error_t*
virgo_conf_lua_load_path(virgo_t *v, const char *path)
{
if (v->lua_load_path) {
free((void*)v->lua_load_path);
}
v->lua_load_path = strdup(path);
return VIRGO_SUCCESS;
}
virgo_error_t*
virgo_conf_args(virgo_t *v)
{
virgo_error_t *err;
const char *arg;
char path[VIRGO_PATH_MAX];
short forced_zip = FALSE;
arg = virgo__argv_get_value(v, "-z", "--zip");
if (arg != NULL) {
err = virgo_conf_lua_load_path(v, arg);
if (err) {
return err;
}
forced_zip = TRUE;
v->try_upgrade = FALSE;
}
if (virgo__argv_has_flag(v, "-o", "--no-upgrade")) {
v->try_upgrade = FALSE;
}
if (virgo__argv_has_flag(v, "-r", "--exit-on-upgrade")) {
v->exit_on_upgrade = TRUE;
}
if (virgo__argv_has_flag(v, "-p", "--restart-sysv-on-upgrade")) {
v->restart_on_upgrade = TRUE;
}
arg = virgo__argv_get_value(v, "-l", "--logfile");
if (arg != NULL) {
v->log_path = strdup(arg);
}
if (!forced_zip) {
arg = virgo__argv_get_value(v, "-b", "--bundle-path");
if (arg) {
virgo_conf_lua_bundle_path(v, arg);
}
/* bundle filename */
err = virgo__paths_get(v, VIRGO_PATH_BUNDLE, path, sizeof(path));
if (err) {
return err;
}
err = virgo_conf_lua_load_path(v, path);
if (err) {
return err;
}
}
return VIRGO_SUCCESS;
}
const char*
virgo_conf_get(virgo_t *v, const char *key)
{
return virgo__conf_get(v, key);
}
static void
nuke_newlines(char *p)
{
size_t i;
size_t l = strlen(p);
for (i = 0; i < l; i++) {
if (p[i] == '\n') {
p[i] = '\0';
}
if (p[i] == '\r') {
p[i] = '\0';
}
}
}
static char*
next_chunk(char **x_p)
{
char *p = *x_p;
while (isspace(p[0])) { p++;};
nuke_newlines(p);
*x_p = p;
return strdup(p);
}
static void
conf_insert_node_to_table(virgo_t *v, const char *key, const char *value)
{
lua_pushstring(v->L, key);
lua_pushstring(v->L, value);
lua_settable(v->L, -3);
}
static void
conf_parse(virgo_t *v, FILE *fp)
{
char buf[8096];
char *p = NULL;
while ((p = fgets(buf, sizeof(buf), fp)) != NULL) {
char *key;
virgo_conf_t *node;
/* comment lines */
if (p[0] == '#') {
continue;
}
while (isspace(p[0])) { p++;};
if (strlen(p) == 0) {
continue;
}
/* Insert into list */
node = calloc(1, sizeof(*node));
node->next = v->config;
v->config = node;
/* calculate key/value pairs */
key = next_chunk(&p);
p = key;
while(!isspace(p[0])) { p++;};
*p = '\0'; /* null terminate key */
node->key = strdup(key);
p++;
while(isspace(p[0])) { p++;};
node->value = strdup(p);
free(key);
conf_insert_node_to_table(v, node->key, node->value);
}
}
const char*
virgo__conf_get(virgo_t *v, const char *key)
{
virgo_conf_t *p = v->config;
if (strcmp("lua_load_path", key) == 0) {
return v->lua_load_path;
}
while (p) {
if (strcmp(p->key, key) == 0) {
return p->value;
}
p = p->next;
}
return NULL;
}
void
virgo__conf_destroy(virgo_t *v)
{
virgo_conf_t *p = v->config, *t;
while (p) {
t = p->next;
free((void*)p->key);
free((void*)p->value);
free(p);
p = t;
}
v->config = NULL;
}
static virgo_error_t*
virgo__conf_get_path(virgo_t *v, const char **p_path)
{
#ifdef _WIN32
char *programdata;
const char *path;
path = virgo__argv_get_value(v, "-c", "--config");
if (path == NULL) {
char gen_path[512];
programdata = getenv("ProgramData");
if (programdata == NULL) {
return virgo_error_create(VIRGO_EINVAL, "Unable to get environment variable: \"ProgramData\"\n");
}
sprintf(gen_path, "%s\\%s\\config\\%s",
programdata,
VIRGO_DEFAULT_CONFIG_WINDOWS_DIRECTORY,
VIRGO_DEFAULT_CONFIG_FILENAME);
*p_path = strdup(gen_path);
return VIRGO_SUCCESS;
}
*p_path = strdup(path);
return VIRGO_SUCCESS;
#else /* !_WIN32 */
char *path;
char buffer[PATH_MAX];
int count;
path = (char*) virgo__argv_get_value(v, "-c", "--config");
if (path == NULL) {
virgo__paths_get(v, VIRGO_PATH_CONFIG_DIR, buffer, sizeof(buffer));
count = strlen(buffer) + strlen(VIRGO_DEFAULT_CONFIG_FILENAME) + strlen(SEP) + 1;
*p_path = (char*) malloc(count);
snprintf((char*) *p_path, count, "%s%s%s", buffer, SEP, VIRGO_DEFAULT_CONFIG_FILENAME);
return VIRGO_SUCCESS;
}
*p_path = strdup(path);
return VIRGO_SUCCESS;
#endif
}
virgo_error_t*
virgo__conf_init(virgo_t *v)
{
virgo_error_t* err;
FILE *fp;
const char *path;
err = virgo__conf_get_path(v, &path);
if (err) {
return err;
}
/* destroy config if already read */
if (v->config) {
virgo__conf_destroy(v);
}
/* put config in virgo.config table */
fp = fopen(path, "r");
if (fp) {
lua_getglobal(v->L, "virgo");
lua_pushstring(v->L, "config");
lua_newtable(v->L);
conf_parse(v, fp);
lua_settable(v->L, -3);
fclose(fp);
}
lua_pushstring(v->L, "config_path");
lua_pushstring(v->L, path);
lua_settable(v->L, -3);
free((void*)path);
return VIRGO_SUCCESS;
}
|
Java
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.rest.action.admin.indices;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.DataStreamAlias;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.HEAD;
/**
* The REST handler for get alias and head alias APIs.
*/
public class RestGetAliasesAction extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(
new Route(GET, "/_alias"),
new Route(GET, "/_aliases"),
new Route(GET, "/_alias/{name}"),
new Route(HEAD, "/_alias/{name}"),
new Route(GET, "/{index}/_alias"),
new Route(HEAD, "/{index}/_alias"),
new Route(GET, "/{index}/_alias/{name}"),
new Route(HEAD, "/{index}/_alias/{name}"));
}
@Override
public String getName() {
return "get_aliases_action";
}
static RestResponse buildRestResponse(boolean aliasesExplicitlyRequested, String[] requestedAliases,
ImmutableOpenMap<String, List<AliasMetadata>> responseAliasMap,
Map<String, List<DataStreamAlias>> dataStreamAliases, XContentBuilder builder) throws Exception {
final Set<String> indicesToDisplay = new HashSet<>();
final Set<String> returnedAliasNames = new HashSet<>();
for (final ObjectObjectCursor<String, List<AliasMetadata>> cursor : responseAliasMap) {
for (final AliasMetadata aliasMetadata : cursor.value) {
if (aliasesExplicitlyRequested) {
// only display indices that have aliases
indicesToDisplay.add(cursor.key);
}
returnedAliasNames.add(aliasMetadata.alias());
}
}
// compute explicitly requested aliases that are not returned in the result
final SortedSet<String> missingAliases = new TreeSet<>();
// first wildcard index, leading "-" as an alias name after this index means
// that it is an exclusion
int firstWildcardIndex = requestedAliases.length;
for (int i = 0; i < requestedAliases.length; i++) {
if (Regex.isSimpleMatchPattern(requestedAliases[i])) {
firstWildcardIndex = i;
break;
}
}
for (int i = 0; i < requestedAliases.length; i++) {
if (Metadata.ALL.equals(requestedAliases[i]) || Regex.isSimpleMatchPattern(requestedAliases[i])
|| (i > firstWildcardIndex && requestedAliases[i].charAt(0) == '-')) {
// only explicitly requested aliases will be called out as missing (404)
continue;
}
// check if aliases[i] is subsequently excluded
int j = Math.max(i + 1, firstWildcardIndex);
for (; j < requestedAliases.length; j++) {
if (requestedAliases[j].charAt(0) == '-') {
// this is an exclude pattern
if (Regex.simpleMatch(requestedAliases[j].substring(1), requestedAliases[i])
|| Metadata.ALL.equals(requestedAliases[j].substring(1))) {
// aliases[i] is excluded by aliases[j]
break;
}
}
}
if (j == requestedAliases.length) {
// explicitly requested aliases[i] is not excluded by any subsequent "-" wildcard in expression
if (false == returnedAliasNames.contains(requestedAliases[i])) {
// aliases[i] is not in the result set
missingAliases.add(requestedAliases[i]);
}
}
}
final RestStatus status;
builder.startObject();
{
if (missingAliases.isEmpty()) {
status = RestStatus.OK;
} else {
status = RestStatus.NOT_FOUND;
final String message;
if (missingAliases.size() == 1) {
message = String.format(Locale.ROOT, "alias [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases));
} else {
message = String.format(Locale.ROOT, "aliases [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases));
}
builder.field("error", message);
builder.field("status", status.getStatus());
}
for (final var entry : responseAliasMap) {
if (aliasesExplicitlyRequested == false || (aliasesExplicitlyRequested && indicesToDisplay.contains(entry.key))) {
builder.startObject(entry.key);
{
builder.startObject("aliases");
{
for (final AliasMetadata alias : entry.value) {
AliasMetadata.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS);
}
}
builder.endObject();
}
builder.endObject();
}
}
for (var entry : dataStreamAliases.entrySet()) {
builder.startObject(entry.getKey());
{
builder.startObject("aliases");
{
for (DataStreamAlias alias : entry.getValue()) {
builder.startObject(alias.getName());
builder.endObject();
}
}
builder.endObject();
}
builder.endObject();
}
}
builder.endObject();
return new BytesRestResponse(status, builder);
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
// The TransportGetAliasesAction was improved to do the same post-processing as is happening here.
// We can't remove this logic yet, to support mixed clusters. We should be able to remove this logic
// here when 8.0 becomes the new version in the master branch.
final boolean namesProvided = request.hasParam("name");
final String[] aliases = request.paramAsStringArrayOrEmptyIfAll("name");
final GetAliasesRequest getAliasesRequest = new GetAliasesRequest(aliases);
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
getAliasesRequest.indices(indices);
getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local()));
//we may want to move this logic to TransportGetAliasesAction but it is based on the original provided aliases, which will
//not always be available there (they may get replaced so retrieving request.aliases is not quite the same).
return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<GetAliasesResponse>(channel) {
@Override
public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception {
return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder);
}
});
}
}
|
Java
|
#
# Sample : put() : Put a single request message to a queue
#
require 'wmq'
WMQ::QueueManager.connect(q_mgr_name: 'REID') do |qmgr|
message = WMQ::Message.new
message.data = 'Hello World'
message.descriptor = {
msg_type: WMQ::MQMT_REQUEST,
reply_to_q: 'TEST.REPLY.QUEUE'
}
qmgr.put(q_name: 'TEST.QUEUE', message: message)
end
|
Java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import axios from 'axios';
import { AxiosResponse } from 'axios';
export function getDruidErrorMessage(e: any) {
const data: any = ((e.response || {}).data || {});
return [data.error, data.errorMessage, data.errorClass].filter(Boolean).join(' / ') || e.message;
}
export async function queryDruidRune(runeQuery: Record<string, any>): Promise<any> {
let runeResultResp: AxiosResponse<any>;
try {
runeResultResp = await axios.post("/druid/v2", runeQuery);
} catch (e) {
throw new Error(getDruidErrorMessage(e));
}
return runeResultResp.data;
}
export async function queryDruidSql(sqlQuery: Record<string, any>): Promise<any[]> {
let sqlResultResp: AxiosResponse<any>;
try {
sqlResultResp = await axios.post("/druid/v2/sql", sqlQuery);
} catch (e) {
throw new Error(getDruidErrorMessage(e));
}
return sqlResultResp.data;
}
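// Example usage (a sketch; the SQL text below is illustrative only):
// queryDruidSql({ query: 'SELECT COUNT(*) AS cnt FROM wikipedia' })
// .then(rows => console.log(rows))
// .catch(e => console.error(e.message));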
|
Java
|
using System;
using System.Threading;
using System.Threading.Tasks;
using Moq;
using Moq.Protected;
using Riganti.Utils.Infrastructure.Core;
using Xunit;
#if EFCORE
using Microsoft.EntityFrameworkCore;
using Microsoft.EntityFrameworkCore.Infrastructure;
using Riganti.Utils.Infrastructure.EntityFrameworkCore.Transactions;
#else
using System.Data.Entity;
using Riganti.Utils.Infrastructure.EntityFramework.Transactions;
#endif
#if EFCORE
namespace Riganti.Utils.Infrastructure.EntityFrameworkCore.Tests.UnitOfWork
#else
namespace Riganti.Utils.Infrastructure.EntityFramework.Tests.UnitOfWork
#endif
{
public class EntityFrameworkUnitOfWorkTests
{
[Fact]
public void Commit_CallCommitCoreOnlyIfHasOwnDbContext()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWorkParentMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkParent = unitOfWorkParentMock.Object)
{
unitOfWorkRegistryStub.RegisterUnitOfWork(unitOfWorkParent);
var unitOfWorkChildMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkChild = unitOfWorkChildMock.Object)
{
unitOfWorkChild.Commit();
}
unitOfWorkChildMock.Protected().Verify("CommitCore", Times.Never());
unitOfWorkParent.Commit();
}
unitOfWorkParentMock.Protected().Verify("CommitCore", Times.Once());
}
[Fact]
public void Commit_CorrectChildRequestIgnoredBehavior()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
Assert.Throws<ChildCommitPendingException>(() =>
{
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
using (var unitOfWorkChild = unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
unitOfWorkChild.Commit();
}
}
});
// verify that the unit of work provider keeps working after the exception is caught
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
}
}
}
[Fact]
public void Commit_CorrectMultipleLayeredReuseParentBehavior()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
using (var unitOfWorkParent = unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 1st level, context 1
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 2nd level, context 1
using (unitOfWorkProvider.Create(DbContextOptions.AlwaysCreateOwnContext))
{
// 3rd level, context 2
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 4th level, context 2
using (var unitOfWorkParent3 = unitOfWorkProvider.Create(DbContextOptions.AlwaysCreateOwnContext))
{
// 5th level, context 3
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 6th level, context 3
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
}
using (var unitOfWorkChild3 = unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 7th level, context 3 commit requested
unitOfWorkChild3.Commit();
}
}
// commit mandatory, context 3 commit pending
unitOfWorkParent3.Commit();
}
}
}
}
// commit optional, no reusing child commit pending
unitOfWorkParent.Commit();
}
}
[Fact]
public void Commit_UOWHasNoParent_CallCommitCore()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWorkParentMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkParent = unitOfWorkParentMock.Object)
{
unitOfWorkParent.Commit();
}
unitOfWorkParentMock.Protected().Verify("CommitCore", Times.Once());
}
[Fact]
public void Commit_UOWHasChild_CallCommitCore()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWorkParentMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkParent = unitOfWorkParentMock.Object)
{
unitOfWorkRegistryStub.RegisterUnitOfWork(unitOfWorkParent);
using (var unitOfWorkChild = new EntityFrameworkUnitOfWork(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext))
{
unitOfWorkChild.Commit();
}
unitOfWorkParent.Commit();
}
unitOfWorkParentMock.Protected().Verify("CommitCore", Times.Once());
}
[Fact]
public async Task CommitAsync_CallCommitCoreOnlyIfHasOwnDbContext()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWorkParentMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkParent = unitOfWorkParentMock.Object)
{
unitOfWorkRegistryStub.RegisterUnitOfWork(unitOfWorkParent);
var unitOfWorkChildMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkChild = unitOfWorkChildMock.Object)
{
await unitOfWorkChild.CommitAsync();
}
unitOfWorkChildMock.Protected().Verify("CommitAsyncCore", Times.Never(), new CancellationToken());
await unitOfWorkParent.CommitAsync();
}
unitOfWorkParentMock.Protected().Verify("CommitAsyncCore", Times.Once(), new CancellationToken());
}
[Fact]
public async Task CommitAsync_ThrowIfChildCommitRequestedNotFulfilledByRoot()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
await Assert.ThrowsAsync<ChildCommitPendingException>(async () =>
{
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
using (var unitOfWorkChild = unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
await unitOfWorkChild.CommitAsync();
}
}
});
// verify that the unit of work provider keeps working after the exception is caught
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
}
}
}
[Fact]
public async Task CommitAsync_CorrectMultipleLayeredReuseParentBehavior()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
using (var unitOfWorkParent = unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 1st level, context 1
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 2nd level, context 1
using (unitOfWorkProvider.Create(DbContextOptions.AlwaysCreateOwnContext))
{
// 3rd level, context 2
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 4th level, context 2
using (var unitOfWorkParent3 = unitOfWorkProvider.Create(DbContextOptions.AlwaysCreateOwnContext))
{
// 5th level, context 3
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 6th level, context 3
using (unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
}
using (var unitOfWorkChild3 = unitOfWorkProvider.Create(DbContextOptions.ReuseParentContext))
{
// 7th level, context 3 commit requested
await unitOfWorkChild3.CommitAsync();
}
}
// commit mandatory, context 3 commit pending
await unitOfWorkParent3.CommitAsync();
}
}
}
}
// commit optional, no reusing child commit pending
await unitOfWorkParent.CommitAsync();
}
}
[Fact]
public async Task CommitAsync_UOWHasNoParent_CallCommitCore()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWorkParentMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkParent = unitOfWorkParentMock.Object)
{
await unitOfWorkParent.CommitAsync();
}
unitOfWorkParentMock.Protected().Verify("CommitAsyncCore", Times.Once(), new CancellationToken());
}
[Fact]
public async Task CommitAsync_UOWHasChild_CallCommitCore()
{
Func<DbContext> dbContextFactory = () => new Mock<DbContext>().Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWorkParentMock = new Mock<EntityFrameworkUnitOfWork>(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext) { CallBase = true };
using (var unitOfWorkParent = unitOfWorkParentMock.Object)
{
unitOfWorkRegistryStub.RegisterUnitOfWork(unitOfWorkParent);
using (var unitOfWorkChild = new EntityFrameworkUnitOfWork(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext))
{
await unitOfWorkChild.CommitAsync();
}
await unitOfWorkParent.CommitAsync();
}
unitOfWorkParentMock.Protected().Verify("CommitAsyncCore", Times.Once(), new CancellationToken());
}
[Fact]
public async Task Commit_Transaction_CallRollback()
{
var dbContextFactory = GetContextFactory();
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider<InMemoryDbContext>(unitOfWorkRegistryStub, dbContextFactory);
var scopeMock = new Mock<UnitOfWorkTransactionScope<InMemoryDbContext>>(unitOfWorkProvider);
var scope = scopeMock.Object;
await scope.ExecuteAsync(async uowParent =>
{
Assert.True(uowParent.IsInTransaction);
await uowParent.CommitAsync();
Assert.Equal(1, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
using (var uowChild = (EntityFrameworkUnitOfWork<InMemoryDbContext>)unitOfWorkProvider.Create())
{
await uowChild.CommitAsync();
Assert.Equal(1, uowChild.CommitsCount);
Assert.False(uowChild.CommitPending);
Assert.Equal(2, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
}
throw Assert.Throws<RollbackRequestedException>(() => { uowParent.RollbackTransaction(); });
});
scopeMock.Protected().Verify("AfterRollback", Times.Once());
}
[Fact]
public async Task Commit_Transaction_CallRollback_UserCatch()
{
var dbContextFactory = GetContextFactory();
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider<InMemoryDbContext>(unitOfWorkRegistryStub, dbContextFactory);
var scopeMock = new Mock<UnitOfWorkTransactionScope<InMemoryDbContext>>(unitOfWorkProvider);
var scope = scopeMock.Object;
await scope.ExecuteAsync(async uowParent =>
{
Assert.True(uowParent.IsInTransaction);
await uowParent.CommitAsync();
using (var uowChild = (EntityFrameworkUnitOfWork<InMemoryDbContext>)unitOfWorkProvider.Create())
{
await uowChild.CommitAsync();
try
{
uowParent.RollbackTransaction();
}
catch (Exception)
{
// user catches any exceptions
}
}
});
scopeMock.Protected().Verify("AfterRollback", Times.Once());
}
[Fact]
public async Task Commit_Transaction_CallRollback_OnException()
{
var dbContextFactory = GetContextFactory();
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider<InMemoryDbContext>(unitOfWorkRegistryStub, dbContextFactory);
var scope = unitOfWorkProvider.CreateTransactionScope();
var exceptionKey = Guid.NewGuid().ToString();
try
{
await scope.ExecuteAsync(async uowParent =>
{
Assert.True(uowParent.IsInTransaction);
await uowParent.CommitAsync();
throw new Exception(exceptionKey);
});
}
catch (Exception e) when (e.Message == exceptionKey)
{
// test exception caught, passed
}
}
[Fact]
public async Task Commit_Transaction_CallCommit()
{
var dbContextFactory = GetContextFactory();
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider<InMemoryDbContext>(unitOfWorkRegistryStub, dbContextFactory);
var scopeMock = new Mock<UnitOfWorkTransactionScope<InMemoryDbContext>>(unitOfWorkProvider);
var scope = scopeMock.Object;
await scope.ExecuteAsync(async uowParent =>
{
Assert.True(uowParent.IsInTransaction);
await uowParent.CommitAsync();
Assert.Equal(1, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
});
scopeMock.Protected().Verify("AfterCommit", Times.Once());
}
[Fact]
public async Task Commit_Transaction_CallCommit_Nesting()
{
var dbContextFactory = GetContextFactory();
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider<InMemoryDbContext>(unitOfWorkRegistryStub, dbContextFactory);
var scopeMock = new Mock<UnitOfWorkTransactionScope<InMemoryDbContext>>(unitOfWorkProvider);
var scope = scopeMock.Object;
await scope.ExecuteAsync(async uowParent =>
{
Assert.True(uowParent.IsInTransaction);
await uowParent.CommitAsync();
Assert.Equal(1, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
using (var uowChild = (EntityFrameworkUnitOfWork<InMemoryDbContext>)unitOfWorkProvider.Create())
{
await uowChild.CommitAsync();
Assert.Equal(1, uowChild.CommitsCount);
Assert.False(uowChild.CommitPending);
Assert.Equal(2, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
using (var uowChildChild = (EntityFrameworkUnitOfWork<InMemoryDbContext>)unitOfWorkProvider.Create())
{
await uowChildChild.CommitAsync();
}
Assert.Equal(2, uowChild.CommitsCount);
Assert.False(uowChild.CommitPending);
Assert.Equal(3, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
}
});
scopeMock.Protected().Verify("AfterCommit", Times.Once());
}
[Fact]
public void Commit_Transaction_CallCommit_Sync()
{
var dbContextFactory = GetContextFactory();
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider<InMemoryDbContext>(unitOfWorkRegistryStub, dbContextFactory);
var scopeMock = new Mock<UnitOfWorkTransactionScope<InMemoryDbContext>>(unitOfWorkProvider);
var scope = scopeMock.Object;
scope.Execute(uowParent =>
{
Assert.True(uowParent.IsInTransaction);
uowParent.Commit();
Assert.Equal(1, uowParent.CommitsCount);
Assert.False(uowParent.CommitPending);
});
scopeMock.Protected().Verify("AfterCommit", Times.Once());
}
[Fact]
public void TryGetDbContext_UnitOfWorkRegistryHasUnitOfWork_ReturnCorrectDbContext()
{
var dbContext = new Mock<DbContext>().Object;
Func<DbContext> dbContextFactory = () => dbContext;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var unitOfWork = new EntityFrameworkUnitOfWork(unitOfWorkProvider, dbContextFactory, DbContextOptions.ReuseParentContext);
unitOfWorkRegistryStub.RegisterUnitOfWork(unitOfWork);
var uowDbContext = EntityFrameworkUnitOfWork.TryGetDbContext(unitOfWorkProvider);
Assert.NotNull(uowDbContext);
Assert.Same(dbContext, uowDbContext);
}
[Fact]
public void TryGetDbContext_UnitOfWorkRegistryHasNotUnitOfWork_ReturnsNull()
{
var dbContext = new Mock<DbContext>().Object;
Func<DbContext> dbContextFactory = () => dbContext;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
var value = EntityFrameworkUnitOfWork.TryGetDbContext(unitOfWorkProvider);
Assert.Null(value);
}
[Fact]
public async Task CommitAsync_WithCancellationTokenInNestedUow_SavedChangesInParentUow()
{
var dbContext = new Mock<DbContext>();
Func<DbContext> dbContextFactory = () => dbContext.Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
using (var uow = unitOfWorkProvider.Create())
{
using (var nested = unitOfWorkProvider.Create())
{
await nested.CommitAsync(new CancellationToken());
// verify that SaveChangesAsync has never been called
dbContext.Verify(x => x.SaveChangesAsync(It.IsAny<CancellationToken>()), Times.Never);
}
await uow.CommitAsync(new CancellationToken());
// verify that SaveChangesAsync has been called exactly once
dbContext.Verify(x => x.SaveChangesAsync(It.IsAny<CancellationToken>()), Times.Once);
}
}
[Fact]
public async Task CommitAsync_WithoutCancellationTokenInNestedUow_SavedChangesInParentUow()
{
var dbContext = new Mock<DbContext>();
Func<DbContext> dbContextFactory = () => dbContext.Object;
var unitOfWorkRegistryStub = new ThreadLocalUnitOfWorkRegistry();
var unitOfWorkProvider = new EntityFrameworkUnitOfWorkProvider(unitOfWorkRegistryStub, dbContextFactory);
using (var uow = unitOfWorkProvider.Create())
{
using (var nested = unitOfWorkProvider.Create())
{
await nested.CommitAsync();
// verify that SaveChangesAsync has never been called
dbContext.Verify(x => x.SaveChangesAsync(It.IsAny<CancellationToken>()), Times.Never);
}
await uow.CommitAsync();
// verify that SaveChangesAsync has been called exactly once
dbContext.Verify(x => x.SaveChangesAsync(It.IsAny<CancellationToken>()), Times.Once);
}
}
public class InMemoryDbContext : DbContext
{
#if EFCORE
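// The EF Core InMemory provider does not support transactions; ignore the
// corresponding warning so the transaction-scope tests can run against it.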
protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
{
if (!optionsBuilder.IsConfigured)
{
optionsBuilder
.UseInMemoryDatabase(Guid.NewGuid().ToString())
.ConfigureWarnings(w => w.Ignore(InMemoryEventId.TransactionIgnoredWarning));
}
}
#endif
}
private static Func<InMemoryDbContext> GetContextFactory()
{
return () =>
#if EFCORE
new InMemoryDbContext();
#else
new Mock<InMemoryDbContext>().Object;
#endif
}
}
}
|
C#
|
<?php
/**
* Created by PhpStorm.
* User: vjcspy
* Date: 28/05/2016
* Time: 12:56
*/
namespace Modules\IzCore\Repositories;
use Modules\IzCore\Repositories\Object\DataObject;
use Modules\IzCore\Repositories\Theme\View\AdditionViewInterface;
use Pingpong\Modules\Repository;
/**
 * Theme manager.
 * Responsibilities:
 * - Theme data: merges data contributed by outside modules
 * - Tracks the current theme
*
* @package Modules\IzCore\Repositories
*/
class Theme extends DataObject {
/**
* @var
*/
protected $currentPath;
/**
* [
 * 'path' => ['Modules\IzCore\Repositories\Theme\View\AdditionView']
* ]
*
* @var array
*/
protected $additionData = [];
/**
* @var \Teepluss\Theme\Contracts\Theme
*/
protected $theme;
/**
* @var string
*/
protected $_currentThemeName;
/**
* @var string
*/
protected $_currentLayoutName;
/**
* [
 * 'path' => []
* ]
*
* @var array
*/
protected $data = [];
/**
* @var \Pingpong\Modules\Repository
*/
protected $module;
/**
 * All assets
*
* @var
*/
private $assets;
/**
* @var \Modules\IzCore\Entities\Theme
*/
private $themeModel;
/**
* Theme constructor.
*
* @param \Pingpong\Modules\Repository $module
* @param array $data
*/
public function __construct(
Repository $module,
\Modules\IzCore\Entities\Theme $themeModel,
array $data = []
) {
$this->themeModel = $themeModel;
$this->module = $module;
parent::__construct($data);
}
/**
 * Allows outside modules to add data to a given path
*
* @param $path
* @param $data
*/
public function addAdditionData($path, $data) {
foreach ($data as $item) {
if (!isset($this->additionData[$path]))
$this->additionData[$path] = [];
$this->additionData[$path][] = $item;
}
}
/**
 * Merge data from other modules into the current path
*
* @param null $path
*
* @return $this
*/
public function initAdditionData($path = null) {
if (is_null($path))
$path = $this->getCurrentPath();
if (isset($this->additionData[$path])) {
foreach ($this->additionData[$path] as $item) {
/** @var AdditionViewInterface $item */
$item = app()->make($item);
if (!isset($this->data[$path]))
$this->data[$path] = [];
$this->data[$path] = array_merge($this->data[$path], $item->handle());
}
}
return $this;
}
/**
* @return mixed
*/
public function getCurrentPath() {
return $this->currentPath;
}
/**
* @param mixed $currentPath
*
* @return $this
*/
public function setCurrentPath($currentPath) {
$this->currentPath = $currentPath;
return $this;
}
/**
 * Set data on the view of the given path
*
* @param \Teepluss\Theme\Theme $theme
* @param $path
*
* @return $this
*/
public function initViewData(\Teepluss\Theme\Theme $theme, $path) {
/*Merge data from other modules*/
$this->initAdditionData($path);
if (isset($this->data[$path])) {
foreach ($this->data[$path] as $items) {
foreach ($items as $k => $item) {
$theme->set($k, $item);
}
}
}
return $this;
}
/**
* Get all assets in each theme in each module
*
* @return array
* @throws \Exception
*/
public function getAssetsTree() {
if (is_null($this->assets)) {
$this->assets = [];
$pathModules = $this->module->getPath();
$moduleDirs = scandir($pathModules);
foreach ($moduleDirs as $moduleDir) {
if (!in_array($moduleDir, [".", ".."])) {
/*Config/Vendor path of the current module*/
$currentModuleThemePaths = $pathModules . '/' . $moduleDir . '/themes';
/*Check whether the current module has a themes directory*/
if (!file_exists($currentModuleThemePaths))
continue;
$themePath = scandir($currentModuleThemePaths);
foreach ($themePath as $themeDir) {
if (!in_array($themeDir, [".", ".."])) {
$currentThemeDir = $currentModuleThemePaths . '/' . $themeDir . '/config.php';
// Check that config.php exists
if (!file_exists($currentThemeDir))
continue;
$themeConfig = (include $currentThemeDir);
if (isset($themeConfig['assets'])) {
$assetWithThemeName = [];
foreach ($themeConfig['assets'] as $k => $asset) {
$asset['theme_name'] = $themeDir;
$assetWithThemeName[$k] = $asset;
}
$this->assets = array_merge($this->assets, $assetWithThemeName);
}
}
}
}
}
}
return $this->assets;
}
/**
* Retrieve current theme name
*
* @return string
*/
public function getCurrentThemeName() {
if (is_null($this->_currentThemeName))
$this->_currentThemeName = $this->getTheme()->getThemeName();
return $this->_currentThemeName;
}
/**
* @param string $currentThemeName
*
* @return $this
*/
public function setCurrentThemeName($currentThemeName) {
$this->_currentThemeName = $currentThemeName;
return $this;
}
/**
* @return string
*/
public function getCurrentLayoutName() {
if (is_null($this->_currentLayoutName))
$this->_currentLayoutName = $this->getTheme()->getLayoutName();
return $this->_currentLayoutName;
}
/**
 * Retrieve the current layout used in the theme
*
* @param string $currentLayoutName
*
* @return $this
*/
public function setCurrentLayoutName($currentLayoutName) {
$this->_currentLayoutName = $currentLayoutName;
return $this;
}
/**
* @return \Teepluss\Theme\Theme
* @throws \Exception
*/
public function getTheme() {
if (is_null($this->theme)) {
$this->theme = app()->make('\Teepluss\Theme\Contracts\Theme');
}
return $this->theme;
}
/**
 * Registers the theme's existence within the app.
 * Used to determine which theme a view belongs to, and whether it is an admin or frontend theme.
*
* @param $themeName
* @param bool $isAdmin
*
* @return $this
*/
public function registerTheme($themeName, $isAdmin = true) {
/* FIXME: need cache here */
$theme = $this->themeModel->query()->firstOrNew(['name' => $themeName]);
$theme->type = $isAdmin == true ? \Modules\IzCore\Entities\Theme::TYPE_ADMIN : \Modules\IzCore\Entities\Theme::TYPE_FRONTEND;
$theme->save();
return $this;
}
}
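// Minimal usage sketch (illustrative only; SomeAdditionView stands for any
// hypothetical implementation of AdditionViewInterface):
//   $theme = app()->make(Theme::class);
//   $theme->addAdditionData('some/path', [SomeAdditionView::class]);
//   $theme->initViewData($teeplussTheme, 'some/path');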
|
PHP
|
package com.ejlchina.searcher.implement;
import com.ejlchina.searcher.*;
import com.ejlchina.searcher.bean.InheritType;
import java.lang.reflect.Field;
import java.util.*;
import java.lang.reflect.Modifier;
import java.util.concurrent.ConcurrentHashMap;
/***
 * Default bean metadata resolver
* @author Troy.Zhou @ 2021-10-30
* @since v3.0.0
*/
public class DefaultMetaResolver implements MetaResolver {
private final Map<Class<?>, BeanMeta<?>> cache = new ConcurrentHashMap<>();
private SnippetResolver snippetResolver = new DefaultSnippetResolver();
private DbMapping dbMapping;
public DefaultMetaResolver() {
this(new DefaultDbMapping());
}
public DefaultMetaResolver(DbMapping dbMapping) {
this.dbMapping = dbMapping;
}
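/**
 * Returns the {@link BeanMeta} for the given bean class, resolving it on first
 * access. The lock-free ConcurrentHashMap lookup serves the common cached case;
 * the synchronized block serializes resolution, and a rare duplicate resolution
 * under contention simply overwrites the cache entry with an equivalent value.
 */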
@Override
public <T> BeanMeta<T> resolve(Class<T> beanClass) {
@SuppressWarnings("unchecked")
BeanMeta<T> beanMeta = (BeanMeta<T>) cache.get(beanClass);
if (beanMeta != null) {
return beanMeta;
}
synchronized (cache) {
beanMeta = resolveMetadata(beanClass);
cache.put(beanClass, beanMeta);
return beanMeta;
}
}
protected <T> BeanMeta<T> resolveMetadata(Class<T> beanClass) {
DbMapping.Table table = dbMapping.table(beanClass);
if (table == null) {
throw new SearchException("The class [" + beanClass.getName() + "] can not be searched, because it can not be resolved by " + dbMapping.getClass());
}
BeanMeta<T> beanMeta = new BeanMeta<>(beanClass, table.getDataSource(),
snippetResolver.resolve(table.getTables()),
snippetResolver.resolve(table.getJoinCond()),
snippetResolver.resolve(table.getGroupBy()),
table.isDistinct());
// resolve fields
Field[] fields = getBeanFields(beanClass);
for (int index = 0; index < fields.length; index++) {
Field field = fields[index];
if (Modifier.isStatic(field.getModifiers())) {
continue;
}
DbMapping.Column column = dbMapping.column(beanClass, field);
if (column == null) {
continue;
}
field.setAccessible(true);
SqlSnippet snippet = snippetResolver.resolve(column.getFieldSql());
// Note: Oracle column aliases must not start with an underscore
FieldMeta fieldMeta = new FieldMeta(beanMeta, field, snippet, "c_" + index,
column.isConditional(), column.getOnlyOn());
beanMeta.addFieldMeta(field.getName(), fieldMeta);
}
if (beanMeta.getFieldCount() == 0) {
throw new SearchException("[" + beanClass.getName() + "] is not a valid SearchBean, because there is no field mapping to database.");
}
return beanMeta;
}
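/**
 * Collects the non-static, non-transient, non-synthetic fields declared on the
 * bean class, walking up the superclass chain only when the mapping's inherit
 * type covers fields; a subclass field shadows a same-named superclass field.
 */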
protected Field[] getBeanFields(Class<?> beanClass) {
InheritType iType = dbMapping.inheritType(beanClass);
List<Field> fieldList = new ArrayList<>();
Set<String> fieldNames = new HashSet<>();
while (beanClass != Object.class) {
for (Field field : beanClass.getDeclaredFields()) {
String name = field.getName();
int modifiers = field.getModifiers();
if (field.isSynthetic() || Modifier.isStatic(modifiers)
|| Modifier.isTransient(modifiers)
|| fieldNames.contains(name)) {
continue;
}
fieldList.add(field);
fieldNames.add(name);
}
if (iType != InheritType.FIELD && iType != InheritType.ALL) {
break;
}
beanClass = beanClass.getSuperclass();
}
return fieldList.toArray(new Field[0]);
}
public SnippetResolver getSnippetResolver() {
return snippetResolver;
}
public void setSnippetResolver(SnippetResolver snippetResolver) {
this.snippetResolver = Objects.requireNonNull(snippetResolver);
}
public DbMapping getDbMapping() {
return dbMapping;
}
public void setDbMapping(DbMapping dbMapping) {
this.dbMapping = Objects.requireNonNull(dbMapping);
}
}
|
Java
|
# AUTOGENERATED FILE
FROM balenalib/generic-amd64-debian:bullseye-run
ENV NODE_VERSION 14.18.3
ENV YARN_VERSION 1.22.4
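# Import the Node.js release signing key (trying several keyservers in turn),
# then download the Node.js tarball with checksum verification and the Yarn
# tarball with GPG signature verification.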
RUN buildDeps='curl libatomic1' \
&& set -x \
&& for key in \
6A010C5166006599AA17F08146C2130DFD2497F5 \
; do \
gpg --batch --keyserver pgp.mit.edu --recv-keys "$key" || \
gpg --batch --keyserver keyserver.pgp.com --recv-keys "$key" || \
gpg --batch --keyserver keyserver.ubuntu.com --recv-keys "$key" ; \
done \
&& apt-get update && apt-get install -y $buildDeps --no-install-recommends \
&& rm -rf /var/lib/apt/lists/* \
&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-x64.tar.gz" \
&& echo "bd96f88e054801d1368787f7eaf77b49cd052b9543c56bd6bc0bfc90310e2756 node-v$NODE_VERSION-linux-x64.tar.gz" | sha256sum -c - \
&& tar -xzf "node-v$NODE_VERSION-linux-x64.tar.gz" -C /usr/local --strip-components=1 \
&& rm "node-v$NODE_VERSION-linux-x64.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& mkdir -p /opt/yarn \
&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& npm config set unsafe-perm true -g --unsafe-perm \
&& rm -rf /tmp/*
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@node.sh" \
&& echo "Running test-stack@node" \
&& chmod +x test-stack@node.sh \
&& bash test-stack@node.sh \
&& rm -rf test-stack@node.sh
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: Intel 64-bit (x86-64) \nOS: Debian Bullseye \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v14.18.3, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
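# Replace /bin/sh with a one-shot shim that prints the balena image info,
# restores the real shell, and then runs the requested command.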
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
Dockerfile
|
'use strict';
// https://github.com/tc39/proposal-iterator-helpers
var aCallable = require('../internals/a-callable');
var anObject = require('../internals/an-object');
var getBuiltIn = require('../internals/get-built-in');
var getMethod = require('../internals/get-method');
var MAX_SAFE_INTEGER = 0x1FFFFFFFFFFFFF;
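// TYPE selects the helper being built: 0 = toArray, 1 = forEach, 2 = every,
// 3 = some, 4 = find (find is handled by the final `else` branch below).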
var createMethod = function (TYPE) {
var IS_TO_ARRAY = TYPE == 0;
var IS_FOR_EACH = TYPE == 1;
var IS_EVERY = TYPE == 2;
var IS_SOME = TYPE == 3;
return function (iterator, fn, target) {
anObject(iterator);
var Promise = getBuiltIn('Promise');
var next = aCallable(iterator.next);
var index = 0;
var MAPPING = fn !== undefined;
if (MAPPING || !IS_TO_ARRAY) aCallable(fn);
return new Promise(function (resolve, reject) {
var closeIteration = function (method, argument) {
try {
var returnMethod = getMethod(iterator, 'return');
if (returnMethod) {
return Promise.resolve(returnMethod.call(iterator)).then(function () {
method(argument);
}, function (error) {
reject(error);
});
}
} catch (error2) {
return reject(error2);
} method(argument);
};
var onError = function (error) {
closeIteration(reject, error);
};
var loop = function () {
try {
if (IS_TO_ARRAY && (index > MAX_SAFE_INTEGER) && MAPPING) {
throw TypeError('The allowed number of iterations has been exceeded');
}
Promise.resolve(anObject(next.call(iterator))).then(function (step) {
try {
if (anObject(step).done) {
if (IS_TO_ARRAY) {
target.length = index;
resolve(target);
} else resolve(IS_SOME ? false : IS_EVERY || undefined);
} else {
var value = step.value;
if (MAPPING) {
Promise.resolve(IS_TO_ARRAY ? fn(value, index) : fn(value)).then(function (result) {
if (IS_FOR_EACH) {
loop();
} else if (IS_EVERY) {
result ? loop() : closeIteration(resolve, false);
} else if (IS_TO_ARRAY) {
target[index++] = result;
loop();
} else {
result ? closeIteration(resolve, IS_SOME || value) : loop();
}
}, onError);
} else {
target[index++] = value;
loop();
}
}
} catch (error) { onError(error); }
}, onError);
} catch (error2) { onError(error2); }
};
loop();
});
};
};
module.exports = {
toArray: createMethod(0),
forEach: createMethod(1),
every: createMethod(2),
some: createMethod(3),
find: createMethod(4)
};
|
JavaScript
|
---
layout: blog-archive
title: Observatory
permalink: /blog/category/observatory/
archive-name: observatory
archive-type: Category
breadcrumb: blog
---
|
YAML
|
/*!
* ${copyright}
*/
sap.ui.require([
"jquery.sap.global",
"sap/ui/base/SyncPromise",
"sap/ui/model/BindingMode",
"sap/ui/model/ChangeReason",
"sap/ui/model/ClientListBinding",
"sap/ui/model/Context",
"sap/ui/model/ContextBinding",
"sap/ui/model/Filter",
"sap/ui/model/MetaModel",
"sap/ui/model/PropertyBinding",
"sap/ui/model/Sorter",
"sap/ui/model/odata/OperationMode",
"sap/ui/model/odata/type/Int64",
"sap/ui/model/odata/type/Raw",
"sap/ui/model/odata/v4/AnnotationHelper",
"sap/ui/model/odata/v4/Context",
"sap/ui/model/odata/v4/lib/_Helper",
"sap/ui/model/odata/v4/ODataMetaModel",
"sap/ui/model/odata/v4/ODataModel",
"sap/ui/model/odata/v4/ValueListType",
"sap/ui/test/TestUtils",
"sap/ui/thirdparty/URI"
], function (jQuery, SyncPromise, BindingMode, ChangeReason, ClientListBinding, BaseContext,
ContextBinding, Filter, MetaModel, PropertyBinding, Sorter, OperationMode, Int64, Raw,
AnnotationHelper, Context, _Helper, ODataMetaModel, ODataModel, ValueListType, TestUtils,
URI) {
/*global QUnit, sinon */
/*eslint max-nested-callbacks: 0, no-loop-func: 0, no-warning-comments: 0 */
"use strict";
// Common := com.sap.vocabularies.Common.v1
// tea_busi := com.sap.gateway.default.iwbep.tea_busi.v0001
// tea_busi_product.v0001 := com.sap.gateway.default.iwbep.tea_busi_product.v0001
// tea_busi_supplier.v0001 := com.sap.gateway.default.iwbep.tea_busi_supplier.v0001
// UI := com.sap.vocabularies.UI.v1
var mMostlyEmptyScope = {
"$EntityContainer" : "empty.DefaultContainer",
"$Version" : "4.0",
"empty." : {
"$kind" : "Schema"
},
"empty.DefaultContainer" : {
"$kind" : "EntityContainer"
}
},
sODataMetaModel = "sap.ui.model.odata.v4.ODataMetaModel",
mProductScope = {
"$EntityContainer" : "tea_busi_product.v0001.DefaultContainer",
"$Reference" : {
"../../../../default/iwbep/tea_busi_supplier/0001/$metadata" : {
"$Include" : [
"tea_busi_supplier.v0001."
]
}
},
"$Version" : "4.0",
"tea_busi_product.v0001." : {
"$kind" : "Schema",
"$Annotations" : { // Note: simulate result of _MetadataRequestor#read
"tea_busi_product.v0001.Category/CategoryName" : {
"@Common.Label" : "CategoryName from tea_busi_product.v0001."
}
}
},
"tea_busi_product.v0001.Category" : {
"$kind" : "EntityType",
"CategoryName" : {
"$kind" : "Property",
"$Type" : "Edm.String"
}
},
"tea_busi_product.v0001.DefaultContainer" : {
"$kind" : "EntityContainer"
},
"tea_busi_product.v0001.Product" : {
"$kind" : "EntityType",
"Name" : {
"$kind" : "Property",
"$Type" : "Edm.String"
},
"PRODUCT_2_CATEGORY" : {
"$kind" : "NavigationProperty",
"$Type" : "tea_busi_product.v0001.Category"
},
"PRODUCT_2_SUPPLIER" : {
"$kind" : "NavigationProperty",
"$Type" : "tea_busi_supplier.v0001.Supplier"
}
}
},
sSampleServiceUrl
= "/sap/opu/odata4/sap/zui5_testv4/default/sap/zui5_epm_sample/0002/",
mScope = {
"$Annotations" : {
"name.space.Id" : {
"@Common.Label" : "ID"
},
"tea_busi.DefaultContainer" : {
"@DefaultContainer" : {}
},
"tea_busi.DefaultContainer/T€AMS" : {
"@T€AMS" : {}
},
"tea_busi.TEAM" : {
"@Common.Text" : {
"$Path" : "Name"
},
"@Common.Text@UI.TextArrangement" : {
"$EnumMember" : "UI.TextArrangementType/TextLast"
},
"@UI.Badge" : {
"@Common.Label" : "Label inside",
"$Type" : "UI.BadgeType",
"HeadLine" : {
"$Type" : "UI.DataField",
"Value" : {
"$Path" : "Name"
}
},
"Title" : {
"$Type" : "UI.DataField",
"Value" : {
"$Path" : "Team_Id"
}
}
},
"@UI.Badge@Common.Label" : "Best Badge Ever!",
"@UI.LineItem" : [{
"@UI.Importance" : {
"$EnumMember" : "UI.ImportanceType/High"
},
"$Type" : "UI.DataField",
"Label" : "Team ID",
"Label@Common.Label" : "Team ID's Label",
"Value" : {
"$Path" : "Team_Id"
}
}]
},
"tea_busi.TEAM/Team_Id" : {
"@Common.Label" : "Team ID",
"@Common.Text" : {
"$Path" : "Name"
},
"@Common.Text@UI.TextArrangement" : {
"$EnumMember" : "UI.TextArrangementType/TextLast"
}
},
"tea_busi.Worker" : {
"@UI.Facets" : [{
"$Type" : "UI.ReferenceFacet",
"Target" : {
// term cast
"$AnnotationPath" : "@UI.LineItem"
}
}, {
"$Type" : "UI.ReferenceFacet",
"Target" : {
// term cast at navigation property itself
"$AnnotationPath" : "EMPLOYEE_2_TEAM@Common.Label"
}
}, {
"$Type" : "UI.ReferenceFacet",
"Target" : {
// navigation property and term cast
"$AnnotationPath" : "EMPLOYEE_2_TEAM/@UI.LineItem"
}
}, {
"$Type" : "UI.ReferenceFacet",
"Target" : {
// type cast, navigation properties and term cast (at its type)
"$AnnotationPath"
: "tea_busi.TEAM/TEAM_2_EMPLOYEES/EMPLOYEE_2_TEAM/@UI.LineItem"
}
}],
"@UI.LineItem" : [{
"$Type" : "UI.DataField",
"Label" : "Team ID",
"Value" : {
"$Path" : "EMPLOYEE_2_TEAM/Team_Id"
}
}]
},
"tea_busi.Worker/EMPLOYEE_2_TEAM" : {
"@Common.Label" : "Employee's Team"
}
},
"$EntityContainer" : "tea_busi.DefaultContainer",
"empty." : {
"$kind" : "Schema"
},
"name.space." : {
"$kind" : "Schema"
},
"tea_busi." : {
"$kind" : "Schema",
"@Schema" : {}
},
"empty.Container" : {
"$kind" : "EntityContainer"
},
"name.space.BadContainer" : {
"$kind" : "EntityContainer",
"DanglingActionImport" : {
"$kind" : "ActionImport",
"$Action" : "not.Found"
},
"DanglingFunctionImport" : {
"$kind" : "FunctionImport",
"$Function" : "not.Found"
}
},
"name.space.Broken" : {
"$kind" : "Term",
"$Type" : "not.Found"
},
"name.space.BrokenFunction" : [{
"$kind" : "Function",
"$ReturnType" : {
"$Type" : "not.Found"
}
}],
"name.space.BrokenOverloads" : [{
"$kind" : "Operation"
}],
"name.space.DerivedPrimitiveFunction" : [{
"$kind" : "Function",
"$ReturnType" : {
"$Type" : "name.space.Id"
}
}],
"name.space.EmptyOverloads" : [],
"name.space.Id" : {
"$kind" : "TypeDefinition",
"$UnderlyingType" : "Edm.String",
"$MaxLength" : 10
},
"name.space.Term" : { // only case with a qualified name and a $Type
"$kind" : "Term",
"$Type" : "tea_busi.Worker"
},
"name.space.OverloadedAction" : [{
"$kind" : "Action",
"$IsBound" : true,
"$Parameter" : [{
// "$Name" : "_it",
"$Type" : "tea_busi.EQUIPMENT"
}],
"$ReturnType" : {
"$Type" : "tea_busi.EQUIPMENT"
}
}, {
"$kind" : "Action",
"$IsBound" : true,
"$Parameter" : [{
// "$Name" : "_it",
"$Type" : "tea_busi.TEAM"
}],
"$ReturnType" : {
"$Type" : "tea_busi.TEAM"
}
}, { // "An unbound action MAY have the same name as a bound action."
"$kind" : "Action",
"$ReturnType" : {
"$Type" : "tea_busi.ComplexType_Salary"
}
}, {
"$kind" : "Action",
"$IsBound" : true,
"$Parameter" : [{
// "$Name" : "_it",
"$Type" : "tea_busi.Worker"
}],
"$ReturnType" : {
"$Type" : "tea_busi.Worker"
}
}],
"name.space.OverloadedFunction" : [{
"$kind" : "Function",
"$ReturnType" : {
"$Type" : "Edm.String"
}
}, {
"$kind" : "Function",
"$ReturnType" : {
"$Type" : "Edm.String"
}
}],
"name.space.VoidAction" : [{
"$kind" : "Action"
}],
"tea_busi.AcChangeManagerOfTeam" : [{
"$kind" : "Action",
"$ReturnType" : {
"$Type" : "tea_busi.TEAM",
"@Common.Label" : "Hail to the Chief"
}
}],
"tea_busi.ComplexType_Salary" : {
"$kind" : "ComplexType",
"AMOUNT" : {
"$kind" : "Property",
"$Type" : "Edm.Decimal"
},
"CURRENCY" : {
"$kind" : "Property",
"$Type" : "Edm.String"
}
},
"tea_busi.ContainedC" : {
"$kind" : "EntityType",
"$Key" : ["Id"],
"Id" : {
"$kind" : "Property",
"$Type" : "Edm.String"
},
"C_2_EMPLOYEE" : {
"$kind" : "NavigationProperty",
"$Type" : "tea_busi.Worker"
},
"C_2_S" : {
"$ContainsTarget" : true,
"$kind" : "NavigationProperty",
"$Type" : "tea_busi.ContainedS"
}
},
"tea_busi.ContainedS" : {
"$kind" : "EntityType",
"$Key" : ["Id"],
"Id" : {
"$kind" : "Property",
"$Type" : "Edm.String"
},
"S_2_C" : {
"$ContainsTarget" : true,
"$kind" : "NavigationProperty",
"$isCollection" : true,
"$Type" : "tea_busi.ContainedC"
},
"S_2_EMPLOYEE" : {
"$kind" : "NavigationProperty",
"$Type" : "tea_busi.Worker"
}
},
"tea_busi.DefaultContainer" : {
"$kind" : "EntityContainer",
"ChangeManagerOfTeam" : {
"$kind" : "ActionImport",
"$Action" : "tea_busi.AcChangeManagerOfTeam"
},
"EMPLOYEES" : {
"$kind" : "EntitySet",
"$NavigationPropertyBinding" : {
"EMPLOYEE_2_TEAM" : "T€AMS",
"EMPLOYEE_2_EQUIPM€NTS" : "EQUIPM€NTS"
},
"$Type" : "tea_busi.Worker"
},
"EQUIPM€NTS" : {
"$kind" : "EntitySet",
"$Type" : "tea_busi.EQUIPMENT"
},
"GetEmployeeMaxAge" : {
"$kind" : "FunctionImport",
"$Function" : "tea_busi.FuGetEmployeeMaxAge"
},
"Me" : {
"$kind" : "Singleton",
"$NavigationPropertyBinding" : {
"EMPLOYEE_2_TEAM" : "T€AMS",
"EMPLOYEE_2_EQUIPM€NTS" : "EQUIPM€NTS"
},
"$Type" : "tea_busi.Worker"
},
"OverloadedAction" : {
"$kind" : "ActionImport",
"$Action" : "name.space.OverloadedAction"
},
"TEAMS" : {
"$kind" : "EntitySet",
"$NavigationPropertyBinding" : {
"TEAM_2_EMPLOYEES" : "EMPLOYEES",
"TEAM_2_CONTAINED_S/S_2_EMPLOYEE" : "EMPLOYEES"
},
"$Type" : "tea_busi.TEAM"
},
"T€AMS" : {
"$kind" : "EntitySet",
"$NavigationPropertyBinding" : {
"TEAM_2_EMPLOYEES" : "EMPLOYEES"
},
"$Type" : "tea_busi.TEAM"
},
"VoidAction" : {
"$kind" : "ActionImport",
"$Action" : "name.space.VoidAction"
}
},
"tea_busi.EQUIPMENT" : {
"$kind" : "EntityType",
"$Key" : ["ID"],
"ID" : {
"$kind" : "Property",
"$Type" : "Edm.Int32",
"$Nullable" : false
}
},
"tea_busi.FuGetEmployeeMaxAge" : [{
"$kind" : "Function",
"$ReturnType" : {
"$Type" : "Edm.Int16"
}
}],
"tea_busi.TEAM" : {
"$kind" : "EntityType",
"$Key" : ["Team_Id"],
"Team_Id" : {
"$kind" : "Property",
"$Type" : "name.space.Id",
"$Nullable" : false,
"$MaxLength" : 10
},
"Name" : {
"$kind" : "Property",
"$Type" : "Edm.String",
"$Nullable" : false,
"$MaxLength" : 40
},
"TEAM_2_EMPLOYEES" : {
"$kind" : "NavigationProperty",
"$isCollection" : true,
"$OnDelete" : "None",
"$OnDelete@Common.Label" : "None of my business",
"$ReferentialConstraint" : {
"foo" : "bar",
"foo@Common.Label" : "Just a Gigolo"
},
"$Type" : "tea_busi.Worker"
},
"TEAM_2_CONTAINED_S" : {
"$ContainsTarget" : true,
"$kind" : "NavigationProperty",
"$Type" : "tea_busi.ContainedS"
},
"TEAM_2_CONTAINED_C" : {
"$ContainsTarget" : true,
"$kind" : "NavigationProperty",
"$isCollection" : true,
"$Type" : "tea_busi.ContainedC"
},
// Note: "value" is a symbolic name for an operation's return type iff. it is
// primitive
"value" : {
"$kind" : "Property",
"$Type" : "Edm.String"
}
},
"tea_busi.Worker" : {
"$kind" : "EntityType",
"$Key" : ["ID"],
"ID" : {
"$kind" : "Property",
"$Type" : "Edm.String",
"$Nullable" : false,
"$MaxLength" : 4
},
"AGE" : {
"$kind" : "Property",
"$Type" : "Edm.Int16",
"$Nullable" : false
},
"EMPLOYEE_2_CONTAINED_S" : {
"$ContainsTarget" : true,
"$kind" : "NavigationProperty",
"$Type" : "tea_busi.ContainedS"
},
"EMPLOYEE_2_EQUIPM€NTS" : {
"$kind" : "NavigationProperty",
"$isCollection" : true,
"$Type" : "tea_busi.EQUIPMENT",
"$Nullable" : false
},
"EMPLOYEE_2_TEAM" : {
"$kind" : "NavigationProperty",
"$Type" : "tea_busi.TEAM",
"$Nullable" : false
},
"SALÃRY" : {
"$kind" : "Property",
"$Type" : "tea_busi.ComplexType_Salary"
}
},
"$$Loop" : "$$Loop/", // some endless loop
"$$Term" : "name.space.Term" // replacement for any reference to the term
},
oContainerData = mScope["tea_busi.DefaultContainer"],
aOverloadedAction = mScope["name.space.OverloadedAction"],
mSupplierScope = {
"$Version" : "4.0",
"tea_busi_supplier.v0001." : {
"$kind" : "Schema"
},
"tea_busi_supplier.v0001.Supplier" : {
"$kind" : "EntityType",
"Supplier_Name" : {
"$kind" : "Property",
"$Type" : "Edm.String"
}
}
},
oTeamData = mScope["tea_busi.TEAM"],
oTeamLineItem = mScope.$Annotations["tea_busi.TEAM"]["@UI.LineItem"],
oWorkerData = mScope["tea_busi.Worker"],
mXServiceScope = {
"$Version" : "4.0",
"$Annotations" : {}, // simulate ODataMetaModel#_mergeAnnotations
"$EntityContainer" : "tea_busi.v0001.DefaultContainer",
"$Reference" : {
// Note: Do not reference tea_busi_supplier directly from here! We want to test the
// special case that it is only indirectly referenced.
"../../../../default/iwbep/tea_busi_foo/0001/$metadata" : {
"$Include" : [
"tea_busi_foo.v0001."
]
},
"../../../../default/iwbep/tea_busi_product/0001/$metadata" : {
"$Include" : [
"ignore.me.",
"tea_busi_product.v0001."
]
},
"/empty/$metadata" : {
"$Include" : [
"empty.",
"I.still.haven't.found.what.I'm.looking.for."
]
}
},
"tea_busi.v0001." : {
"$kind" : "Schema"
},
"tea_busi.v0001.DefaultContainer" : {
"$kind" : "EntityContainer",
"EQUIPM€NTS" : {
"$kind" : "EntitySet",
"$Type" : "tea_busi.v0001.EQUIPMENT"
}
},
"tea_busi.v0001.EQUIPMENT" : {
"$kind" : "EntityType",
"EQUIPMENT_2_PRODUCT" : {
"$kind" : "NavigationProperty",
"$Type" : "tea_busi_product.v0001.Product"
}
}
},
aAllScopes = [
mMostlyEmptyScope,
mProductScope,
mScope,
mSupplierScope,
mXServiceScope
];
/**
* Checks the "get*" and "request*" methods corresponding to the named "fetch*" method,
* using the given arguments.
*
* @param {object} oTestContext
* the QUnit "this" object
* @param {object} assert
* the QUnit "assert" object
* @param {string} sMethodName
* method name "fetch*"
* @param {object[]} aArguments
* method arguments
* @param {boolean} [bThrow=false]
* whether the "get*" method throws if the promise is not fulfilled
* @returns {Promise}
* the "request*" method's promise
*/
function checkGetAndRequest(oTestContext, assert, sMethodName, aArguments, bThrow) {
var oExpectation,
sGetMethodName = sMethodName.replace("fetch", "get"),
oMetaModel = oTestContext.oMetaModel,
oReason = new Error("rejected"),
oRejectedPromise = Promise.reject(oReason),
sRequestMethodName = sMethodName.replace("fetch", "request"),
oResult = {},
oSyncPromise = SyncPromise.resolve(oRejectedPromise);
// resolve...
oExpectation = oTestContext.mock(oMetaModel).expects(sMethodName).exactly(4);
oExpectation = oExpectation.withExactArgs.apply(oExpectation, aArguments);
oExpectation.returns(SyncPromise.resolve(oResult));
// get: fulfilled
assert.strictEqual(oMetaModel[sGetMethodName].apply(oMetaModel, aArguments), oResult);
// reject...
oExpectation.returns(oSyncPromise);
oTestContext.mock(Promise).expects("resolve")
.withExactArgs(sinon.match.same(oSyncPromise))
.returns(oRejectedPromise); // return any promise (this is not unwrapping!)
// request (promise still pending!)
assert.strictEqual(oMetaModel[sRequestMethodName].apply(oMetaModel, aArguments),
oRejectedPromise);
// get: pending
if (bThrow) {
assert.throws(function () {
oMetaModel[sGetMethodName].apply(oMetaModel, aArguments);
}, new Error("Result pending"));
} else {
assert.strictEqual(oMetaModel[sGetMethodName].apply(oMetaModel, aArguments), undefined,
"pending");
}
return oSyncPromise.catch(function () {
// get: rejected
if (bThrow) {
assert.throws(function () {
oMetaModel[sGetMethodName].apply(oMetaModel, aArguments);
}, oReason);
} else {
assert.strictEqual(oMetaModel[sGetMethodName].apply(oMetaModel, aArguments),
undefined, "rejected");
}
});
}
/**
* Returns a clone, that is a deep copy, of the given object.
*
* @param {object} o
* any serializable object
* @returns {object}
* a deep copy of <code>o</code>
*/
function clone(o) {
return JSON.parse(JSON.stringify(o));
}
/**
* Runs the given test for each name/value pair in the given fixture. The name is interpreted
* as a path "[<sContextPath>'|']<sMetaPath>" and cut accordingly. The test is called with
* an almost resolved sPath (just '|' replaced by '/').
*
* @param {object} mFixture
* map<string, any>
* @param {function} fnTest
* function(string sPath, any vResult, string sContextPath, string sMetaPath)
*/
function forEach(mFixture, fnTest) {
var sPath;
for (sPath in mFixture) {
var i = sPath.indexOf("|"),
sContextPath = "",
sMetaPath = sPath.slice(i + 1),
vValue = mFixture[sPath];
if (i >= 0) {
sContextPath = sPath.slice(0, i);
sPath = sContextPath + "/" + sMetaPath;
}
fnTest(sPath, vValue, sContextPath, sMetaPath);
}
}
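// Example: the fixture key "/foo|bar" is cut into sContextPath "/foo" and
// sMetaPath "bar", and fnTest is called with the resolved sPath "/foo/bar".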
//*********************************************************************************************
QUnit.module("sap.ui.model.odata.v4.ODataMetaModel", {
// remember copy to ensure test isolation
mOriginalScopes : clone(aAllScopes),
afterEach : function (assert) {
assert.deepEqual(aAllScopes, this.mOriginalScopes, "metadata unchanged");
},
/*
* Allow warnings if told to; always suppress debug messages.
*/
allowWarnings : function (assert, bWarn) {
this.mock(jQuery.sap.log).expects("isLoggable").atLeast(1)
.withExactArgs(sinon.match.number, sODataMetaModel)
.callsFake(function (iLogLevel) {
switch (iLogLevel) {
case jQuery.sap.log.Level.DEBUG:
return false;
case jQuery.sap.log.Level.WARNING:
return bWarn;
default:
return true;
}
});
},
beforeEach : function () {
var oMetadataRequestor = {
read : function () { throw new Error(); }
},
sUrl = "/a/b/c/d/e/$metadata";
this.oLogMock = this.mock(jQuery.sap.log);
this.oLogMock.expects("warning").never();
this.oLogMock.expects("error").never();
this.oMetaModel = new ODataMetaModel(oMetadataRequestor, sUrl);
this.oMetaModelMock = this.mock(this.oMetaModel);
this.oModel = {
reportError : function () {
throw new Error("Unsupported operation");
},
resolve : ODataModel.prototype.resolve
};
},
/*
* Expect the given debug message with the given path, but only if debug level is on.
*/
expectDebug : function (bDebug, sMessage, sPath) {
this.oLogMock.expects("isLoggable")
.withExactArgs(jQuery.sap.log.Level.DEBUG, sODataMetaModel).returns(bDebug);
this.oLogMock.expects("debug").exactly(bDebug ? 1 : 0)
.withExactArgs(sMessage, sPath, sODataMetaModel);
},
/*
* Expects "fetchEntityContainer" to be called at least once on the current meta model,
* returning a clone of the given scope.
*
* @param {object} mScope
*/
expectFetchEntityContainer : function (mScope) {
mScope = clone(mScope);
this.oMetaModel.validate("n/a", mScope); // fill mSchema2MetadataUrl!
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(mScope));
}
});
//*********************************************************************************************
QUnit.test("basics", function (assert) {
var sAnnotationUri = "my/annotation.xml",
aAnnotationUris = [ sAnnotationUri, "uri2.xml"],
oModel = {},
oMetadataRequestor = this.oMetaModel.oRequestor,
sUrl = "/~/$metadata",
oMetaModel;
// code under test
assert.strictEqual(ODataMetaModel.prototype.$$valueAsPromise, true);
// code under test
oMetaModel = new ODataMetaModel(oMetadataRequestor, sUrl);
assert.ok(oMetaModel instanceof MetaModel);
assert.strictEqual(oMetaModel.aAnnotationUris, undefined);
assert.ok(oMetaModel.hasOwnProperty("aAnnotationUris"), "own property aAnnotationUris");
assert.strictEqual(oMetaModel.oRequestor, oMetadataRequestor);
assert.strictEqual(oMetaModel.sUrl, sUrl);
assert.strictEqual(oMetaModel.getDefaultBindingMode(), BindingMode.OneTime);
assert.strictEqual(oMetaModel.toString(),
"sap.ui.model.odata.v4.ODataMetaModel: /~/$metadata");
// code under test
oMetaModel.setDefaultBindingMode(BindingMode.OneWay);
assert.strictEqual(oMetaModel.getDefaultBindingMode(), BindingMode.OneWay);
// code under test
oMetaModel = new ODataMetaModel(oMetadataRequestor, sUrl, aAnnotationUris);
assert.strictEqual(oMetaModel.aAnnotationUris, aAnnotationUris, "arrays are passed");
// code under test
oMetaModel = new ODataMetaModel(oMetadataRequestor, sUrl, sAnnotationUri);
assert.deepEqual(oMetaModel.aAnnotationUris, [sAnnotationUri],
"single annotation is wrapped");
// code under test
oMetaModel = new ODataMetaModel(null, null, null, oModel);
// code under test
assert.strictEqual(oMetaModel.getAdapterFactoryModulePath(),
"sap/ui/model/odata/v4/meta/ODataAdapterFactory");
});
//*********************************************************************************************
QUnit.test("forbidden", function (assert) {
assert.throws(function () { //TODO implement
this.oMetaModel.bindTree();
}, new Error("Unsupported operation: v4.ODataMetaModel#bindTree"));
assert.throws(function () {
this.oMetaModel.getOriginalProperty();
}, new Error("Unsupported operation: v4.ODataMetaModel#getOriginalProperty"));
assert.throws(function () { //TODO implement
this.oMetaModel.isList();
}, new Error("Unsupported operation: v4.ODataMetaModel#isList"));
assert.throws(function () {
this.oMetaModel.refresh();
}, new Error("Unsupported operation: v4.ODataMetaModel#refresh"));
assert.throws(function () {
this.oMetaModel.setLegacySyntax(); // argument does not matter!
}, new Error("Unsupported operation: v4.ODataMetaModel#setLegacySyntax"));
assert.throws(function () {
this.oMetaModel.setDefaultBindingMode(BindingMode.TwoWay);
});
});
//*********************************************************************************************
[
undefined,
["/my/annotation.xml"],
["/my/annotation.xml", "/another/annotation.xml"]
].forEach(function (aAnnotationURI) {
var title = "fetchEntityContainer - " + JSON.stringify(aAnnotationURI);
QUnit.test(title, function (assert) {
var oRequestorMock = this.mock(this.oMetaModel.oRequestor),
aReadResults,
mRootScope = {},
oSyncPromise,
that = this;
function expectReads(bPrefetch) {
oRequestorMock.expects("read")
.withExactArgs(that.oMetaModel.sUrl, false, bPrefetch)
.returns(Promise.resolve(mRootScope));
aReadResults = [];
(aAnnotationURI || []).forEach(function (sAnnotationUrl) {
var oAnnotationResult = {};
aReadResults.push(oAnnotationResult);
oRequestorMock.expects("read")
.withExactArgs(sAnnotationUrl, true, bPrefetch)
.returns(Promise.resolve(oAnnotationResult));
});
}
this.oMetaModel.aAnnotationUris = aAnnotationURI;
this.oMetaModelMock.expects("_mergeAnnotations").never();
expectReads(true);
// code under test
assert.strictEqual(this.oMetaModel.fetchEntityContainer(true), null);
// bPrefetch => no caching
expectReads(true);
// code under test
assert.strictEqual(this.oMetaModel.fetchEntityContainer(true), null);
// now test [bPrefetch=false]
expectReads();
this.oMetaModelMock.expects("_mergeAnnotations")
.withExactArgs(mRootScope, aReadResults);
// code under test
oSyncPromise = this.oMetaModel.fetchEntityContainer();
// pending
assert.strictEqual(oSyncPromise.isPending(), true);
// already caching
assert.strictEqual(this.oMetaModel.fetchEntityContainer(), oSyncPromise);
assert.strictEqual(this.oMetaModel.fetchEntityContainer(true), oSyncPromise,
"now bPrefetch makes no difference");
return oSyncPromise.then(function (mRootScope0) {
assert.strictEqual(mRootScope0, mRootScope);
// still caching
assert.strictEqual(that.oMetaModel.fetchEntityContainer(), oSyncPromise);
});
});
});
//TODO later support "$Extends" : "<13.1.2 EntityContainer Extends>"
//*********************************************************************************************
QUnit.test("fetchEntityContainer: _mergeAnnotations fails", function (assert) {
var oError = new Error();
this.mock(this.oMetaModel.oRequestor).expects("read")
.withExactArgs(this.oMetaModel.sUrl, false, undefined)
.returns(Promise.resolve({}));
this.oMetaModelMock.expects("_mergeAnnotations").throws(oError);
return this.oMetaModel.fetchEntityContainer().then(function () {
assert.ok(false, "unexpected success");
}, function (oError0) {
assert.strictEqual(oError0, oError);
});
});
//*********************************************************************************************
QUnit.test("getMetaContext", function (assert) {
var oMetaContext;
this.oMetaModelMock.expects("getMetaPath")
.withExactArgs("/Foo/-1/bar")
.returns("/Foo/bar");
// code under test
oMetaContext = this.oMetaModel.getMetaContext("/Foo/-1/bar");
assert.strictEqual(oMetaContext.getModel(), this.oMetaModel);
assert.strictEqual(oMetaContext.getPath(), "/Foo/bar");
});
//*********************************************************************************************
QUnit.test("getMetaPath", function (assert) {
var sMetaPath = {},
sPath = {};
this.mock(_Helper).expects("getMetaPath")
.withExactArgs(sinon.match.same(sPath)).returns(sMetaPath);
assert.strictEqual(this.oMetaModel.getMetaPath(sPath), sMetaPath);
});
//*********************************************************************************************
forEach({
// absolute path
"/" : "/",
"/foo/bar|/" : "/", // context is ignored
// relative path
"" : undefined, // w/o context --> important for MetaModel#createBindingContext etc.
"|foo/bar" : undefined, // w/o context
"/|" : "/",
"/|foo/bar" : "/foo/bar",
"/foo|bar" : "/foo/bar",
"/foo/bar|" : "/foo/bar",
"/foo/|bar" : "/foo/bar",
// trailing slash is preserved
"/foo/bar/" : "/foo/bar/",
"/foo|bar/" : "/foo/bar/",
// relative path that starts with a dot
"/foo/bar|./" : "/foo/bar/",
"/foo|./bar/" : "/foo/bar/",
"/foo/|./bar/" : "/foo/bar/",
// annotations
"/foo|@bar" : "/foo@bar",
"/foo/|@bar" : "/foo/@bar",
"/foo|./@bar" : "/foo/@bar",
"/foo/|./@bar" : "/foo/@bar",
// technical properties
"/foo|$kind" : "/foo/$kind",
"/foo/|$kind" : "/foo/$kind",
"/foo|./$kind" : "/foo/$kind",
"/foo/|./$kind" : "/foo/$kind"
}, function (sPath, sResolvedPath, sContextPath, sMetaPath) {
QUnit.test("resolve: " + sContextPath + " > " + sMetaPath, function (assert) {
var oContext = sContextPath && this.oMetaModel.getContext(sContextPath);
assert.strictEqual(this.oMetaModel.resolve(sMetaPath, oContext), sResolvedPath);
});
});
//TODO make sure that Context objects are only created for absolute paths?!
//*********************************************************************************************
[".bar", ".@bar", ".$kind"].forEach(function (sPath) {
QUnit.test("resolve: unsupported relative path " + sPath, function (assert) {
var oContext = this.oMetaModel.getContext("/foo");
assert.raises(function () {
this.oMetaModel.resolve(sPath, oContext);
}, new Error("Unsupported relative path: " + sPath));
});
});
//*********************************************************************************************
QUnit.test("resolve: undefined", function (assert) {
assert.strictEqual(
this.oMetaModel.resolve(undefined, this.oMetaModel.getContext("/")),
"/");
});
//*********************************************************************************************
//TODO better map meta model path to pure JSON path (look up inside JsonModel)?
// what about @sapui.name then, which requires a literal as expected result?
// --> we could distinguish "/<path>" from "<literal>"
forEach({
// "JSON" drill-down ----------------------------------------------------------------------
"/$EntityContainer" : "tea_busi.DefaultContainer",
"/tea_busi./$kind" : "Schema",
"/tea_busi.DefaultContainer/$kind" : "EntityContainer",
// trailing slash: object vs. name --------------------------------------------------------
"/" : oContainerData,
"/$EntityContainer/" : oContainerData,
"/T€AMS/" : oTeamData,
"/T€AMS/$Type/" : oTeamData,
// scope lookup ("17.3 QualifiedName") ----------------------------------------------------
"/$EntityContainer/$kind" : "EntityContainer",
"/$EntityContainer/T€AMS/$Type" : "tea_busi.TEAM",
"/$EntityContainer/T€AMS/$Type/Team_Id" : oTeamData.Team_Id,
// "17.3 QualifiedName", e.g. type cast ---------------------------------------------------
"/tea_busi." : mScope["tea_busi."], // access to schema
"/tea_busi.DefaultContainer/EMPLOYEES/tea_busi.Worker/AGE" : oWorkerData.AGE,
// implicit $Type insertion ---------------------------------------------------------------
"/T€AMS/Team_Id" : oTeamData.Team_Id,
"/T€AMS/TEAM_2_EMPLOYEES" : oTeamData.TEAM_2_EMPLOYEES,
"/T€AMS/TEAM_2_EMPLOYEES/AGE" : oWorkerData.AGE,
// scope lookup, then implicit $Type insertion!
"/$$Term/AGE" : oWorkerData.AGE,
// "17.2 SimpleIdentifier": lookup inside current schema child ----------------------------
"/T€AMS" : oContainerData["T€AMS"],
"/T€AMS/$NavigationPropertyBinding/TEAM_2_EMPLOYEES/" : oWorkerData,
"/T€AMS/$NavigationPropertyBinding/TEAM_2_EMPLOYEES/$Type" : "tea_busi.Worker",
"/T€AMS/$NavigationPropertyBinding/TEAM_2_EMPLOYEES/AGE" : oWorkerData.AGE,
// operations -----------------------------------------------------------------------------
"/OverloadedAction" : oContainerData["OverloadedAction"],
"/OverloadedAction/$Action" : "name.space.OverloadedAction",
"/ChangeManagerOfTeam/" : oTeamData,
//TODO mScope[mScope["..."][0].$ReturnType.$Type] is where the next OData simple identifier
// would live in case of entity/complex type, but we would like to avoid warnings for
// primitive types - how to tell the difference?
// "/GetEmployeeMaxAge/" : "Edm.Int16",
// Note: "value" is a symbolic name for the whole return type iff. it is primitive
"/GetEmployeeMaxAge/value" : mScope["tea_busi.FuGetEmployeeMaxAge"][0].$ReturnType,
"/GetEmployeeMaxAge/value/$Type" : "Edm.Int16", // path may continue!
"/tea_busi.FuGetEmployeeMaxAge/value"
: mScope["tea_busi.FuGetEmployeeMaxAge"][0].$ReturnType,
"/name.space.DerivedPrimitiveFunction/value"
//TODO merge facets of return type and type definition?!
: mScope["name.space.DerivedPrimitiveFunction"][0].$ReturnType,
"/ChangeManagerOfTeam/value" : oTeamData.value,
// action overloads -----------------------------------------------------------------------
//TODO @$ui5.overload: support for split segments? etc.
"/OverloadedAction/@$ui5.overload" : sinon.match.array.deepEquals([aOverloadedAction[2]]),
"/OverloadedAction/@$ui5.overload/0" : aOverloadedAction[2],
// Note: trailing slash does not make a difference in "JSON" drill-down
"/OverloadedAction/@$ui5.overload/0/$ReturnType/" : aOverloadedAction[2].$ReturnType,
"/OverloadedAction/@$ui5.overload/0/$ReturnType/$Type" : "tea_busi.ComplexType_Salary",
"/OverloadedAction/" : mScope["tea_busi.ComplexType_Salary"],
"/name.space.OverloadedAction" : aOverloadedAction,
"/T€AMS/NotFound/name.space.OverloadedAction" : aOverloadedAction,
"/name.space.OverloadedAction/1" : aOverloadedAction[1],
"/OverloadedAction/$Action/1" : aOverloadedAction[1],
"/OverloadedAction/@$ui5.overload/AMOUNT" : mScope["tea_busi.ComplexType_Salary"].AMOUNT,
"/OverloadedAction/AMOUNT" : mScope["tea_busi.ComplexType_Salary"].AMOUNT,
"/T€AMS/name.space.OverloadedAction/Team_Id" : oTeamData.Team_Id,
"/T€AMS/name.space.OverloadedAction/@$ui5.overload"
: sinon.match.array.deepEquals([aOverloadedAction[1]]),
"/name.space.OverloadedAction/@$ui5.overload" : sinon.match.array.deepEquals([]),
// only "Action" and "Function" is expected as $kind, but others are not filtered out!
"/name.space.BrokenOverloads"
: sinon.match.array.deepEquals(mScope["name.space.BrokenOverloads"]),
// annotations ----------------------------------------------------------------------------
"/@DefaultContainer"
: mScope.$Annotations["tea_busi.DefaultContainer"]["@DefaultContainer"],
"/tea_busi.DefaultContainer@DefaultContainer"
: mScope.$Annotations["tea_busi.DefaultContainer"]["@DefaultContainer"],
"/tea_busi.DefaultContainer/@DefaultContainer" // w/o $Type, slash makes no difference!
: mScope.$Annotations["tea_busi.DefaultContainer"]["@DefaultContainer"],
"/$EntityContainer@DefaultContainer" // Note: we could change this
: mScope.$Annotations["tea_busi.DefaultContainer"]["@DefaultContainer"],
"/$EntityContainer/@DefaultContainer" // w/o $Type, slash makes no difference!
: mScope.$Annotations["tea_busi.DefaultContainer"]["@DefaultContainer"],
"/T€AMS/$Type/@UI.LineItem" : oTeamLineItem,
"/T€AMS/@UI.LineItem" : oTeamLineItem,
"/T€AMS/@UI.LineItem/0/Label" : oTeamLineItem[0].Label,
"/T€AMS/@UI.LineItem/0/@UI.Importance" : oTeamLineItem[0]["@UI.Importance"],
"/T€AMS@T€AMS"
: mScope.$Annotations["tea_busi.DefaultContainer/T€AMS"]["@T€AMS"],
"/T€AMS/@Common.Text"
: mScope.$Annotations["tea_busi.TEAM"]["@Common.Text"],
"/T€AMS/@Common.Text@UI.TextArrangement"
: mScope.$Annotations["tea_busi.TEAM"]["@Common.Text@UI.TextArrangement"],
"/T€AMS/Team_Id@Common.Text"
: mScope.$Annotations["tea_busi.TEAM/Team_Id"]["@Common.Text"],
"/T€AMS/Team_Id@Common.Text@UI.TextArrangement"
: mScope.$Annotations["tea_busi.TEAM/Team_Id"]["@Common.Text@UI.TextArrangement"],
"/tea_busi./@Schema" : mScope["tea_busi."]["@Schema"],
// inline annotations
"/ChangeManagerOfTeam/$Action/0/$ReturnType/@Common.Label" : "Hail to the Chief",
"/T€AMS/TEAM_2_EMPLOYEES/$OnDelete@Common.Label" : "None of my business",
"/T€AMS/TEAM_2_EMPLOYEES/$ReferentialConstraint/foo@Common.Label" : "Just a Gigolo",
"/T€AMS/@UI.LineItem/0/Label@Common.Label" : "Team ID's Label",
"/T€AMS/@UI.Badge@Common.Label" : "Best Badge Ever!", // annotation of annotation
"/T€AMS/@UI.Badge/@Common.Label" : "Label inside", // annotation of record
// "@" to access to all annotations, e.g. for iteration
"/T€AMS@" : mScope.$Annotations["tea_busi.DefaultContainer/T€AMS"],
"/T€AMS/@" : mScope.$Annotations["tea_busi.TEAM"],
"/T€AMS/Team_Id@" : mScope.$Annotations["tea_busi.TEAM/Team_Id"],
// "14.5.12 Expression edm:Path"
// Note: see integration test "{field>Value/$Path@com.sap.vocabularies.Common.v1.Label}"
"/T€AMS/@UI.LineItem/0/Value/$Path@Common.Text"
: mScope.$Annotations["tea_busi.TEAM/Team_Id"]["@Common.Text"],
"/T€AMS/@UI.LineItem/0/Value/$Path/@Common.Label"
: mScope.$Annotations["name.space.Id"]["@Common.Label"],
"/EMPLOYEES/@UI.LineItem/0/Value/$Path@Common.Text"
: mScope.$Annotations["tea_busi.TEAM/Team_Id"]["@Common.Text"],
// "14.5.2 Expression edm:AnnotationPath"
"/EMPLOYEES/@UI.Facets/0/Target/$AnnotationPath/"
: mScope.$Annotations["tea_busi.Worker"]["@UI.LineItem"],
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath/"
: mScope.$Annotations["tea_busi.Worker/EMPLOYEE_2_TEAM"]["@Common.Label"],
"/EMPLOYEES/@UI.Facets/2/Target/$AnnotationPath/"
: mScope.$Annotations["tea_busi.TEAM"]["@UI.LineItem"],
"/EMPLOYEES/@UI.Facets/3/Target/$AnnotationPath/"
: mScope.$Annotations["tea_busi.TEAM"]["@UI.LineItem"],
// @sapui.name ----------------------------------------------------------------------------
"/@sapui.name" : "tea_busi.DefaultContainer",
"/tea_busi.DefaultContainer@sapui.name" : "tea_busi.DefaultContainer",
"/tea_busi.DefaultContainer/@sapui.name" : "tea_busi.DefaultContainer", // no $Type here!
"/$EntityContainer/@sapui.name" : "tea_busi.DefaultContainer",
"/T€AMS@sapui.name" : "T€AMS",
"/T€AMS/@sapui.name" : "tea_busi.TEAM",
"/T€AMS/Team_Id@sapui.name" : "Team_Id",
"/T€AMS/TEAM_2_EMPLOYEES@sapui.name" : "TEAM_2_EMPLOYEES",
"/T€AMS/$NavigationPropertyBinding/TEAM_2_EMPLOYEES/@sapui.name" : "tea_busi.Worker",
"/T€AMS/$NavigationPropertyBinding/TEAM_2_EMPLOYEES/AGE@sapui.name" : "AGE",
"/T€AMS@T€AMS@sapui.name" : "@T€AMS",
"/T€AMS@/@T€AMS@sapui.name" : "@T€AMS",
"/T€AMS@T€AMS/@sapui.name" : "@T€AMS", // no $Type inside @T€AMS, / makes no difference!
"/T€AMS@/@T€AMS/@sapui.name" : "@T€AMS", // dito
"/T€AMS/@UI.LineItem/0/@UI.Importance/@sapui.name" : "@UI.Importance", // in "JSON" mode
"/T€AMS/Team_Id@/@Common.Label@sapui.name" : "@Common.Label" // avoid indirection here!
}, function (sPath, vResult) {
QUnit.test("fetchObject: " + sPath, function (assert) {
var oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
if (vResult && typeof vResult === "object" && "test" in vResult) {
// Sinon.JS matcher
assert.ok(vResult.test(oSyncPromise.getResult()), vResult);
} else {
assert.strictEqual(oSyncPromise.getResult(), vResult);
}
// self-guard: make sure that a complex right-hand side did not evaluate to undefined
assert.notStrictEqual(vResult, undefined, "use this test for defined results only!");
});
});
//TODO annotations at enum member ".../<10.2.1 Member Name>@..." (Note: "<10.2.2 Member Value>"
// might be a string! Avoid indirection!)
//TODO special cases where inline and external targeting annotations need to be merged!
//TODO support also external targeting from a different schema!
//TODO MySchema.MyFunction/MyParameter --> requires search in array?!
//TODO $count?
//TODO "For annotations targeting a property of an entity type or complex type, the path
// expression is evaluated starting at the outermost entity type or complex type named in the
// Target of the enclosing edm:Annotations element, i.e. an empty path resolves to the
// outermost type, and the first segment of a non-empty path MUST be a property or navigation
// property of the outermost type, a type cast, or a term cast." --> consequences for us?
//*********************************************************************************************
[
// "JSON" drill-down ----------------------------------------------------------------------
"/$missing",
"/tea_busi.DefaultContainer/$missing",
"/tea_busi.DefaultContainer/missing", // "17.2 SimpleIdentifier" treated like any property
"/tea_busi.FuGetEmployeeMaxAge/0/tea_busi.FuGetEmployeeMaxAge", // "0" switches to JSON
"/tea_busi.TEAM/$Key/this.is.missing",
"/tea_busi.Worker/missing", // entity container (see above) treated like any schema child
// scope lookup ("17.3 QualifiedName") ----------------------------------------------------
"/$EntityContainer/$missing",
"/$EntityContainer/missing",
// implicit $Type insertion ---------------------------------------------------------------
"/T€AMS/$Key", // avoid $Type insertion for following $ segments
"/T€AMS/missing",
"/T€AMS/$missing",
// annotations ----------------------------------------------------------------------------
"/tea_busi.Worker@missing",
"/tea_busi.Worker/@missing",
// "@" to access to all annotations, e.g. for iteration
"/tea_busi.Worker/@/@missing",
// operations -----------------------------------------------------------------------------
"/VoidAction/"
].forEach(function (sPath) {
QUnit.test("fetchObject: " + sPath + " --> undefined", function (assert) {
var oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.strictEqual(oSyncPromise.getResult(), undefined);
});
});
//*********************************************************************************************
QUnit.test("fetchObject: Invalid relative path w/o context", function (assert) {
var sMetaPath = "some/relative/path",
oSyncPromise;
this.oLogMock.expects("error").withExactArgs("Invalid relative path w/o context", sMetaPath,
sODataMetaModel);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sMetaPath, null);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.strictEqual(oSyncPromise.getResult(), null);
});
//*********************************************************************************************
["/empty.Container/@", "/T€AMS/Name@"].forEach(function (sPath) {
QUnit.test("fetchObject returns {} (anonymous empty object): " + sPath, function (assert) {
var oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.deepEqual(oSyncPromise.getResult(), {}); // strictEqual would not work!
});
});
//*********************************************************************************************
QUnit.test("fetchObject without $Annotations", function (assert) {
var oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mMostlyEmptyScope));
// code under test
oSyncPromise = this.oMetaModel.fetchObject("/@DefaultContainer");
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.deepEqual(oSyncPromise.getResult(), undefined);
});
//TODO if no annotations exist for an external target, avoid {} internally unless "@" is used?
//*********************************************************************************************
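// Each fixture below is run twice, with warning logging switched off and on, to check
// that the warning is only logged when isLoggable reports the WARNING level as active.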
[false, true].forEach(function (bWarn) {
forEach({
"/$$Loop/" : "Invalid recursion at /$$Loop",
// Invalid segment (warning) ----------------------------------------------------------
"//$Foo" : "Invalid empty segment",
"/tea_busi./$Annotations" : "Invalid segment: $Annotations", // entrance forbidden!
// Unknown ... ------------------------------------------------------------------------
"/not.Found" : "Unknown qualified name not.Found",
"/Me/not.Found" : "Unknown qualified name not.Found", // no "at /.../undefined"!
"/not.Found@missing" : "Unknown qualified name not.Found",
"/." : "Unknown child . of tea_busi.DefaultContainer",
"/Foo" : "Unknown child Foo of tea_busi.DefaultContainer",
"/$EntityContainer/$kind/" : "Unknown child EntityContainer"
+ " of tea_busi.DefaultContainer at /$EntityContainer/$kind",
// implicit $Action, $Function, $Type insertion
"/name.space.BadContainer/DanglingActionImport/" : "Unknown qualified name not.Found"
+ " at /name.space.BadContainer/DanglingActionImport/$Action",
"/name.space.BadContainer/DanglingFunctionImport/" :
"Unknown qualified name not.Found"
+ " at /name.space.BadContainer/DanglingFunctionImport/$Function",
"/name.space.Broken/" :
"Unknown qualified name not.Found at /name.space.Broken/$Type",
"/name.space.BrokenFunction/" : "Unknown qualified name not.Found"
+ " at /name.space.BrokenFunction/0/$ReturnType/$Type",
//TODO align with "/GetEmployeeMaxAge/" : "Edm.Int16"
"/GetEmployeeMaxAge/@sapui.name" : "Unknown qualified name Edm.Int16"
+ " at /tea_busi.FuGetEmployeeMaxAge/0/$ReturnType/$Type",
"/GetEmployeeMaxAge/value/@sapui.name" : "Unknown qualified name Edm.Int16"
+ " at /tea_busi.FuGetEmployeeMaxAge/0/$ReturnType/$Type",
// implicit scope lookup
"/name.space.Broken/$Type/" :
"Unknown qualified name not.Found at /name.space.Broken/$Type",
"/tea_busi.DefaultContainer/$kind/@sapui.name" : "Unknown child EntityContainer"
+ " of tea_busi.DefaultContainer at /tea_busi.DefaultContainer/$kind",
// Unsupported path before @sapui.name ------------------------------------------------
"/$EntityContainer@sapui.name" : "Unsupported path before @sapui.name",
"/tea_busi.FuGetEmployeeMaxAge/0@sapui.name" : "Unsupported path before @sapui.name",
"/tea_busi.TEAM/$Key/not.Found/@sapui.name" : "Unsupported path before @sapui.name",
"/GetEmployeeMaxAge/value@sapui.name" : "Unsupported path before @sapui.name",
// Unsupported path after @sapui.name -------------------------------------------------
"/@sapui.name/foo" : "Unsupported path after @sapui.name",
"/$EntityContainer/T€AMS/@sapui.name/foo" : "Unsupported path after @sapui.name",
// Unsupported path after @@... -------------------------------------------------------
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath@@this.is.ignored/foo"
: "Unsupported path after @@this.is.ignored",
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath/@@this.is.ignored@foo"
: "Unsupported path after @@this.is.ignored",
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath@@this.is.ignored@sapui.name"
: "Unsupported path after @@this.is.ignored",
// ...is not a function but... --------------------------------------------------------
"/@@sap.ui.model.odata.v4.AnnotationHelper.invalid"
: "sap.ui.model.odata.v4.AnnotationHelper.invalid is not a function but: undefined",
"/@@sap.ui.model.odata.v4.AnnotationHelper"
: "sap.ui.model.odata.v4.AnnotationHelper is not a function but: "
+ sap.ui.model.odata.v4.AnnotationHelper,
// Unsupported overloads --------------------------------------------------------------
"/name.space.EmptyOverloads/" : "Unsupported overloads",
"/name.space.OverloadedAction/" : "Unsupported overloads",
"/name.space.OverloadedFunction/" : "Unsupported overloads"
}, function (sPath, sWarning) {
QUnit.test("fetchObject fails: " + sPath + ", warn = " + bWarn, function (assert) {
var oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
this.oLogMock.expects("isLoggable")
.withExactArgs(jQuery.sap.log.Level.WARNING, sODataMetaModel).returns(bWarn);
this.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs(sWarning, sPath, sODataMetaModel);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.deepEqual(oSyncPromise.getResult(), undefined);
});
});
});
//*********************************************************************************************
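// As above, but these invalid segments are only reported at DEBUG level.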
[false, true].forEach(function (bDebug) {
forEach({
// Invalid segment (debug) ------------------------------------------------------------
"/$Foo/@bar" : "Invalid segment: @bar",
"/$Foo/$Bar" : "Invalid segment: $Bar",
"/$Foo/$Bar/$Baz" : "Invalid segment: $Bar",
"/$EntityContainer/T€AMS/Team_Id/$MaxLength/." : "Invalid segment: .",
"/$EntityContainer/T€AMS/Team_Id/$Nullable/." : "Invalid segment: .",
"/$EntityContainer/T€AMS/Team_Id/NotFound/Invalid" : "Invalid segment: Invalid"
}, function (sPath, sMessage) {
QUnit.test("fetchObject fails: " + sPath + ", debug = " + bDebug, function (assert) {
var oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
this.oLogMock.expects("isLoggable")
.withExactArgs(jQuery.sap.log.Level.DEBUG, sODataMetaModel).returns(bDebug);
this.oLogMock.expects("debug").exactly(bDebug ? 1 : 0)
.withExactArgs(sMessage, sPath, sODataMetaModel);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.deepEqual(oSyncPromise.getResult(), undefined);
});
});
});
//*********************************************************************************************
[
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath",
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath/"
].forEach(function (sPath) {
QUnit.test("fetchObject: " + sPath + "@@...isMultiple", function (assert) {
var oContext,
oInput,
fnIsMultiple = this.mock(AnnotationHelper).expects("isMultiple"),
oResult = {},
oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(1) // see oInput
.returns(SyncPromise.resolve(mScope));
oInput = this.oMetaModel.getObject(sPath);
fnIsMultiple
.withExactArgs(oInput, sinon.match({
context : sinon.match.object,
schemaChildName : "tea_busi.Worker"
})).returns(oResult);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath
+ "@@sap.ui.model.odata.v4.AnnotationHelper.isMultiple");
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.strictEqual(oSyncPromise.getResult(), oResult);
oContext = fnIsMultiple.args[0][1].context;
assert.ok(oContext instanceof BaseContext);
assert.strictEqual(oContext.getModel(), this.oMetaModel);
assert.strictEqual(oContext.getPath(), sPath);
assert.strictEqual(oContext.getObject(), oInput);
});
});
//*********************************************************************************************
(function () {
var mPathPrefix2SchemaChildName = {
"/EMPLOYEES/@UI.Facets/1/Target/$AnnotationPath" : "tea_busi.Worker",
"/T€AMS/@UI.LineItem/0/Value/$Path@Common.Label" : "tea_busi.TEAM",
"/T€AMS/@UI.LineItem/0/Value/$Path/@Common.Label" : "name.space.Id"
};
// Note: iterate via forEach so that each QUnit.test callback closes over its own
// sPathPrefix, sPath and sSchemaChildName; with the shared variables of a for-in loop,
// every callback would see only the last entry by the time the tests actually run
Object.keys(mPathPrefix2SchemaChildName).forEach(function (sPathPrefix) {
var sPath = sPathPrefix + "@@.computedAnnotation",
sSchemaChildName = mPathPrefix2SchemaChildName[sPathPrefix];
QUnit.test("fetchObject: " + sPath, function (assert) {
var fnComputedAnnotation,
oContext,
oInput,
oResult = {},
oScope = {
computedAnnotation : function () {}
},
oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(1) // see oInput
.returns(SyncPromise.resolve(mScope));
oInput = this.oMetaModel.getObject(sPathPrefix);
fnComputedAnnotation = this.mock(oScope).expects("computedAnnotation");
fnComputedAnnotation
.withExactArgs(oInput, sinon.match({
context : sinon.match.object,
schemaChildName : sSchemaChildName
})).returns(oResult);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath, null, {scope : oScope});
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.strictEqual(oSyncPromise.getResult(), oResult);
oContext = fnComputedAnnotation.args[0][1].context;
assert.ok(oContext instanceof BaseContext);
assert.strictEqual(oContext.getModel(), this.oMetaModel);
assert.strictEqual(oContext.getPath(), sPathPrefix);
assert.strictEqual(oContext.getObject(), oInput);
});
});
}());
//*********************************************************************************************
[false, true].forEach(function (bWarn) {
QUnit.test("fetchObject: " + "...@@... throws", function (assert) {
var oError = new Error("This call failed intentionally"),
sPath = "/@@sap.ui.model.odata.v4.AnnotationHelper.isMultiple",
oSyncPromise;
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
this.mock(AnnotationHelper).expects("isMultiple")
.throws(oError);
this.oLogMock.expects("isLoggable")
.withExactArgs(jQuery.sap.log.Level.WARNING, sODataMetaModel).returns(bWarn);
this.oLogMock.expects("warning").exactly(bWarn ? 1 : 0).withExactArgs(
"Error calling sap.ui.model.odata.v4.AnnotationHelper.isMultiple: " + oError,
sPath, sODataMetaModel);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.strictEqual(oSyncPromise.getResult(), undefined);
});
});
//*********************************************************************************************
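// Cross-service references: schemas included via $Reference/$Include are read on demand
// through the requestor and merged into the meta model's scope; the debug messages below
// trace the "found in $Include" / "Reading" / "Waiting" / "Including" steps.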
[false, true].forEach(function (bDebug) {
QUnit.test("fetchObject: cross-service reference, bDebug = " + bDebug, function (assert) {
var mClonedProductScope = clone(mProductScope),
aPromises = [],
oRequestorMock = this.mock(this.oMetaModel.oRequestor),
that = this;
/*
* Expect the given debug message with the given path.
*/
function expectDebug(sMessage, sPath) {
that.expectDebug(bDebug, sMessage, sPath);
}
/*
* Code under test: ODataMetaModel#fetchObject with the given path should yield the
* given expected result.
*/
function codeUnderTest(sPath, vExpectedResult) {
aPromises.push(that.oMetaModel.fetchObject(sPath).then(function (vResult) {
assert.strictEqual(vResult, vExpectedResult);
}));
}
this.expectFetchEntityContainer(mXServiceScope);
oRequestorMock.expects("read")
.withExactArgs("/a/default/iwbep/tea_busi_product/0001/$metadata")
.returns(Promise.resolve(mClonedProductScope));
oRequestorMock.expects("read")
.withExactArgs("/a/default/iwbep/tea_busi_supplier/0001/$metadata")
.returns(Promise.resolve(mSupplierScope));
oRequestorMock.expects("read")
.withExactArgs("/empty/$metadata")
.returns(Promise.resolve(mMostlyEmptyScope));
expectDebug("Namespace tea_busi_product.v0001. found in $Include"
+ " of /a/default/iwbep/tea_busi_product/0001/$metadata"
+ " at /tea_busi.v0001.EQUIPMENT/EQUIPMENT_2_PRODUCT/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/Name");
expectDebug("Reading /a/default/iwbep/tea_busi_product/0001/$metadata"
+ " at /tea_busi.v0001.EQUIPMENT/EQUIPMENT_2_PRODUCT/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/Name");
expectDebug("Waiting for tea_busi_product.v0001."
+ " at /tea_busi.v0001.EQUIPMENT/EQUIPMENT_2_PRODUCT/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/Name");
codeUnderTest("/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/Name",
mClonedProductScope["tea_busi_product.v0001.Product"].Name);
expectDebug("Waiting for tea_busi_product.v0001."
+ " at /tea_busi.v0001.EQUIPMENT/EQUIPMENT_2_PRODUCT/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_CATEGORY/CategoryName");
codeUnderTest("/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_CATEGORY/CategoryName",
mClonedProductScope["tea_busi_product.v0001.Category"].CategoryName);
expectDebug("Waiting for tea_busi_product.v0001.",
"/tea_busi_product.v0001.Category/CategoryName");
codeUnderTest("/tea_busi_product.v0001.Category/CategoryName",
mClonedProductScope["tea_busi_product.v0001.Category"].CategoryName);
expectDebug("Waiting for tea_busi_product.v0001.",
"/tea_busi_product.v0001.Category/CategoryName@Common.Label");
codeUnderTest("/tea_busi_product.v0001.Category/CategoryName@Common.Label",
"CategoryName from tea_busi_product.v0001.");
expectDebug("Waiting for tea_busi_product.v0001."
+ " at /tea_busi.v0001.EQUIPMENT/EQUIPMENT_2_PRODUCT/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_SUPPLIER/Supplier_Name");
codeUnderTest("/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_SUPPLIER/Supplier_Name",
mSupplierScope["tea_busi_supplier.v0001.Supplier"].Supplier_Name);
expectDebug("Namespace empty. found in $Include of /empty/$metadata",
"/empty.DefaultContainer");
expectDebug("Reading /empty/$metadata", "/empty.DefaultContainer");
expectDebug("Waiting for empty.",
"/empty.DefaultContainer");
codeUnderTest("/empty.DefaultContainer", mMostlyEmptyScope["empty.DefaultContainer"]);
// Note: these are logged asynchronously!
expectDebug("Including tea_busi_product.v0001."
+ " from /a/default/iwbep/tea_busi_product/0001/$metadata"
+ " at /tea_busi.v0001.EQUIPMENT/EQUIPMENT_2_PRODUCT/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/Name");
expectDebug("Including empty. from /empty/$metadata",
"/empty.DefaultContainer");
expectDebug("Namespace tea_busi_supplier.v0001. found in $Include"
+ " of /a/default/iwbep/tea_busi_supplier/0001/$metadata"
+ " at /tea_busi_product.v0001.Product/PRODUCT_2_SUPPLIER/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_SUPPLIER/Supplier_Name");
expectDebug("Reading /a/default/iwbep/tea_busi_supplier/0001/$metadata"
+ " at /tea_busi_product.v0001.Product/PRODUCT_2_SUPPLIER/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_SUPPLIER/Supplier_Name");
expectDebug("Waiting for tea_busi_supplier.v0001."
+ " at /tea_busi_product.v0001.Product/PRODUCT_2_SUPPLIER/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_SUPPLIER/Supplier_Name");
expectDebug("Including tea_busi_supplier.v0001."
+ " from /a/default/iwbep/tea_busi_supplier/0001/$metadata"
+ " at /tea_busi_product.v0001.Product/PRODUCT_2_SUPPLIER/$Type",
"/EQUIPM€NTS/EQUIPMENT_2_PRODUCT/PRODUCT_2_SUPPLIER/Supplier_Name");
return Promise.all(aPromises);
});
});
//TODO Decision: It is an error if a namespace is referenced multiple times with different URIs.
// This should be checked even when load-on-demand is used.
// (It should not even be included multiple times with the same URI!)
//TODO Check that no namespace is included which is already present!
//TODO API to load "transitive closure"
//TODO support for sync. XML Templating
//*********************************************************************************************
[false, true].forEach(function (bWarn) {
var sTitle = "fetchObject: missing cross-service reference, bWarn = " + bWarn;
QUnit.test(sTitle, function (assert) {
var sPath = "/not.found",
oSyncPromise;
this.expectFetchEntityContainer(mMostlyEmptyScope);
this.oLogMock.expects("isLoggable")
.withExactArgs(jQuery.sap.log.Level.WARNING, sODataMetaModel).returns(bWarn);
this.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs("Unknown qualified name not.found", sPath, sODataMetaModel);
// code under test
oSyncPromise = this.oMetaModel.fetchObject(sPath);
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.deepEqual(oSyncPromise.getResult(), undefined);
});
});
//*********************************************************************************************
[false, true].forEach(function (bWarn) {
var sTitle = "fetchObject: referenced metadata does not contain included schema, bWarn = "
+ bWarn;
QUnit.test(sTitle, function (assert) {
var sSchemaName = "I.still.haven't.found.what.I'm.looking.for.",
sQualifiedName = sSchemaName + "Child",
sPath = "/" + sQualifiedName;
this.expectFetchEntityContainer(mXServiceScope);
this.mock(this.oMetaModel.oRequestor).expects("read")
.withExactArgs("/empty/$metadata")
.returns(Promise.resolve(mMostlyEmptyScope));
this.allowWarnings(assert, bWarn);
this.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs("/empty/$metadata does not contain " + sSchemaName, sPath,
sODataMetaModel);
this.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs("Unknown qualified name " + sQualifiedName, sPath, sODataMetaModel);
// code under test
return this.oMetaModel.fetchObject(sPath).then(function (vResult) {
assert.deepEqual(vResult, undefined);
});
});
});
//*********************************************************************************************
[false, true].forEach(function (bWarn) {
var sTitle = "fetchObject: cross-service reference, respect $Include; bWarn = " + bWarn;
QUnit.test(sTitle, function (assert) {
var mScope0 = {
"$Version" : "4.0",
"$Reference" : {
"../../../../default/iwbep/tea_busi_product/0001/$metadata" : {
"$Include" : [
"not.found.",
"tea_busi_product.v0001.",
"tea_busi_supplier.v0001."
]
}
}
},
mReferencedScope = {
"$Version" : "4.0",
"must.not.be.included." : {
"$kind" : "Schema"
},
"tea_busi_product.v0001." : {
"$kind" : "Schema"
},
"tea_busi_supplier.v0001." : {
"$kind" : "Schema"
}
},
oRequestorMock = this.mock(this.oMetaModel.oRequestor),
that = this;
this.expectFetchEntityContainer(mScope0);
oRequestorMock.expects("read")
.withExactArgs("/a/default/iwbep/tea_busi_product/0001/$metadata")
.returns(Promise.resolve(mReferencedScope));
this.allowWarnings(assert, bWarn);
// code under test
return this.oMetaModel.fetchObject("/tea_busi_product.v0001.").then(function (vResult) {
var oSyncPromise;
assert.strictEqual(vResult, mReferencedScope["tea_busi_product.v0001."]);
assert.ok(that.oMetaModel.mSchema2MetadataUrl["tea_busi_product.v0001."]
["/a/default/iwbep/tea_busi_product/0001/$metadata"],
"document marked as read");
that.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs("Unknown qualified name must.not.be.included.",
"/must.not.be.included.", sODataMetaModel);
assert.strictEqual(that.oMetaModel.getObject("/must.not.be.included."),
undefined,
"must not include schemata which are not mentioned in edmx:Include");
assert.strictEqual(that.oMetaModel.getObject("/tea_busi_supplier.v0001."),
mReferencedScope["tea_busi_supplier.v0001."]);
// now check that "not.found." does not trigger another read(),
// does finish synchronously and logs a warning
that.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs("/a/default/iwbep/tea_busi_product/0001/$metadata"
+ " does not contain not.found.",
"/not.found.", sODataMetaModel);
that.oLogMock.expects("warning").exactly(bWarn ? 1 : 0)
.withExactArgs("Unknown qualified name not.found.",
"/not.found.", sODataMetaModel);
// code under test
oSyncPromise = that.oMetaModel.fetchObject("/not.found.");
assert.strictEqual(oSyncPromise.isFulfilled(), true);
assert.strictEqual(oSyncPromise.getResult(), undefined);
});
});
});
//*********************************************************************************************
QUnit.test("fetchObject: cross-service reference - validation failure", function (assert) {
var oError = new Error(),
mReferencedScope = {},
sUrl = "/a/default/iwbep/tea_busi_product/0001/$metadata";
this.expectFetchEntityContainer(mXServiceScope);
this.mock(this.oMetaModel.oRequestor).expects("read").withExactArgs(sUrl)
.returns(Promise.resolve(mReferencedScope));
this.oMetaModelMock.expects("validate")
.withExactArgs(sUrl, mReferencedScope)
.throws(oError);
return this.oMetaModel.fetchObject("/tea_busi_product.v0001.Product").then(function () {
assert.ok(false);
}, function (oError0) {
assert.strictEqual(oError0, oError);
});
});
//*********************************************************************************************
QUnit.test("fetchObject: cross-service reference - document loaded from different URI",
function (assert) {
var sMessage = "A schema cannot span more than one document: schema is referenced by"
+ " following URLs: /a/default/iwbep/tea_busi_product/0001/$metadata,"
+ " /second/reference",
sSchema = "tea_busi_product.v0001.";
this.expectFetchEntityContainer(mXServiceScope);
this.oLogMock.expects("error")
.withExactArgs(sMessage, sSchema, sODataMetaModel);
// simulate 2 references for a schema
this.oMetaModel.mSchema2MetadataUrl["tea_busi_product.v0001."]["/second/reference"] = false;
// code under test
return this.oMetaModel.fetchObject("/tea_busi_product.v0001.Product").then(function () {
assert.ok(false);
}, function (oError0) {
assert.strictEqual(oError0.message, sSchema + ": " + sMessage);
});
});
//*********************************************************************************************
QUnit.test("fetchObject: cross-service reference - duplicate include", function (assert) {
var oRequestorMock = this.mock(this.oMetaModel.oRequestor),
// root service includes both A and B, A also includes B
mScope0 = {
"$Version" : "4.0",
"$Reference" : {
"/A/$metadata" : {
"$Include" : [
"A."
]
},
"/B/$metadata" : {
"$Include" : [
"B."
]
}
}
},
mScopeA = {
"$Version" : "4.0",
"$Reference" : {
"/B/$metadata" : {
"$Include" : [
"B.",
"B.B." // includes additional namespace from already read document
]
}
},
"A." : {
"$kind" : "Schema"
}
},
mScopeB = {
"$Version" : "4.0",
"B." : {
"$kind" : "Schema"
},
"B.B." : {
"$kind" : "Schema"
}
},
that = this;
this.expectFetchEntityContainer(mScope0);
oRequestorMock.expects("read").withExactArgs("/A/$metadata")
.returns(Promise.resolve(mScopeA));
oRequestorMock.expects("read").withExactArgs("/B/$metadata")
.returns(Promise.resolve(mScopeB));
return this.oMetaModel.fetchObject("/B.")
.then(function (vResult) {
assert.strictEqual(vResult, mScopeB["B."]);
// code under test - we must not overwrite our "$ui5.read" promise!
return that.oMetaModel.fetchObject("/A.")
.then(function (vResult) {
assert.strictEqual(vResult, mScopeA["A."]);
// Note: must not trigger read() again!
return that.oMetaModel.fetchObject("/B.B.")
.then(function (vResult) {
assert.strictEqual(vResult, mScopeB["B.B."]);
});
});
});
});
//TODO Implement consistency checks that the same namespace is always included from the same
// reference URI, no matter which referencing document.
//*********************************************************************************************
[undefined, false, true].forEach(function (bSupportReferences) {
var sTitle = "fetchObject: cross-service reference - supportReferences: "
+ bSupportReferences;
QUnit.test(sTitle, function (assert) {
var mClonedProductScope = clone(mProductScope),
oModel = new ODataModel({ // code under test
serviceUrl : "/a/b/c/d/e/",
supportReferences : bSupportReferences,
synchronizationMode : "None"
}),
sPath = "/tea_busi_product.v0001.Product",
sUrl = "/a/default/iwbep/tea_busi_product/0001/$metadata";
this.oMetaModel = oModel.getMetaModel();
this.oMetaModelMock = this.mock(this.oMetaModel);
bSupportReferences = bSupportReferences !== false; // default is true!
assert.strictEqual(this.oMetaModel.bSupportReferences, bSupportReferences);
this.expectFetchEntityContainer(mXServiceScope);
this.mock(this.oMetaModel.oRequestor).expects("read")
.exactly(bSupportReferences ? 1 : 0)
.withExactArgs(sUrl)
.returns(Promise.resolve(mClonedProductScope));
this.allowWarnings(assert, true);
this.oLogMock.expects("warning").exactly(bSupportReferences ? 0 : 1)
.withExactArgs("Unknown qualified name " + sPath.slice(1), sPath, sODataMetaModel);
// code under test
return this.oMetaModel.fetchObject(sPath).then(function (vResult) {
assert.strictEqual(vResult, bSupportReferences
? mClonedProductScope["tea_busi_product.v0001.Product"]
: undefined);
});
});
});
//*********************************************************************************************
QUnit.test("getObject, requestObject", function (assert) {
return checkGetAndRequest(this, assert, "fetchObject", ["sPath", {/*oContext*/}]);
});
//*********************************************************************************************
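// Fixtures for fetchUI5Type: each object is the property metadata to be mapped to a UI5
// type; the "__constraints" member is not part of the metadata but describes the expected
// type constraints and is removed from the clone before the test runs.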
[{
$Type : "Edm.Boolean"
}, {
$Type : "Edm.Byte"
}, {
$Type : "Edm.Date"
}, {
$Type : "Edm.DateTimeOffset"
}, {
$Precision : 7,
$Type : "Edm.DateTimeOffset",
__constraints : {precision : 7}
}, {
$Type : "Edm.Decimal"
}, {
$Precision : 20,
$Scale : 5,
$Type : "Edm.Decimal",
__constraints : {maximum : "100.00", maximumExclusive : true, minimum : "0.00",
precision : 20, scale : 5}
}, {
$Precision : 20,
$Scale : "variable",
$Type : "Edm.Decimal",
__constraints : {precision : 20, scale : Infinity}
}, {
$Type : "Edm.Double"
}, {
$Type : "Edm.Guid"
}, {
$Type : "Edm.Int16"
}, {
$Type : "Edm.Int32"
}, {
$Type : "Edm.Int64"
}, {
$Type : "Edm.SByte"
}, {
$Type : "Edm.Single"
}, {
$Type : "Edm.Stream"
}, {
$Type : "Edm.String"
}, {
$MaxLength : 255,
$Type : "Edm.String",
__constraints : {maxLength : 255}
}, {
$Type : "Edm.String",
__constraints : {isDigitSequence : true}
}, {
$Type : "Edm.TimeOfDay"
}, {
$Precision : 3,
$Type : "Edm.TimeOfDay",
__constraints : {precision : 3}
}].forEach(function (oProperty0) {
// Note: take care not to modify oProperty0, clone it first!
[false, true].forEach(function (bNullable) {
// Note: JSON.parse(JSON.stringify(...)) cannot clone Infinity!
var oProperty = jQuery.extend(true, {}, oProperty0),
oConstraints = oProperty.__constraints;
delete oProperty.__constraints;
if (!bNullable) {
oProperty.$Nullable = false;
oConstraints = oConstraints || {};
oConstraints.nullable = false;
}
QUnit.test("fetchUI5Type: " + JSON.stringify(oProperty), function (assert) {
// Note: just spy on fetchModule() to make sure that the real types are used
// which check correctness of constraints
var fnFetchModuleSpy = this.spy(this.oMetaModel, "fetchModule"),
sPath = "/EMPLOYEES/0/ENTRYDATE",
oMetaContext = this.oMetaModel.getMetaContext(sPath),
that = this;
this.oMetaModelMock.expects("fetchObject").twice()
.withExactArgs(undefined, oMetaContext)
.returns(SyncPromise.resolve(oProperty));
if (oProperty.$Type === "Edm.String") { // simulate annotation for strings
this.oMetaModelMock.expects("fetchObject")
.withExactArgs("@com.sap.vocabularies.Common.v1.IsDigitSequence",
oMetaContext)
.returns(
SyncPromise.resolve(oConstraints && oConstraints.isDigitSequence));
} else if (oProperty.$Type === "Edm.Decimal") { // simulate annotation for decimals
this.oMetaModelMock.expects("fetchObject")
.withExactArgs("@Org.OData.Validation.V1.Minimum/$Decimal", oMetaContext)
.returns(
SyncPromise.resolve(oConstraints && oConstraints.minimum));
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(
"@Org.OData.Validation.V1.Minimum@Org.OData.Validation.V1.Exclusive",
oMetaContext)
.returns(
SyncPromise.resolve(oConstraints && oConstraints.minimumExclusive));
this.oMetaModelMock.expects("fetchObject")
.withExactArgs("@Org.OData.Validation.V1.Maximum/$Decimal", oMetaContext)
.returns(
SyncPromise.resolve(oConstraints && oConstraints.maximum));
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(
"@Org.OData.Validation.V1.Maximum@Org.OData.Validation.V1.Exclusive",
oMetaContext)
.returns(
SyncPromise.resolve(oConstraints && oConstraints.maximumExclusive));
}
// code under test
return this.oMetaModel.fetchUI5Type(sPath).then(function (oType) {
var sExpectedTypeName = "sap.ui.model.odata.type."
+ oProperty.$Type.slice(4)/*cut off "Edm."*/;
assert.strictEqual(fnFetchModuleSpy.callCount, 1);
assert.ok(fnFetchModuleSpy.calledOn(that.oMetaModel));
assert.ok(fnFetchModuleSpy.calledWithExactly(sExpectedTypeName),
fnFetchModuleSpy.printf("%C"));
assert.strictEqual(oType.getName(), sExpectedTypeName);
assert.deepEqual(oType.oConstraints, oConstraints);
assert.strictEqual(that.oMetaModel.getUI5Type(sPath), oType, "cached");
});
});
});
});
//TODO later: support for facet DefaultValue?
//*********************************************************************************************
QUnit.test("fetchUI5Type: $count", function (assert) {
var sPath = "/T€AMS/$count",
oType;
// code under test
oType = this.oMetaModel.fetchUI5Type(sPath).getResult();
assert.strictEqual(oType.getName(), "sap.ui.model.odata.type.Int64");
assert.strictEqual(this.oMetaModel.getUI5Type(sPath), oType, "cached");
});
//*********************************************************************************************
QUnit.test("fetchUI5Type: collection", function (assert) {
var sPath = "/EMPLOYEES/0/foo",
that = this;
this.oMetaModelMock.expects("fetchObject").thrice()
.withExactArgs(undefined, this.oMetaModel.getMetaContext(sPath))
.returns(SyncPromise.resolve({
$isCollection : true,
$Nullable : false, // must not be turned into a constraint for Raw!
$Type : "Edm.String"
}));
this.oLogMock.expects("warning").withExactArgs(
"Unsupported collection type, using sap.ui.model.odata.type.Raw",
sPath, sODataMetaModel);
return Promise.all([
// code under test
this.oMetaModel.fetchUI5Type(sPath).then(function (oType) {
assert.strictEqual(oType.getName(), "sap.ui.model.odata.type.Raw");
assert.strictEqual(that.oMetaModel.getUI5Type(sPath), oType, "cached");
}),
// code under test
this.oMetaModel.fetchUI5Type(sPath).then(function (oType) {
assert.strictEqual(oType.getName(), "sap.ui.model.odata.type.Raw");
})
]);
});
//*********************************************************************************************
//TODO make Edm.Duration work with OData V4
["acme.Type", "Edm.Duration", "Edm.GeographyPoint"].forEach(function (sQualifiedName) {
QUnit.test("fetchUI5Type: unsupported type " + sQualifiedName, function (assert) {
var sPath = "/EMPLOYEES/0/foo",
that = this;
this.oMetaModelMock.expects("fetchObject").twice()
.withExactArgs(undefined, this.oMetaModel.getMetaContext(sPath))
.returns(SyncPromise.resolve({
$Nullable : false, // must not be turned into a constraint for Raw!
$Type : sQualifiedName
}));
this.oLogMock.expects("warning").withExactArgs(
"Unsupported type '" + sQualifiedName + "', using sap.ui.model.odata.type.Raw",
sPath, sODataMetaModel);
// code under test
return this.oMetaModel.fetchUI5Type(sPath).then(function (oType) {
assert.strictEqual(oType.getName(), "sap.ui.model.odata.type.Raw");
assert.strictEqual(that.oMetaModel.getUI5Type(sPath), oType, "cached");
});
});
});
//*********************************************************************************************
QUnit.test("fetchUI5Type: invalid path", function (assert) {
var sPath = "/EMPLOYEES/0/invalid",
that = this;
this.oMetaModelMock.expects("fetchObject").twice()
.withExactArgs(undefined, this.oMetaModel.getMetaContext(sPath))
.returns(SyncPromise.resolve(/*no property metadata for path*/));
this.oLogMock.expects("warning").twice().withExactArgs(
"No metadata for path '" + sPath + "', using sap.ui.model.odata.type.Raw",
undefined, sODataMetaModel);
// code under test
return this.oMetaModel.fetchUI5Type(sPath).then(function (oType) {
assert.strictEqual(oType.getName(), "sap.ui.model.odata.type.Raw");
// code under test
assert.strictEqual(that.oMetaModel.getUI5Type(sPath), oType, "Type is cached");
});
});
//*********************************************************************************************
QUnit.test("getUI5Type, requestUI5Type", function (assert) {
return checkGetAndRequest(this, assert, "fetchUI5Type", ["sPath"], true);
});
//*********************************************************************************************
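// Fixtures for fetchCanonicalPath: "dataPath" is the context's path, "canonicalUrl" the
// expected result, and each entry in "requests" describes an expected fetchValue call
// delivering an entity instance whose key predicate "(~n)" is spliced into the URL.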
[{ // simple entity from a set
dataPath : "/TEAMS/0",
canonicalUrl : "/TEAMS(~1)",
requests : [{
entityType : "tea_busi.TEAM",
predicate : "(~1)"
}]
}, { // simple entity in transient context
dataPath : "/TEAMS/-1",
canonicalUrl : "/TEAMS(~1)",
requests : [{
entityType : "tea_busi.TEAM",
// TODO a transient entity does not necessarily have all key properties, but this is
// required to create a dependent cache
predicate : "(~1)"
}]
}, { // simple entity by key predicate
dataPath : "/TEAMS('4%3D2')",
canonicalUrl : "/TEAMS('4%3D2')",
requests : []
}, { // simple singleton
dataPath : "/Me",
canonicalUrl : "/Me",
requests : []
}, { // navigation to root entity
dataPath : "/TEAMS/0/TEAM_2_EMPLOYEES/1",
canonicalUrl : "/EMPLOYEES(~1)",
requests : [{
entityType : "tea_busi.Worker",
predicate : "(~1)"
}]
}, { // navigation to root entity
dataPath : "/TEAMS('42')/TEAM_2_EMPLOYEES/1",
canonicalUrl : "/EMPLOYEES(~1)",
requests : [{
entityType : "tea_busi.Worker",
predicate : "(~1)"
}]
}, { // navigation to root entity with key predicate
dataPath : "/TEAMS('42')/TEAM_2_EMPLOYEES('23')",
canonicalUrl : "/EMPLOYEES('23')",
requests : []
}, { // multiple navigation to root entity
dataPath : "/TEAMS/0/TEAM_2_EMPLOYEES/1/EMPLOYEE_2_TEAM",
canonicalUrl : "/T%E2%82%ACAMS(~1)",
requests : [{
entityType : "tea_busi.TEAM",
predicate : "(~1)"
}]
}, { // navigation from entity set to single contained entity
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_S",
canonicalUrl : "/TEAMS(~1)/TEAM_2_CONTAINED_S",
requests : [{
entityType : "tea_busi.TEAM",
path : "/TEAMS/0",
predicate : "(~1)"
}]
}, { // navigation from singleton to single contained entity
dataPath : "/Me/EMPLOYEE_2_CONTAINED_S",
canonicalUrl : "/Me/EMPLOYEE_2_CONTAINED_S",
requests : []
}, { // navigation to contained entity within a collection
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_C/1",
canonicalUrl : "/TEAMS(~1)/TEAM_2_CONTAINED_C(~2)",
requests : [{
entityType : "tea_busi.TEAM",
path : "/TEAMS/0",
predicate : "(~1)"
}, {
entityType : "tea_busi.ContainedC",
path : "/TEAMS/0/TEAM_2_CONTAINED_C/1",
predicate : "(~2)"
}]
}, { // navigation to contained entity with a key predicate
dataPath : "/TEAMS('42')/TEAM_2_CONTAINED_C('foo')",
canonicalUrl : "/TEAMS('42')/TEAM_2_CONTAINED_C('foo')",
requests : []
}, { // navigation from contained entity to contained entity
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_S/S_2_C/1",
canonicalUrl : "/TEAMS(~1)/TEAM_2_CONTAINED_S/S_2_C(~2)",
requests : [{
entityType : "tea_busi.TEAM",
path : "/TEAMS/0",
predicate : "(~1)"
}, {
entityType : "tea_busi.ContainedC",
path : "/TEAMS/0/TEAM_2_CONTAINED_S/S_2_C/1",
predicate : "(~2)"
}]
}, { // navigation from contained to root entity
// must be appended nevertheless since we only have a type, but no set
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_C/5/C_2_EMPLOYEE",
canonicalUrl : "/TEAMS(~1)/TEAM_2_CONTAINED_C(~2)/C_2_EMPLOYEE",
requests : [{
entityType : "tea_busi.TEAM",
path : "/TEAMS/0",
predicate : "(~1)"
}, {
entityType : "tea_busi.ContainedC",
path : "/TEAMS/0/TEAM_2_CONTAINED_C/5",
predicate : "(~2)"
}]
}, { // navigation from entity w/ key predicate to contained to root entity
dataPath : "/TEAMS('42')/TEAM_2_CONTAINED_C/5/C_2_EMPLOYEE",
canonicalUrl : "/TEAMS('42')/TEAM_2_CONTAINED_C(~1)/C_2_EMPLOYEE",
requests : [{
entityType : "tea_busi.ContainedC",
path : "/TEAMS('42')/TEAM_2_CONTAINED_C/5",
predicate : "(~1)"
}]
}, { // decode entity set initially, encode it finally
dataPath : "/T%E2%82%ACAMS/0",
canonicalUrl : "/T%E2%82%ACAMS(~1)",
requests : [{
entityType : "tea_busi.TEAM",
predicate : "(~1)"
}]
}, { // decode navigation property, encode entity set when building sCandidate
dataPath : "/EMPLOYEES('7')/EMPLOYEE_2_EQUIPM%E2%82%ACNTS(42)",
canonicalUrl : "/EQUIPM%E2%82%ACNTS(42)",
requests : []
}].forEach(function (oFixture) {
QUnit.test("fetchCanonicalPath: " + oFixture.dataPath, function (assert) {
var oContext = Context.create(this.oModel, undefined, oFixture.dataPath),
oContextMock = this.mock(oContext),
oPromise;
this.oMetaModelMock.expects("getMetaPath").withExactArgs(oFixture.dataPath)
.returns("metapath");
this.oMetaModelMock.expects("fetchObject").withExactArgs("metapath")
.returns(SyncPromise.resolve());
this.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
oFixture.requests.forEach(function (oRequest) {
var oEntityInstance = {"@$ui5._" : {"predicate" : oRequest.predicate}};
oContextMock.expects("fetchValue")
.withExactArgs(oRequest.path || oFixture.dataPath)
.returns(SyncPromise.resolve(oEntityInstance));
});
// code under test
oPromise = this.oMetaModel.fetchCanonicalPath(oContext);
assert.ok(!oPromise.isRejected());
return oPromise.then(function (sCanonicalUrl) {
assert.strictEqual(sCanonicalUrl, oFixture.canonicalUrl);
});
});
});
//*********************************************************************************************
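// Fixtures for fetchUpdateData: "path" is "<context path>|<property path>" (split at "|"
// in the test below); "fetchPredicates" maps each entity path for which a key predicate
// "(~n)" has to be fetched to its entity type (documentation only, the test uses just
// the keys).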
[{ // simple singleton
path : "/Me|ID",
editUrl : "Me"
}, { // simple entity by key predicate
path : "/TEAMS('42')|Name",
editUrl : "TEAMS('42')"
}, { // simple entity from a set
path : "/TEAMS/0|Name",
fetchPredicates : {
"/TEAMS/0" : "tea_busi.TEAM"
},
editUrl : "TEAMS(~0)"
}, { // simple entity from a set, complex property
path : "/EMPLOYEES/0|SAL%C3%83RY/CURRENCY",
fetchPredicates : {
"/EMPLOYEES/0" : "tea_busi.Worker"
},
editUrl : "EMPLOYEES(~0)"
}, { // navigation to root entity
path : "/TEAMS/0/TEAM_2_EMPLOYEES/1|ID",
fetchPredicates : {
"/TEAMS/0/TEAM_2_EMPLOYEES/1" : "tea_busi.Worker"
},
editUrl : "EMPLOYEES(~0)"
}, { // navigation to root entity
path : "/TEAMS('42')/TEAM_2_EMPLOYEES/1|ID",
fetchPredicates : {
"/TEAMS('42')/TEAM_2_EMPLOYEES/1" : "tea_busi.Worker"
},
editUrl : "EMPLOYEES(~0)"
}, { // navigation to root entity with key predicate
path : "/TEAMS('42')/TEAM_2_EMPLOYEES('23')|ID",
editUrl : "EMPLOYEES('23')"
}, { // multiple navigation to root entity
path : "/TEAMS/0/TEAM_2_EMPLOYEES/1/EMPLOYEE_2_TEAM|Name",
fetchPredicates : {
"/TEAMS/0/TEAM_2_EMPLOYEES/1/EMPLOYEE_2_TEAM" : "tea_busi.TEAM"
},
editUrl : "T%E2%82%ACAMS(~0)"
}, { // navigation from entity set to single contained entity
path : "/TEAMS/0/TEAM_2_CONTAINED_S|Id",
fetchPredicates : {
"/TEAMS/0" : "tea_busi.TEAM"
},
editUrl : "TEAMS(~0)/TEAM_2_CONTAINED_S"
}, { // navigation from singleton to single contained entity
path : "/Me/EMPLOYEE_2_CONTAINED_S|Id",
editUrl : "Me/EMPLOYEE_2_CONTAINED_S"
}, { // navigation to contained entity within a collection
path : "/TEAMS/0/TEAM_2_CONTAINED_C/1|Id",
fetchPredicates : {
"/TEAMS/0" : "tea_busi.TEAM",
"/TEAMS/0/TEAM_2_CONTAINED_C/1" : "tea_busi.ContainedC"
},
editUrl : "TEAMS(~0)/TEAM_2_CONTAINED_C(~1)"
}, { // navigation to contained entity with a key predicate
path : "/TEAMS('42')/TEAM_2_CONTAINED_C('foo')|Id",
editUrl : "TEAMS('42')/TEAM_2_CONTAINED_C('foo')"
}, { // navigation from contained entity to contained entity
path : "/TEAMS/0/TEAM_2_CONTAINED_S/S_2_C/1|Id",
fetchPredicates : {
"/TEAMS/0" : "tea_busi.TEAM",
"/TEAMS/0/TEAM_2_CONTAINED_S/S_2_C/1" : "tea_busi.ContainedC"
},
editUrl : "TEAMS(~0)/TEAM_2_CONTAINED_S/S_2_C(~1)"
}, { // navigation from contained to root entity, resolved via navigation property binding path
path : "/TEAMS/0/TEAM_2_CONTAINED_S/S_2_EMPLOYEE|ID",
fetchPredicates : {
"/TEAMS/0/TEAM_2_CONTAINED_S/S_2_EMPLOYEE" : "tea_busi.Worker"
},
editUrl : "EMPLOYEES(~0)"
}, { // navigation from entity w/ key predicate to contained to root entity
path : "/TEAMS('42')/TEAM_2_CONTAINED_C/5/C_2_EMPLOYEE|ID",
fetchPredicates : {
"/TEAMS('42')/TEAM_2_CONTAINED_C/5" : "tea_busi.ContainedC"
},
editUrl : "TEAMS('42')/TEAM_2_CONTAINED_C(~0)/C_2_EMPLOYEE"
}, { // decode entity set initially, encode it finally
path : "/T%E2%82%ACAMS/0|Name",
fetchPredicates : {
"/T%E2%82%ACAMS/0" : "tea_busi.TEAM"
},
editUrl : "T%E2%82%ACAMS(~0)"
}, { // decode navigation property, encode entity set
path : "/EMPLOYEES('7')/EMPLOYEE_2_EQUIPM%E2%82%ACNTS(42)|ID",
editUrl : "EQUIPM%E2%82%ACNTS(42)"
}].forEach(function (oFixture) {
QUnit.test("fetchUpdateData: " + oFixture.path, function (assert) {
var i = oFixture.path.indexOf("|"),
sContextPath = oFixture.path.slice(0, i),
sPropertyPath = oFixture.path.slice(i + 1),
oContext = Context.create(this.oModel, undefined, sContextPath),
oContextMock = this.mock(oContext),
oPromise,
that = this;
this.oMetaModelMock.expects("getMetaPath")
.withExactArgs(oFixture.path.replace("|", "/")).returns("~");
this.oMetaModelMock.expects("fetchObject").withExactArgs("~")
.returns(SyncPromise.resolve(Promise.resolve()).then(function () {
that.oMetaModelMock.expects("fetchEntityContainer")
.returns(SyncPromise.resolve(mScope));
Object.keys(oFixture.fetchPredicates || {}).forEach(function (sPath, i) {
var oEntityInstance = {"@$ui5._" : {"predicate" : "(~" + i + ")"}};
// Note: the entity instance is delivered asynchronously
oContextMock.expects("fetchValue")
.withExactArgs(sPath)
.returns(SyncPromise.resolve(Promise.resolve(oEntityInstance)));
});
}));
// code under test
oPromise = this.oMetaModel.fetchUpdateData(sPropertyPath, oContext);
assert.ok(!oPromise.isRejected());
return oPromise.then(function (oResult) {
assert.strictEqual(oResult.editUrl, oFixture.editUrl);
assert.strictEqual(oResult.entityPath, sContextPath);
assert.strictEqual(oResult.propertyPath, sPropertyPath);
});
});
});
//TODO support collection properties (-> path containing index not leading to predicate)
//TODO prefer instance annotation at payload for "odata.editLink"?!
//TODO target URLs like "com.sap.gateway.default.iwbep.tea_busi_product.v0001.Container/Products(...)"?
//TODO type casts, operations?
//*********************************************************************************************
QUnit.test("fetchUpdateData: transient entity", function(assert) {
var oContext = Context.create(this.oModel, undefined, "/TEAMS/-1"),
sPropertyPath = "Name";
this.oMetaModelMock.expects("fetchEntityContainer").twice()
.returns(SyncPromise.resolve(mScope));
this.mock(oContext).expects("fetchValue").withExactArgs("/TEAMS/-1")
.returns(SyncPromise.resolve({"@$ui5._" : {"transient" : "update"}}));
// code under test
return this.oMetaModel.fetchUpdateData(sPropertyPath, oContext).then(function (oResult) {
assert.deepEqual(oResult, {
entityPath : "/TEAMS/-1",
editUrl : undefined,
propertyPath : "Name"
});
});
});
//*********************************************************************************************
QUnit.test("fetchUpdateData: fetchObject fails", function(assert) {
var oModel = this.oModel,
oContext = {
getModel : function () { return oModel; }
},
oExpectedError = new Error(),
oMetaModelMock = this.mock(this.oMetaModel),
sPath = "some/invalid/path/to/a/property";
this.mock(oModel).expects("resolve")
.withExactArgs(sPath, sinon.match.same(oContext))
.returns("~1");
oMetaModelMock.expects("getMetaPath").withExactArgs("~1").returns("~2");
oMetaModelMock.expects("fetchObject").withExactArgs("~2")
.returns(Promise.reject(oExpectedError));
// code under test
return this.oMetaModel.fetchUpdateData(sPath, oContext).then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError, oExpectedError);
});
});
//*********************************************************************************************
[{
dataPath : "/Foo/Bar",
message : "Not an entity set: Foo",
warning : "Unknown child Foo of tea_busi.DefaultContainer"
}, {
dataPath : "/TEAMS/0/Foo/Bar",
message : "Not a (navigation) property: Foo"
}, {
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_S",
instance : undefined,
message : "No instance to calculate key predicate at /TEAMS/0"
}, {
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_S",
instance : {},
message : "No key predicate known at /TEAMS/0"
}, {
dataPath : "/TEAMS/0/TEAM_2_CONTAINED_S",
instance : new Error("failed to load team"),
message : "failed to load team at /TEAMS/0"
}].forEach(function (oFixture) {
QUnit.test("fetchUpdateData: " + oFixture.message, function (assert) {
var oContext = Context.create(this.oModel, undefined, oFixture.dataPath),
oPromise;
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(mScope));
if ("instance" in oFixture) {
this.mock(oContext).expects("fetchValue")
.returns(oFixture.instance instanceof Error
? SyncPromise.reject(oFixture.instance)
: SyncPromise.resolve(oFixture.instance));
}
if (oFixture.warning) {
this.oLogMock.expects("isLoggable")
.withExactArgs(jQuery.sap.log.Level.WARNING, sODataMetaModel)
.returns(true);
this.oLogMock.expects("warning")
.withExactArgs(oFixture.warning, oFixture.dataPath, sODataMetaModel);
}
this.mock(this.oModel).expects("reportError")
.withExactArgs(oFixture.message, sODataMetaModel, sinon.match({
message : oFixture.dataPath + ": " + oFixture.message,
name : "Error"
}));
oPromise = this.oMetaModel.fetchUpdateData("", oContext);
assert.ok(oPromise.isRejected());
assert.strictEqual(oPromise.getResult().message,
oFixture.dataPath + ": " + oFixture.message);
oPromise.caught(); // avoid "Uncaught (in promise)"
});
});
//*********************************************************************************************
QUnit.test("fetchCanonicalPath: success", function(assert) {
var oContext = {};
this.mock(this.oMetaModel).expects("fetchUpdateData")
.withExactArgs("", sinon.match.same(oContext))
.returns(SyncPromise.resolve(Promise.resolve({
editUrl : "edit('URL')",
propertyPath : ""
})));
// code under test
return this.oMetaModel.fetchCanonicalPath(oContext).then(function (oCanonicalPath) {
assert.strictEqual(oCanonicalPath, "/edit('URL')");
});
});
//*********************************************************************************************
QUnit.test("fetchCanonicalPath: not an entity", function(assert) {
var oContext = {
getPath : function () { return "/TEAMS('4711')/Name"; }
};
this.mock(this.oMetaModel).expects("fetchUpdateData")
.withExactArgs("", sinon.match.same(oContext))
.returns(SyncPromise.resolve(Promise.resolve({
entityPath : "/TEAMS('4711')",
editUrl : "TEAMS('4711')",
propertyPath : "Name"
})));
// code under test
return this.oMetaModel.fetchCanonicalPath(oContext).then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError.message, "Context " + oContext.getPath()
+ " does not point to an entity. It should be " + "/TEAMS('4711')");
});
});
//*********************************************************************************************
QUnit.test("fetchCanonicalPath: fetchUpdateData fails", function(assert) {
var oContext = {},
oExpectedError = new Error();
this.mock(this.oMetaModel).expects("fetchUpdateData")
.withExactArgs("", sinon.match.same(oContext))
.returns(SyncPromise.resolve(Promise.reject(oExpectedError)));
// code under test
return this.oMetaModel.fetchCanonicalPath(oContext).then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError, oExpectedError);
});
});
//*********************************************************************************************
QUnit.test("getProperty = getObject", function (assert) {
assert.strictEqual(this.oMetaModel.getProperty, this.oMetaModel.getObject);
});
//*********************************************************************************************
QUnit.test("bindProperty", function (assert) {
var oBinding,
oContext = {},
mParameters = {},
sPath = "foo";
// code under test
oBinding = this.oMetaModel.bindProperty(sPath, oContext, mParameters);
assert.ok(oBinding instanceof PropertyBinding);
assert.ok(oBinding.hasOwnProperty("vValue"));
assert.strictEqual(oBinding.getContext(), oContext);
assert.strictEqual(oBinding.getModel(), this.oMetaModel);
assert.strictEqual(oBinding.getPath(), sPath);
assert.strictEqual(oBinding.mParameters, mParameters, "mParameters available internally");
assert.strictEqual(oBinding.getValue(), undefined);
// code under test: must not call getProperty() again!
assert.strictEqual(oBinding.getExternalValue(), undefined);
// code under test
assert.throws(function () {
oBinding.setExternalValue("foo");
}, /Unsupported operation: ODataMetaPropertyBinding#setValue/);
});
//*********************************************************************************************
[undefined, {}, {$$valueAsPromise : false}].forEach(function (mParameters, i) {
QUnit.test("ODataMetaPropertyBinding#checkUpdate: " + i, function (assert) {
var oBinding,
oContext = {},
sPath = "foo",
oValue = {},
oPromise = SyncPromise.resolve(Promise.resolve(oValue));
oBinding = this.oMetaModel.bindProperty(sPath, oContext, mParameters);
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(sPath, sinon.match.same(oContext), sinon.match.same(mParameters))
.returns(oPromise);
this.mock(oBinding).expects("_fireChange")
.withExactArgs({reason : ChangeReason.Change});
// code under test
oBinding.checkUpdate();
assert.strictEqual(oBinding.getValue(), undefined);
oPromise.then(function () {
assert.strictEqual(oBinding.getValue(), oValue);
});
return oPromise;
});
});
//*********************************************************************************************
QUnit.test("ODataMetaPropertyBinding#checkUpdate: $$valueAsPromise=true, sync",
function (assert) {
var oBinding,
oContext = {},
mParameters = {$$valueAsPromise : true},
sPath = "foo",
oValue = {},
oPromise = SyncPromise.resolve(oValue);
oBinding = this.oMetaModel.bindProperty(sPath, oContext, mParameters);
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(sPath, sinon.match.same(oContext), sinon.match.same(mParameters))
.returns(oPromise);
this.mock(oBinding).expects("_fireChange").withExactArgs({reason : ChangeReason.Change});
// code under test
oBinding.checkUpdate();
assert.strictEqual(oBinding.getValue(), oValue, "Value sync");
return oPromise;
});
//*********************************************************************************************
QUnit.test("ODataMetaPropertyBinding#checkUpdate: no event", function (assert) {
var oBinding,
oContext = {},
mParameters = {},
sPath = "foo",
oValue = {},
oPromise = SyncPromise.resolve(Promise.resolve(oValue));
oBinding = this.oMetaModel.bindProperty(sPath, oContext, mParameters);
oBinding.vValue = oValue;
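// the fetched value equals the binding's current value, hence no change event is fired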
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(sPath, sinon.match.same(oContext), sinon.match.same(mParameters))
.returns(oPromise);
this.mock(oBinding).expects("_fireChange").never();
// code under test
oBinding.checkUpdate();
return oPromise;
});
//*********************************************************************************************
QUnit.test("ODataMetaPropertyBinding#checkUpdate: bForceUpdate, sChangeReason",
function (assert) {
var oBinding,
oContext = {},
mParameters = {},
sPath = "foo",
oValue = {},
oPromise = SyncPromise.resolve(Promise.resolve(oValue));
oBinding = this.oMetaModel.bindProperty(sPath, oContext, mParameters);
oBinding.vValue = oValue;
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(sPath, sinon.match.same(oContext), sinon.match.same(mParameters))
.returns(oPromise);
this.mock(oBinding).expects("_fireChange").withExactArgs({reason : "Foo"});
// code under test
oBinding.checkUpdate(true, "Foo");
return oPromise;
});
//*********************************************************************************************
QUnit.test("ODataMetaPropertyBinding#checkUpdate: $$valueAsPromise = true", function (assert) {
var oBinding,
oContext = {},
mParameters = {
$$valueAsPromise : true
},
sPath = "foo",
oValue = {},
oPromise = SyncPromise.resolve(Promise.resolve(oValue));
oBinding = this.oMetaModel.bindProperty(sPath, oContext, mParameters);
oBinding.vValue = oValue;
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(sPath, sinon.match.same(oContext), sinon.match.same(mParameters))
.returns(oPromise);
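// Note: with $$valueAsPromise, the first change event exposes the pending SyncPromise and a
// second one fires once the value is resolved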
this.mock(oBinding).expects("_fireChange")
.withExactArgs({reason : "Foo"})
.twice()
.onFirstCall().callsFake(function () {
assert.ok(oBinding.getValue().isPending(), "Value is still a pending SyncPromise");
})
.onSecondCall().callsFake(function () {
assert.strictEqual(oBinding.getValue(), oValue, "Value resolved");
});
// code under test
oBinding.checkUpdate(false, "Foo");
assert.ok(oBinding.getValue().isPending(), "Value is a pending SyncPromise");
return oBinding.getValue().then(function (oResult) {
assert.strictEqual(oResult, oValue);
assert.strictEqual(oBinding.getValue(), oValue);
});
});
//*********************************************************************************************
QUnit.test("ODataMetaPropertyBinding#setContext", function (assert) {
var oBinding,
oBindingMock,
oContext = {};
oBinding = this.oMetaModel.bindProperty("Foo", oContext);
oBindingMock = this.mock(oBinding);
oBindingMock.expects("checkUpdate").never();
// code under test
oBinding.setContext(oContext);
oBindingMock.expects("checkUpdate").withExactArgs(false, ChangeReason.Context);
// code under test
oBinding.setContext(undefined);
assert.strictEqual(oBinding.getContext(), undefined);
oBinding = this.oMetaModel.bindProperty("/Foo");
this.mock(oBinding).expects("checkUpdate").never();
// code under test
oBinding.setContext(oContext);
});
//*********************************************************************************************
["ENTRYDATE", "/EMPLOYEES/ENTRYDATE"].forEach(function (sPath) {
QUnit.test("bindContext: " + sPath, function (assert) {
var bAbsolutePath = sPath[0] === "/",
oBinding,
oBoundContext,
iChangeCount = 0,
oContext = this.oMetaModel.getMetaContext("/EMPLOYEES"),
oContextCopy = this.oMetaModel.getMetaContext("/EMPLOYEES"),
oNewContext = this.oMetaModel.getMetaContext("/T€AMS");
// without context
oBinding = this.oMetaModel.bindContext(sPath, null);
assert.ok(oBinding instanceof ContextBinding);
assert.strictEqual(oBinding.getModel(), this.oMetaModel);
assert.strictEqual(oBinding.getPath(), sPath);
assert.strictEqual(oBinding.getContext(), null);
assert.strictEqual(oBinding.isInitial(), true);
assert.strictEqual(oBinding.getBoundContext(), null);
// with context
oBinding = this.oMetaModel.bindContext(sPath, oContextCopy);
assert.ok(oBinding instanceof ContextBinding);
assert.strictEqual(oBinding.getModel(), this.oMetaModel);
assert.strictEqual(oBinding.getPath(), sPath);
assert.strictEqual(oBinding.getContext(), oContextCopy);
assert.strictEqual(oBinding.isInitial(), true);
assert.strictEqual(oBinding.getBoundContext(), null);
// setContext **********
oBinding.attachChange(function (oEvent) {
assert.strictEqual(oEvent.getId(), "change");
iChangeCount += 1;
});
// code under test
oBinding.setContext(oContext);
assert.strictEqual(iChangeCount, 0, "still initial");
assert.strictEqual(oBinding.isInitial(), true);
assert.strictEqual(oBinding.getBoundContext(), null);
assert.strictEqual(oBinding.getContext(), oContext);
// code under test
oBinding.initialize();
assert.strictEqual(iChangeCount, 1, "ManagedObject relies on 'change' event!");
assert.strictEqual(oBinding.isInitial(), false);
oBoundContext = oBinding.getBoundContext();
assert.strictEqual(oBoundContext.getModel(), this.oMetaModel);
assert.strictEqual(oBoundContext.getPath(),
bAbsolutePath ? sPath : oContext.getPath() + "/" + sPath);
// code under test - same context
oBinding.setContext(oContext);
assert.strictEqual(iChangeCount, 1, "context unchanged");
assert.strictEqual(oBinding.getBoundContext(), oBoundContext);
// code under test
oBinding.setContext(oContextCopy);
assert.strictEqual(iChangeCount, 1, "context unchanged");
assert.strictEqual(oBinding.getBoundContext(), oBoundContext);
// code under test
// Note: checks equality on resolved path, not simply object identity of context!
oBinding.setContext(oNewContext);
if (bAbsolutePath) {
assert.strictEqual(iChangeCount, 1, "context unchanged");
assert.strictEqual(oBinding.getBoundContext(), oBoundContext);
} else {
assert.strictEqual(iChangeCount, 2, "context changed");
oBoundContext = oBinding.getBoundContext();
assert.strictEqual(oBoundContext.getModel(), this.oMetaModel);
assert.strictEqual(oBoundContext.getPath(), oNewContext.getPath() + "/" + sPath);
}
// code under test
oBinding.setContext(null);
if (bAbsolutePath) {
assert.strictEqual(iChangeCount, 1, "context unchanged");
assert.strictEqual(oBinding.getBoundContext(), oBoundContext);
} else {
assert.strictEqual(iChangeCount, 3, "context changed");
assert.strictEqual(oBinding.isInitial(), false);
assert.strictEqual(oBinding.getBoundContext(), null);
}
});
});
//*********************************************************************************************
QUnit.test("bindList", function (assert) {
var oBinding,
oContext = this.oMetaModel.getContext("/EMPLOYEES"),
aFilters = [],
sPath = "@",
aSorters = [];
// avoid request to backend during initialization
this.oMetaModelMock.expects("fetchObject").returns(SyncPromise.resolve());
// code under test
oBinding = this.oMetaModel.bindList(sPath, oContext, aSorters, aFilters);
assert.ok(oBinding instanceof ClientListBinding);
assert.strictEqual(oBinding.getModel(), this.oMetaModel);
assert.strictEqual(oBinding.getPath(), sPath);
assert.strictEqual(oBinding.getContext(), oContext);
assert.strictEqual(oBinding.aSorters, aSorters);
assert.strictEqual(oBinding.aApplicationFilters, aFilters);
});
//*********************************************************************************************
QUnit.test("ODataMetaListBinding#setContexts", function (assert) {
var oBinding,
oBindingMock,
oContext = this.oMetaModel.getContext("/EMPLOYEES"),
aContexts = [],
sPath = "path";
// avoid request to backend during initialization
this.oMetaModelMock.expects("fetchObject").returns(SyncPromise.resolve());
oBinding = this.oMetaModel.bindList(sPath, oContext);
oBindingMock = this.mock(oBinding);
oBindingMock.expects("updateIndices").withExactArgs();
oBindingMock.expects("applyFilter").withExactArgs();
oBindingMock.expects("applySort").withExactArgs();
oBindingMock.expects("_getLength").withExactArgs().returns(42);
// code under test
oBinding.setContexts(aContexts);
assert.strictEqual(oBinding.oList, aContexts);
assert.strictEqual(oBinding.iLength, 42);
});
//*********************************************************************************************
QUnit.test("ODataMetaListBinding#update (sync)", function (assert) {
var oBinding,
oBindingMock,
oContext = this.oMetaModel.getContext("/EMPLOYEES"),
aContexts = [{}],
sPath = "path";
// avoid request to backend during initialization
this.oMetaModelMock.expects("fetchObject").returns(SyncPromise.resolve());
oBinding = this.oMetaModel.bindList(sPath, oContext);
oBindingMock = this.mock(oBinding);
oBindingMock.expects("fetchContexts").withExactArgs()
.returns(SyncPromise.resolve(aContexts));
oBindingMock.expects("setContexts").withExactArgs(sinon.match.same(aContexts));
oBindingMock.expects("_fireChange").never();
// code under test
oBinding.update();
});
//*********************************************************************************************
QUnit.test("ODataMetaListBinding#update (async)", function (assert) {
var oBinding,
oBindingMock,
oContext = this.oMetaModel.getContext("/EMPLOYEES"),
aContexts = [{}],
sPath = "path",
oFetchPromise = SyncPromise.resolve(Promise.resolve()).then(function () {
// This is expected to happen after the promise is resolved
oBindingMock.expects("setContexts").withExactArgs(sinon.match.same(aContexts));
oBindingMock.expects("_fireChange").withExactArgs({reason : ChangeReason.Change});
return aContexts;
});
// avoid request to backend during initialization
this.oMetaModelMock.expects("fetchObject").returns(SyncPromise.resolve());
oBinding = this.oMetaModel.bindList(sPath, oContext);
oBindingMock = this.mock(oBinding);
oBindingMock.expects("fetchContexts").withExactArgs().returns(oFetchPromise);
oBindingMock.expects("setContexts").withExactArgs([]);
oBindingMock.expects("_fireChange").never(); // initially
// code under test
oBinding.update();
return oFetchPromise;
});
//*********************************************************************************************
QUnit.test("ODataMetaListBinding#checkUpdate", function (assert) {
var oBinding,
oBindingMock,
oContext = this.oMetaModel.getContext("/"),
sPath = "";
// avoid request to backend during initialization
this.oMetaModelMock.expects("fetchObject").returns(SyncPromise.resolve());
oBinding = this.oMetaModel.bindList(sPath, oContext);
oBindingMock = this.mock(oBinding);
this.mock(oBinding).expects("update").thrice().callsFake(function () {
this.oList = [{/*a context*/}];
});
oBindingMock.expects("_fireChange").withExactArgs({reason : ChangeReason.Change});
// code under test
oBinding.checkUpdate();
// code under test: The second call must call update, but not fire an event
oBinding.checkUpdate();
oBindingMock.expects("_fireChange").withExactArgs({reason : ChangeReason.Change});
// code under test: Must fire a change event
oBinding.checkUpdate(true);
});
//*********************************************************************************************
QUnit.test("ODataMetaListBinding#getContexts, getCurrentContexts", function (assert) {
var oBinding,
oMetaModel = this.oMetaModel, // instead of "that = this"
oContext = oMetaModel.getMetaContext("/EMPLOYEES"),
sPath = "";
function assertContextPaths(aContexts, aPaths) {
assert.notOk("diff" in aContexts, "extended change detection is ignored");
assert.deepEqual(aContexts.map(function (oContext) {
assert.strictEqual(oContext.getModel(), oMetaModel);
return oContext.getPath().replace("/EMPLOYEES/", "");
}), aPaths);
assert.deepEqual(oBinding.getCurrentContexts(), aContexts);
}
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(mScope));
oBinding = oMetaModel.bindList(sPath, oContext);
// code under test: should be ignored
oBinding.enableExtendedChangeDetection();
assertContextPaths(oBinding.getContexts(0, 2), ["ID", "AGE"]);
assertContextPaths(oBinding.getContexts(1, 2), ["AGE", "EMPLOYEE_2_CONTAINED_S"]);
assertContextPaths(oBinding.getContexts(), ["ID", "AGE", "EMPLOYEE_2_CONTAINED_S",
"EMPLOYEE_2_EQUIPM€NTS", "EMPLOYEE_2_TEAM", "SALÃRY"]);
assertContextPaths(oBinding.getContexts(0, 10), ["ID", "AGE", "EMPLOYEE_2_CONTAINED_S",
"EMPLOYEE_2_EQUIPM€NTS", "EMPLOYEE_2_TEAM", "SALÃRY"]);
oMetaModel.setSizeLimit(2);
assertContextPaths(oBinding.getContexts(), ["ID", "AGE"]);
oBinding.attachEvent("sort", function () {
assert.ok(false, "unexpected sort event");
});
oMetaModel.setSizeLimit(100);
oBinding.sort(new Sorter("@sapui.name"));
assertContextPaths(oBinding.getContexts(), ["AGE", "EMPLOYEE_2_CONTAINED_S",
"EMPLOYEE_2_EQUIPM€NTS", "EMPLOYEE_2_TEAM", "ID", "SALÃRY"]);
oBinding.attachEvent("filter", function () {
assert.ok(false, "unexpected filter event");
});
oBinding.filter(new Filter("$kind", "EQ", "Property"));
assertContextPaths(oBinding.getContexts(), ["AGE", "ID", "SALÃRY"]);
});
//*********************************************************************************************
[{
contextPath : undefined,
metaPath : "@",
result : []
}, {
// <template:repeat list="{entitySet>}" ...>
// Iterate all OData path segments, i.e. (navigation) properties.
// Implicit $Type insertion happens here!
//TODO support for $BaseType
contextPath : "/EMPLOYEES",
metaPath : "",
result : [
"/EMPLOYEES/ID",
"/EMPLOYEES/AGE",
"/EMPLOYEES/EMPLOYEE_2_CONTAINED_S",
"/EMPLOYEES/EMPLOYEE_2_EQUIPM€NTS",
"/EMPLOYEES/EMPLOYEE_2_TEAM",
"/EMPLOYEES/SALÃRY"
]
}, {
// <template:repeat list="{meta>EMPLOYEES}" ...>
// same as before, but with non-empty path
contextPath : "/",
metaPath : "EMPLOYEES",
result : [
"/EMPLOYEES/ID",
"/EMPLOYEES/AGE",
"/EMPLOYEES/EMPLOYEE_2_CONTAINED_S",
"/EMPLOYEES/EMPLOYEE_2_EQUIPM€NTS",
"/EMPLOYEES/EMPLOYEE_2_TEAM",
"/EMPLOYEES/SALÃRY"
]
}, {
// <template:repeat list="{meta>/}" ...>
// Iterate all OData path segments, i.e. entity sets and imports.
// Implicit scope lookup happens here!
metaPath : "/",
result : [
"/ChangeManagerOfTeam",
"/EMPLOYEES",
"/EQUIPM€NTS",
"/GetEmployeeMaxAge",
"/Me",
"/OverloadedAction",
"/TEAMS",
"/T€AMS",
"/VoidAction"
]
}, {
// <template:repeat list="{property>@}" ...>
// Iterate all external targeting annotations.
contextPath : "/T€AMS/Team_Id",
metaPath : "@",
result : [
"/T€AMS/Team_Id@Common.Label",
"/T€AMS/Team_Id@Common.Text",
"/T€AMS/Team_Id@Common.Text@UI.TextArrangement"
]
}, {
// <template:repeat list="{property>@}" ...>
// Iterate all external targeting annotations.
contextPath : "/T€AMS/Name",
metaPath : "@",
result : []
}, {
// <template:repeat list="{field>./@}" ...>
// Iterate all inline annotations.
contextPath : "/T€AMS/$Type/@UI.LineItem/0",
metaPath : "./@",
result : [
"/T€AMS/$Type/@UI.LineItem/0/@UI.Importance"
]
}, {
// <template:repeat list="{at>}" ...>
// Iterate all inline annotations (edge case with empty relative path).
contextPath : "/T€AMS/$Type/@UI.LineItem/0/@",
metaPath : "",
result : [
"/T€AMS/$Type/@UI.LineItem/0/@UI.Importance"
]
}, {
contextPath : undefined,
metaPath : "/Unknown",
result : [],
warning : ["Unknown child Unknown of tea_busi.DefaultContainer", "/Unknown/"]
}].forEach(function (oFixture) {
var sPath = oFixture.contextPath
? oFixture.contextPath + "|"/*make cut more visible*/ + oFixture.metaPath
: oFixture.metaPath;
QUnit.test("ODataMetaListBinding#fetchContexts (sync): " + sPath, function (assert) {
var oBinding,
oMetaModel = this.oMetaModel, // instead of "that = this"
oContext = oFixture.contextPath && oMetaModel.getContext(oFixture.contextPath);
if (oFixture.warning) {
// Note that _getContexts is called twice in this test: once from bindList via the
// constructor, once via the direct fetchContexts call below
this.oLogMock.expects("isLoggable").twice()
.withExactArgs(jQuery.sap.log.Level.WARNING, sODataMetaModel)
.returns(true);
this.oLogMock.expects("warning").twice()
.withExactArgs(oFixture.warning[0], oFixture.warning[1], sODataMetaModel);
}
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(0)
.returns(SyncPromise.resolve(mScope));
oBinding = this.oMetaModel.bindList(oFixture.metaPath, oContext);
// code under test
assert.deepEqual(oBinding.fetchContexts().getResult().map(function (oContext) {
assert.strictEqual(oContext.getModel(), oMetaModel);
return oContext.getPath();
}), oFixture.result);
});
});
//*********************************************************************************************
QUnit.test("ODataMetaListBinding#fetchContexts (async)", function (assert) {
var oBinding,
oMetaModel = this.oMetaModel,
sPath = "/foo";
// Note that fetchObject is called twice in this test: once from bindList via the
// constructor, once from fetchContexts
this.oMetaModelMock.expects("fetchObject").twice()
.withExactArgs(sPath + "/")
.returns(SyncPromise.resolve(Promise.resolve({bar : "", baz : ""})));
oBinding = this.oMetaModel.bindList(sPath);
return oBinding.fetchContexts().then(function (oResult) {
assert.deepEqual(oResult.map(function (oContext) {
assert.strictEqual(oContext.getModel(), oMetaModel);
return oContext.getPath();
}), ["/foo/bar", "/foo/baz"]);
});
});
//TODO iterate mix of inline and external targeting annotations
//TODO iterate annotations like "foo@..." for our special cases, e.g. annotations of annotation
//*********************************************************************************************
QUnit.test("events", function (assert) {
assert.throws(function () {
this.oMetaModel.attachParseError();
}, new Error("Unsupported event 'parseError': v4.ODataMetaModel#attachEvent"));
assert.throws(function () {
this.oMetaModel.attachRequestCompleted();
}, new Error("Unsupported event 'requestCompleted': v4.ODataMetaModel#attachEvent"));
assert.throws(function () {
this.oMetaModel.attachRequestFailed();
}, new Error("Unsupported event 'requestFailed': v4.ODataMetaModel#attachEvent"));
assert.throws(function () {
this.oMetaModel.attachRequestSent();
}, new Error("Unsupported event 'requestSent': v4.ODataMetaModel#attachEvent"));
});
//*********************************************************************************************
QUnit.test("validate: mSchema2MetadataUrl", function (assert) {
var mScope = {
"$Version" : "4.0",
"$Reference" : {
"/A/$metadata" : {
"$Include" : [
"A.", "A.A."
]
},
"/B/$metadata" : {
"$Include" : [
"B.", "B.B."
]
},
"/C/$metadata" : {
"$Include" : ["C."]
},
"../../../../default/iwbep/tea_busi_product/0001/$metadata" : {
"$Include" : [
"tea_busi_product."
]
}
}
},
sUrl = "/~/$metadata";
assert.deepEqual(this.oMetaModel.mSchema2MetadataUrl, {});
// simulate a previous reference to a schema with the _same_ reference URI --> allowed!
this.oMetaModel.mSchema2MetadataUrl["A."] = {"/A/$metadata" : false};
// simulate a previous reference to a schema with the _different_ reference URI
// --> allowed as long as the document is not yet read (and will never be read)
this.oMetaModel.mSchema2MetadataUrl["B.B."] = {"/B/V2/$metadata" : false};
// simulate a previous reference to a schema with the _same_ reference URI, already loaded
this.oMetaModel.mSchema2MetadataUrl["C."] = {"/C/$metadata" : true};
// code under test
assert.strictEqual(this.oMetaModel.validate(sUrl, mScope), mScope);
assert.deepEqual(this.oMetaModel.mSchema2MetadataUrl, {
"A." : {"/A/$metadata" : false},
"A.A." : {"/A/$metadata" : false},
"B." : {"/B/$metadata" : false},
"B.B." : {
"/B/$metadata" : false,
"/B/V2/$metadata" : false
},
"C." : {"/C/$metadata" : true},
"tea_busi_product." : {"/a/default/iwbep/tea_busi_product/0001/$metadata" : false}
});
});
//*********************************************************************************************
QUnit.test("getLastModified", function (assert) {
var mEmptyScope = {
"$Version" : "4.0"
},
mNewScope = {
"$Version" : "4.0",
"$Date" : "Tue, 18 Apr 2017 14:40:29 GMT"
},
iNow = Date.now(),
mOldScope = {
"$Version" : "4.0",
"$Date" : "Tue, 18 Apr 2017 14:40:29 GMT", // $LastModified wins!
"$LastModified" : "Fri, 07 Apr 2017 11:21:50 GMT"
},
mOldScopeClone = clone(mOldScope),
sUrl = "/~/$metadata"; // Note: in real life, each URL is read at most once!
// code under test (together with c'tor)
assert.strictEqual(this.oMetaModel.getLastModified().getTime(), 0, "initial value");
// code under test
assert.strictEqual(this.oMetaModel.validate(sUrl, mOldScope), mOldScope);
assert.strictEqual(this.oMetaModel.getLastModified().toISOString(),
"2017-04-07T11:21:50.000Z", "old $LastModified is used");
assert.notOk("$LastModified" in mOldScope);
// code under test
assert.strictEqual(this.oMetaModel.validate(sUrl, mNewScope), mNewScope);
assert.strictEqual(this.oMetaModel.getLastModified().toISOString(),
"2017-04-18T14:40:29.000Z", "new $Date is used");
assert.notOk("$Date" in mNewScope);
// code under test
assert.strictEqual(this.oMetaModel.validate(sUrl, mOldScopeClone), mOldScopeClone);
assert.strictEqual(this.oMetaModel.getLastModified().toISOString(),
"2017-04-18T14:40:29.000Z", "new $Date wins, old $LastModified is ignored");
assert.notOk("$LastModified" in mOldScopeClone);
// code under test
assert.strictEqual(this.oMetaModel.validate(sUrl, mEmptyScope), mEmptyScope);
assert.ok(this.oMetaModel.getLastModified().getTime() >= iNow,
"missing $Date/$LastModified is like 'now': " + this.oMetaModel.getLastModified());
});
//*********************************************************************************************
QUnit.test("getETags", function (assert) {
var sETag = 'W/"..."',
mETags,
that = this;
function codeUnderTest(sUrl, mScope) {
// code under test
assert.strictEqual(that.oMetaModel.validate(sUrl, mScope), mScope);
assert.notOk("$ETag" in mScope);
assert.notOk("$LastModified" in mScope);
}
// code under test (together with c'tor)
assert.deepEqual(this.oMetaModel.getETags(), {}, "initial value");
codeUnderTest("/~/A", {
"$Version" : "4.0",
"$LastModified" : "Fri, 07 Apr 2017 11:21:50 GMT"
});
codeUnderTest("/~/B", {
"$Version" : "4.0",
"$LastModified" : "Tue, 18 Apr 2017 14:40:29 GMT"
});
codeUnderTest("/~/C", {
"$Version" : "4.0"
});
codeUnderTest("/~/D", {
"$Version" : "4.0",
"$ETag" : sETag
});
// code under test
mETags = this.oMetaModel.getETags();
assert.deepEqual(mETags, {
"/~/A" : new Date(Date.UTC(2017, 3, 7, 11, 21, 50)),
"/~/B" : new Date(Date.UTC(2017, 3, 18, 14, 40, 29)),
"/~/C" : null,
"/~/D" : sETag // wins over null!
});
});
//*********************************************************************************************
[{
message : "Unsupported IncludeAnnotations",
scope : {
"$Version" : "4.0",
"$Reference" : {
"/A/$metadata" : {
"$Include" : [
"A."
]
},
"/B/$metadata" : {
"$IncludeAnnotations" : [{
"$TermNamespace" : "com.sap.vocabularies.Common.v1"
}]
}
}
}
}, {
message : "A schema cannot span more than one document: tea_busi."
+ " - is both included and defined",
scope : {
"$Version" : "4.0",
"$Reference" : {
"/B/$metadata" : {
"$Include" : [
"foo.", "tea_busi."
]
}
},
"tea_busi." : {
"$kind" : "Schema"
}
}
}, {
message : "A schema cannot span more than one document: existing."
+ " - expected reference URI /B/v1/$metadata but instead saw /B/v2/$metadata",
scope : {
"$Version" : "4.0",
"$Reference" : {
"/A/$metadata" : {
"$Include" : [
"foo.", "bar."
]
},
"/B/v2/$metadata" : {
"$Include" : [
"baz.", "existing."
]
}
}
}
}].forEach(function (oFixture) {
[false, true].forEach(function (bSupportReferences) {
var sMessage = oFixture.message,
sTitle = "validate: " + sMessage + ", supportReferences: " + bSupportReferences;
QUnit.test(sTitle, function (assert) {
var sUrl = "/~/$metadata",
that = this;
function codeUnderTest() {
var oResult = that.oMetaModel.validate(sUrl, oFixture.scope);
assert.strictEqual(oResult, oFixture.scope);
}
this.oMetaModel.bSupportReferences = bSupportReferences;
// simulate a schema that has already been read from another document
this.oMetaModel.mSchema2MetadataUrl = {
"existing." : {"/B/v1/$metadata" : true}
};
if (bSupportReferences) {
this.oLogMock.expects("error")
.withExactArgs(sMessage, sUrl, sODataMetaModel);
assert.throws(codeUnderTest, new Error(sUrl + ": " + sMessage));
} else {
codeUnderTest();
}
});
});
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations: without annotation files", function (assert) {
// Note: target elements have been omitted for brevity
var mExpectedAnnotations = {
"same.target" : {
"@Common.Description" : "",
"@Common.Label" : {
"old" : true // Note: no aggregation of properties here!
},
"@Common.Text" : ""
},
"another.target" : {
"@Common.Label" : ""
}
},
mScope = {
"A." : {
"$kind" : "Schema",
"$Annotations" : {
"same.target" : {
"@Common.Label" : {
"old" : true
},
"@Common.Text" : ""
}
}
},
"B." : {
"$kind" : "Schema",
"$Annotations" : {
"same.target" : {
"@Common.Description" : "",
"@Common.Label" : { // illegal overwrite within $metadata, ignored!
"new" : true
}
},
"another.target" : {
"@Common.Label" : ""
}
}
},
"B.B" : {}
};
this.oMetaModelMock.expects("validate")
.withExactArgs(this.oMetaModel.sUrl, mScope);
assert.deepEqual(this.oMetaModel.mSchema2MetadataUrl, {});
// code under test
this.oMetaModel._mergeAnnotations(mScope, []);
assert.deepEqual(mScope.$Annotations, mExpectedAnnotations,
"$Annotations have been shifted and merged from schemas to root");
assert.notOk("$Annotations" in mScope["A."], "$Annotations removed from schema");
assert.notOk("$Annotations" in mScope["B."], "$Annotations removed from schema");
assert.deepEqual(this.oMetaModel.mSchema2MetadataUrl, {
"A." : {"/a/b/c/d/e/$metadata" : false},
"B." : {"/a/b/c/d/e/$metadata" : false}
});
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations: validation failure for $metadata", function (assert) {
var oError = new Error(),
mScope = {};
this.oMetaModelMock.expects("validate")
.withExactArgs(this.oMetaModel.sUrl, mScope)
.throws(oError);
assert.throws(function () {
// code under test
this.oMetaModel._mergeAnnotations(mScope, []);
}, oError);
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations: validation failure in annotation file", function (assert) {
var oError = new Error(),
mScope = {},
mAnnotationScope1 = {},
mAnnotationScope2 = {};
this.oMetaModel.aAnnotationUris = ["n/a", "/my/annotation.xml"];
this.oMetaModelMock.expects("validate")
.withExactArgs(this.oMetaModel.sUrl, mScope);
this.oMetaModelMock.expects("validate")
.withExactArgs("n/a", mAnnotationScope1);
this.oMetaModelMock.expects("validate")
.withExactArgs("/my/annotation.xml", mAnnotationScope2)
.throws(oError);
assert.throws(function () {
// code under test
this.oMetaModel._mergeAnnotations(mScope, [mAnnotationScope1, mAnnotationScope2]);
}, oError);
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations: with annotation files (legacy)", function (assert) {
var sNamespace = "com.sap.gateway.default.iwbep.tea_busi.v0001.",
sWorker = sNamespace + "Worker/",
sBasicSalaryCurr = sWorker + "SALARY/BASIC_SALARY_CURR",
sBasicSalaryCurr2 = "another.schema.2.SALARY/BASIC_SALARY_CURR",
sBonusCurr = sWorker + "SALARY/BONUS_CURR",
sCommonLabel = "@com.sap.vocabularies.Common.v1.Label",
sCommonQuickInfo = "@com.sap.vocabularies.Common.v1.QuickInfo",
sCommonText = "@com.sap.vocabularies.Common.v1.Text",
sBaseUrl = "/" + window.location.pathname.split("/")[1]
+ "/test-resources/sap/ui/core/qunit/odata/v4/data/",
oMetadata = jQuery.sap.sjax({url : sBaseUrl + "metadata.json", dataType : "json"}).data,
oExpectedResult = clone(oMetadata),
oAnnotation = jQuery.sap.sjax({
url : sBaseUrl + "legacy_annotations.json",
dataType : "json"
}).data,
oAnnotationCopy = clone(oAnnotation);
// the examples are unrealistic and only need to work in 'legacy mode'
this.oMetaModel.bSupportReferences = false;
this.oMetaModel.aAnnotationUris = ["n/a"];
this.oMetaModelMock.expects("validate")
.withExactArgs(this.oMetaModel.sUrl, oMetadata);
this.oMetaModelMock.expects("validate")
.withExactArgs("n/a", oAnnotation);
oExpectedResult.$Annotations = oMetadata[sNamespace].$Annotations;
delete oExpectedResult[sNamespace].$Annotations;
// all entries with $kind are merged
oExpectedResult["my.schema.2.FuGetEmployeeMaxAge"] =
oAnnotationCopy["my.schema.2.FuGetEmployeeMaxAge"];
oExpectedResult["my.schema.2.Entity"] =
oAnnotationCopy["my.schema.2.Entity"];
oExpectedResult["my.schema.2.DefaultContainer"] =
oAnnotationCopy["my.schema.2.DefaultContainer"];
oExpectedResult["my.schema.2."] =
oAnnotationCopy["my.schema.2."];
oExpectedResult["another.schema.2."] =
oAnnotationCopy["another.schema.2."];
// update annotations
oExpectedResult.$Annotations[sBasicSalaryCurr][sCommonLabel]
= oAnnotationCopy["my.schema.2."].$Annotations[sBasicSalaryCurr][sCommonLabel];
oExpectedResult.$Annotations[sBasicSalaryCurr][sCommonQuickInfo]
= oAnnotationCopy["my.schema.2."].$Annotations[sBasicSalaryCurr][sCommonQuickInfo];
oExpectedResult.$Annotations[sBonusCurr][sCommonText]
= oAnnotationCopy["my.schema.2."].$Annotations[sBonusCurr][sCommonText];
oExpectedResult.$Annotations[sBasicSalaryCurr2]
= oAnnotationCopy["another.schema.2."].$Annotations[sBasicSalaryCurr2];
delete oExpectedResult["my.schema.2."].$Annotations;
delete oExpectedResult["another.schema.2."].$Annotations;
// code under test
this.oMetaModel._mergeAnnotations(oMetadata, [oAnnotation]);
assert.deepEqual(oMetadata, oExpectedResult, "merged metadata as expected");
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations: with annotation files", function (assert) {
var mScope0 = {
"$EntityContainer" : "tea_busi.DefaultContainer",
"$Reference" : {
"../../../../default/iwbep/tea_busi_foo/0001/$metadata" : {
"$Include" : [
"tea_busi_foo.v0001."
]
}
},
"$Version" : "4.0",
"tea_busi." : {
"$kind" : "Schema",
"$Annotations" : {
"tea_busi.DefaultContainer" : {
"@A" : "from $metadata",
"@B" : "from $metadata",
"@C" : "from $metadata"
},
"tea_busi.TEAM" : {
"@D" : ["from $metadata"],
"@E" : ["from $metadata"],
"@F" : ["from $metadata"]
}
}
},
"tea_busi.DefaultContainer" : {
"$kind" : "EntityContainer"
},
"tea_busi.EQUIPMENT" : {
"$kind" : "EntityType"
},
"tea_busi.TEAM" : {
"$kind" : "EntityType"
},
"tea_busi.Worker" : {
"$kind" : "EntityType"
}
},
mScope1 = {
"$Version" : "4.0",
"tea_busi_foo.v0001." : {
"$kind" : "Schema",
"$Annotations" : {
"tea_busi_foo.v0001.Product/Name" : {
"@Common.Label" : "from $metadata"
}
}
},
"tea_busi_foo.v0001.Product" : {
"$kind" : "EntityType",
"Name" : {
"$kind" : "Property",
"$Type" : "Edm.String"
}
}
},
mAnnotationScope1 = {
"$Version" : "4.0",
"foo." : {
"$kind" : "Schema",
"$Annotations" : {
"tea_busi.DefaultContainer" : {
"@B" : "from annotation #1",
"@C" : "from annotation #1"
},
"tea_busi.TEAM" : {
"@E" : ["from annotation #1"],
"@F" : ["from annotation #1"]
},
"tea_busi.Worker" : {
"@From.Annotation" : {
"$Type" : "some.Record",
"Label" : "from annotation #1"
},
"@From.Annotation1" : "from annotation #1"
}
}
}
},
mAnnotationScope2 = {
"$Version" : "4.0",
"bar." : {
"$kind" : "Schema",
"$Annotations" : {
"tea_busi.DefaultContainer" : {
"@C" : "from annotation #2"
},
"tea_busi.EQUIPMENT" : {
"@From.Annotation2" : "from annotation #2"
},
"tea_busi.TEAM" : {
"@F" : ["from annotation #2"]
},
"tea_busi.Worker" : {
"@From.Annotation" : {
"$Type" : "some.Record",
"Value" : "from annotation #2"
}
},
"tea_busi_foo.v0001.Product/Name" : {
"@Common.Label" : "from annotation #2"
}
}
}
},
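// Note: expected merge result - later annotation files overwrite earlier ones as well as
// $metadata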
mExpectedScope = {
"$Annotations" : {
"tea_busi.DefaultContainer" : {
"@A" : "from $metadata",
"@B" : "from annotation #1",
"@C" : "from annotation #2"
},
"tea_busi.EQUIPMENT" : {
"@From.Annotation2" : "from annotation #2"
},
"tea_busi.TEAM" : { // Note: no aggregation of array elements here!
"@D" : ["from $metadata"],
"@E" : ["from annotation #1"],
"@F" : ["from annotation #2"]
},
"tea_busi.Worker" : {
"@From.Annotation" : {
"$Type" : "some.Record",
// Note: no "Label" here!
"Value" : "from annotation #2"
},
"@From.Annotation1" : "from annotation #1"
},
"tea_busi_foo.v0001.Product/Name" : {
"@Common.Label" : "from annotation #2"
}
},
"$EntityContainer" : "tea_busi.DefaultContainer",
"$Reference" : {
"../../../../default/iwbep/tea_busi_foo/0001/$metadata" : {
"$Include" : [
"tea_busi_foo.v0001."
]
}
},
"$Version" : "4.0",
"bar." : {
"$kind" : "Schema"
},
"foo." : {
"$kind" : "Schema"
},
"tea_busi." : {
"$kind" : "Schema"
},
"tea_busi.DefaultContainer" : {
"$kind" : "EntityContainer"
},
"tea_busi.EQUIPMENT" : {
"$kind" : "EntityType"
},
"tea_busi.TEAM" : {
"$kind" : "EntityType"
},
"tea_busi.Worker" : {
"$kind" : "EntityType"
}
};
this.oMetaModel.aAnnotationUris = ["/URI/1", "/URI/2"];
this.oMetaModelMock.expects("validate")
.withExactArgs(this.oMetaModel.sUrl, mScope0);
this.oMetaModelMock.expects("validate")
.withExactArgs("/URI/1", mAnnotationScope1);
this.oMetaModelMock.expects("validate")
.withExactArgs("/URI/2", mAnnotationScope2);
assert.deepEqual(this.oMetaModel.mSchema2MetadataUrl, {});
// code under test
this.oMetaModel._mergeAnnotations(mScope0, [mAnnotationScope1, mAnnotationScope2]);
assert.deepEqual(mScope0, mExpectedScope);
assert.strictEqual(mScope0["tea_busi."].$Annotations, undefined);
assert.strictEqual(mAnnotationScope1["foo."].$Annotations, undefined);
assert.strictEqual(mAnnotationScope2["bar."].$Annotations, undefined);
assert.deepEqual(this.oMetaModel.mSchema2MetadataUrl, {
"bar." : {"/URI/2" : false},
"foo." : {"/URI/1" : false},
"tea_busi." : {"/a/b/c/d/e/$metadata" : false}
});
// prepare to load "cross-service reference"
// simulate #validate of mScope0
this.oMetaModel.mSchema2MetadataUrl["tea_busi_foo.v0001."]
= {"/a/default/iwbep/tea_busi_foo/0001/$metadata" : false};
this.oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(mScope0));
this.mock(this.oMetaModel.oRequestor).expects("read")
.withExactArgs("/a/default/iwbep/tea_busi_foo/0001/$metadata")
.returns(Promise.resolve(mScope1));
this.oMetaModelMock.expects("validate")
.withExactArgs("/a/default/iwbep/tea_busi_foo/0001/$metadata", mScope1)
.returns(mScope1);
// code under test
return this.oMetaModel.fetchObject("/tea_busi_foo.v0001.Product/Name@Common.Label")
.then(function (sLabel) {
assert.strictEqual(sLabel, "from annotation #2", "not overwritten by $metadata");
});
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations - error (legacy)", function (assert) {
var oAnnotation1 = {
"tea_busi.NewType1" : {
"$kind" : "EntityType"
}
},
oAnnotation2 = {
"tea_busi.NewType2" : {
"$kind" : "EntityType"
},
"tea_busi.ExistingType" : {
"$kind" : "EntityType"
}
},
sMessage = "A schema cannot span more than one document: tea_busi.ExistingType",
oMetadata = {
"tea_busi.ExistingType" : {
"$kind" : "EntityType"
}
};
this.oMetaModel.aAnnotationUris = ["n/a", "/my/annotation.xml"];
// legacy behavior: $Version is not checked, tea_busi.NewType2 is allowed
this.oMetaModel.bSupportReferences = false;
this.oMetaModelMock.expects("validate")
.withExactArgs(this.oMetaModel.sUrl, oMetadata);
this.oMetaModelMock.expects("validate")
.withExactArgs("n/a", oAnnotation1);
this.oMetaModelMock.expects("validate")
.withExactArgs("/my/annotation.xml", oAnnotation2);
this.oLogMock.expects("error")
.withExactArgs(sMessage, "/my/annotation.xml", sODataMetaModel);
assert.throws(function () {
// code under test
this.oMetaModel._mergeAnnotations(oMetadata, [oAnnotation1, oAnnotation2]);
}, new Error("/my/annotation.xml: " + sMessage));
});
//*********************************************************************************************
QUnit.test("_mergeAnnotations - a schema cannot span more than one document",
function (assert) {
var oAnnotation = {
"$Version" : "4.0",
"tea_busi." : {
"$kind" : "Schema"
}
},
sMessage = "A schema cannot span more than one document: tea_busi.",
oMetadata = {
"$Version" : "4.0",
"tea_busi." : {
"$kind" : "Schema"
}
};
this.oMetaModel.aAnnotationUris = ["n/a", "/my/annotation.xml"];
this.oLogMock.expects("error")
.withExactArgs(sMessage, "/my/annotation.xml", sODataMetaModel);
assert.throws(function () {
// code under test
this.oMetaModel._mergeAnnotations(oMetadata, [{"$Version" : "4.0"}, oAnnotation]);
}, new Error("/my/annotation.xml: " + sMessage));
});
//*********************************************************************************************
QUnit.test("getOrCreateValueListModel", function (assert) {
var oModel = new ODataModel({
serviceUrl : "/Foo/DataService/",
synchronizationMode : "None"
}),
oMetaModel = oModel.getMetaModel(),
oValueListModel;
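// the security token of the data service is expected to be forwarded to the value list model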
oModel.oRequestor.mHeaders["X-CSRF-Token"] = "xyz";
// code under test
oValueListModel = oMetaModel.getOrCreateValueListModel("../ValueListService/$metadata");
assert.ok(oValueListModel instanceof ODataModel);
assert.strictEqual(oValueListModel.sServiceUrl, "/Foo/ValueListService/");
assert.strictEqual(oValueListModel.getDefaultBindingMode(), BindingMode.OneWay);
assert.strictEqual(oValueListModel.sOperationMode, OperationMode.Server);
assert.strictEqual(oValueListModel.oRequestor.mHeaders["X-CSRF-Token"], "xyz");
// code under test
assert.strictEqual(oMetaModel.getOrCreateValueListModel("/Foo/ValueListService/$metadata"),
oValueListModel);
// code under test
assert.strictEqual(oValueListModel.getMetaModel()
.getOrCreateValueListModel("/Foo/ValueListService/$metadata"),
oValueListModel);
// code under test
assert.strictEqual(oValueListModel.getMetaModel().getOrCreateValueListModel("$metadata"),
oValueListModel);
oModel = new ODataModel({
serviceUrl : "/Foo/DataService2/",
synchronizationMode : "None"
});
// code under test - even a totally different model gets the very same value list model
assert.strictEqual(oModel.getMetaModel()
.getOrCreateValueListModel("../ValueListService/$metadata"),
oValueListModel);
});
//*********************************************************************************************
QUnit.test("getOrCreateValueListModel: relative data service URL", function (assert) {
var sRelativePath = "../../../DataService/",
sAbsolutePath =
new URI(sRelativePath).absoluteTo(document.baseURI).pathname().toString(),
oModel = new ODataModel({
serviceUrl : sRelativePath,
synchronizationMode : "None"
}),
oValueListModel;
// code under test
oValueListModel = oModel.getMetaModel()
.getOrCreateValueListModel("../ValueListService/$metadata");
assert.strictEqual(oValueListModel.sServiceUrl,
new URI("../ValueListService/").absoluteTo(sAbsolutePath).toString());
});
//*********************************************************************************************
QUnit.test("fetchValueListType: unknown property", function (assert) {
var oContext = {},
sPath = "/Products('HT-1000')/Foo";
this.oMetaModelMock.expects("getMetaContext").withExactArgs(sPath).returns(oContext);
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(undefined, sinon.match.same(oContext))
.returns(Promise.resolve());
// code under test
return this.oMetaModel.fetchValueListType(sPath).then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError.message, "No metadata for " + sPath);
});
});
//*********************************************************************************************
[{
mAnnotations : {
"@some.other.Annotation" : true
},
sValueListType : ValueListType.None
}, {
mAnnotations : {
"@com.sap.vocabularies.Common.v1.ValueListReferences" : [],
"@com.sap.vocabularies.Common.v1.ValueListWithFixedValues" : true
},
sValueListType : ValueListType.Fixed
}, {
mAnnotations : {
"@com.sap.vocabularies.Common.v1.ValueListReferences" : []
},
sValueListType : ValueListType.Standard
}, {
mAnnotations : {
"@com.sap.vocabularies.Common.v1.ValueListReferences#foo" : [],
"@com.sap.vocabularies.Common.v1.ValueListWithFixedValues" : false
},
sValueListType : ValueListType.Standard
}, {
mAnnotations : {
"@com.sap.vocabularies.Common.v1.ValueListMapping#foo" : {},
"@com.sap.vocabularies.Common.v1.ValueListWithFixedValues" : false
},
sValueListType : ValueListType.Standard
}].forEach(function (oFixture) {
QUnit.test("fetchValueListType: " + oFixture.sValueListType, function (assert) {
var oContext = {},
sPropertyPath = "/ProductList('HT-1000')/Status";
this.oMetaModelMock.expects("getMetaContext")
.withExactArgs(sPropertyPath).returns(oContext);
this.oMetaModelMock.expects("fetchObject")
.withExactArgs(undefined, sinon.match.same(oContext))
.returns(SyncPromise.resolve({}));
this.oMetaModelMock.expects("getObject")
.withExactArgs("@", sinon.match.same(oContext))
.returns(oFixture.mAnnotations);
// code under test
return this.oMetaModel.fetchValueListType(sPropertyPath).then(function (sValueListType) {
assert.strictEqual(sValueListType, oFixture.sValueListType);
});
});
});
//*********************************************************************************************
QUnit.test("getValueListType, requestValueListType", function (assert) {
return checkGetAndRequest(this, assert, "fetchValueListType", ["sPath"], true);
});
//*********************************************************************************************
QUnit.test("fetchValueListMappings: success", function (assert) {
var oModel = new ODataModel({
serviceUrl : "/Foo/DataService/",
synchronizationMode : "None"
}),
oMetaModelMock = this.mock(oModel.getMetaModel()),
oDefaultMapping = {
"CollectionPath" : "VH_Category1Set",
"Parameters" : [{"p1" : "foo"}]
},
oFooMapping = {
"CollectionPath" : "VH_Category2Set",
"Parameters" : [{"p2" : "bar"}]
},
oProperty = {},
oValueListMetadata = {
"$Annotations" : {
"zui5_epm_sample.Product/Category" : {
"@com.sap.vocabularies.Common.v1.ValueListMapping" : oDefaultMapping,
"@com.sap.vocabularies.Common.v1.ValueListMapping#foo" : oFooMapping
},
"some.other.Target" : {}
}
},
oValueListModel = {
getMetaModel : function () {
return {
fetchEntityContainer : function () {
return Promise.resolve(oValueListMetadata);
}
};
}
};
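// Note: the qualifier is the term's suffix, "" for the default mapping without qualifier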
oMetaModelMock.expects("getObject")
.withExactArgs("/zui5_epm_sample.Product/Category")
.returns(oProperty);
// code under test
return oModel.getMetaModel()
.fetchValueListMappings(oValueListModel, "zui5_epm_sample", oProperty)
.then(function (oValueListMappings) {
assert.deepEqual(oValueListMappings, {
"" : oDefaultMapping,
"foo" : oFooMapping
});
});
});
//*********************************************************************************************
[{
annotations : {
"zui5_epm_sample.Product/CurrencyCode/type.cast" : true
},
error : "Unexpected annotation target 'zui5_epm_sample.Product/CurrencyCode/type.cast' " +
"with namespace of data service in /Foo/ValueListService"
}, {
annotations : {
"zui5_epm_sample.Product/Category" : {
"@some.other.Term" : true
}
},
error : "Unexpected annotation 'some.other.Term' for target "
+ "'zui5_epm_sample.Product/Category' with namespace of data service "
+ "in /Foo/ValueListService"
}, {
annotations : {},
error : "No annotation 'com.sap.vocabularies.Common.v1.ValueListMapping' "
+ "in /Foo/ValueListService"
}].forEach(function (oFixture) {
QUnit.test("fetchValueListMappings: " + oFixture.error, function (assert) {
var oModel = new ODataModel({
serviceUrl : "/Foo/DataService/",
synchronizationMode : "None"
}),
oMetaModel = oModel.getMetaModel(),
oMetaModelMock = this.mock(oMetaModel),
oProperty = {},
oValueListMetadata = {
"$Annotations" : oFixture.annotations
},
oValueListModel = {
getMetaModel : function () {
return {
fetchEntityContainer : function () {
return Promise.resolve(oValueListMetadata);
}
};
},
sServiceUrl : "/Foo/ValueListService"
},
sTarget = Object.keys(oFixture.annotations)[0];
oMetaModelMock.expects("getObject").atLeast(0)
.withExactArgs("/" + sTarget)
.returns(sTarget === "zui5_epm_sample.Product/Category" ? oProperty : undefined);
// code under test
return oMetaModel
.fetchValueListMappings(oValueListModel, "zui5_epm_sample", oProperty)
.then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError.message, oFixture.error);
});
});
});
//*********************************************************************************************
QUnit.test("fetchValueListMappings: value list model is data model", function (assert) {
var oModel = new ODataModel({
serviceUrl : "/Foo/DataService/",
synchronizationMode : "None"
}),
oMetaModelMock = this.mock(oModel.getMetaModel()),
oMapping = {
"CollectionPath" : "VH_CountrySet",
"Parameters" : [{"p1" : "foo"}]
},
oProperty = {
"$kind" : "Property"
},
oMetadata = {
"$EntityContainer" : "value_list.Container",
"value_list.VH_BusinessPartner" : {
"$kind" : "Entity",
"Country" : oProperty
},
"$Annotations" : {
// value list on value list
"value_list.VH_BusinessPartner/Country" : {
"@com.sap.vocabularies.Common.v1.Label" : "Country",
"@com.sap.vocabularies.Common.v1.ValueListMapping" : oMapping
},
"value_list.VH_BusinessPartner/Foo" : {/* some other field w/ value list*/}
}
};
oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(oMetadata));
// code under test
return oModel.getMetaModel()
.fetchValueListMappings(oModel, "value_list", oProperty)
.then(function (oValueListMappings) {
assert.deepEqual(oValueListMappings, {
"" : oMapping
});
});
});
//*********************************************************************************************
[{
sPropertyPath : "/EMPLOYEES/unknown",
sExpectedError : "No metadata"
}, {
sPropertyPath : "/EMPLOYEES/AGE",
sExpectedError : "No annotation 'com.sap.vocabularies.Common.v1.ValueListReferences'"
}].forEach(function (oFixture) {
QUnit.test("requestValueListInfo: " + oFixture.sExpectedError, function (assert) {
var oModel = new ODataModel({
serviceUrl : "/~/",
synchronizationMode : "None"
});
this.mock(oModel.getMetaModel()).expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(mScope));
// code under test
return oModel.getMetaModel().requestValueListInfo(oFixture.sPropertyPath)
.then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError.message,
oFixture.sExpectedError + " for " + oFixture.sPropertyPath);
});
});
});
//*********************************************************************************************
[false, true].forEach(function (bDuplicate) {
QUnit.test("requestValueListInfo: duplicate=" + bDuplicate, function (assert) {
var sMappingUrl1 = "../ValueListService1/$metadata",
sMappingUrl2 = "../ValueListService2/$metadata",
sMappingUrlBar = "../ValueListServiceBar/$metadata",
oModel = new ODataModel({
serviceUrl : "/Foo/DataService/",
synchronizationMode : "None"
}),
oMetaModelMock = this.mock(oModel.getMetaModel()),
oProperty = {
"$kind" : "Property"
},
sPropertyPath = "/ProductList('HT-1000')/Category",
oMetadata = {
"$EntityContainer" : "zui5_epm_sample.Container",
"zui5_epm_sample.Product" : {
"$kind" : "Entity",
"Category" : oProperty
},
"$Annotations" : {
"zui5_epm_sample.Product/Category" : {
"@com.sap.vocabularies.Common.v1.ValueListReferences" :
[sMappingUrl1, sMappingUrl2],
"@com.sap.vocabularies.Common.v1.ValueListReferences#bar" :
[sMappingUrlBar],
"@com.sap.vocabularies.Common.v1.ValueListReferences#bar@an.Annotation"
: true,
"@some.other.Annotation" : true
}
},
"zui5_epm_sample.Container" : {
"ProductList" : {
"$kind" : "EntitySet",
"$Type" : "zui5_epm_sample.Product"
}
}
},
oValueListMappings1 = {
"" : {CollectionPath : ""}
},
oValueListMappings2 = {
"foo" : {CollectionPath : "foo"}
},
oValueListMappingsBar = {},
oValueListModel1 = {sServiceUrl : sMappingUrl1},
oValueListModel2 = {sServiceUrl : sMappingUrl2},
oValueListModelBar = {sServiceUrl : sMappingUrlBar};
oValueListMappingsBar[bDuplicate ? "" : "bar"] = {CollectionPath : "bar"};
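// in the duplicate case, the "bar" service maps the empty qualifier again and clashes with
// the mapping from the first URL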
oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(oMetadata));
oMetaModelMock.expects("getOrCreateValueListModel")
.withExactArgs(sMappingUrl1)
.returns(oValueListModel1);
oMetaModelMock.expects("fetchValueListMappings")
.withExactArgs(sinon.match.same(oValueListModel1), "zui5_epm_sample",
sinon.match.same(oProperty))
.returns(Promise.resolve(oValueListMappings1));
oMetaModelMock.expects("getOrCreateValueListModel")
.withExactArgs(sMappingUrl2)
.returns(oValueListModel2);
oMetaModelMock.expects("fetchValueListMappings")
.withExactArgs(sinon.match.same(oValueListModel2), "zui5_epm_sample",
sinon.match.same(oProperty))
.returns(Promise.resolve(oValueListMappings2));
oMetaModelMock.expects("getOrCreateValueListModel")
.withExactArgs(sMappingUrlBar)
.returns(oValueListModelBar);
oMetaModelMock.expects("fetchValueListMappings")
.withExactArgs(sinon.match.same(oValueListModelBar), "zui5_epm_sample",
sinon.match.same(oProperty))
.returns(SyncPromise.resolve(oValueListMappingsBar));
// code under test
return oModel.getMetaModel()
.requestValueListInfo(sPropertyPath)
.then(function (oResult) {
assert.ok(!bDuplicate);
assert.deepEqual(oResult, {
"" : {
$model : oValueListModel1,
CollectionPath : ""
},
"foo" : {
$model : oValueListModel2,
CollectionPath : "foo"
},
"bar" : {
$model : oValueListModelBar,
CollectionPath : "bar"
}
});
}, function (oError) {
assert.ok(bDuplicate);
assert.strictEqual(oError.message,
"Annotations 'com.sap.vocabularies.Common.v1.ValueListMapping' with "
+ "identical qualifier '' for property " + sPropertyPath
+ " in " + sMappingUrlBar + " and " + sMappingUrl1);
});
});
});
//*********************************************************************************************
QUnit.test("requestValueListInfo: same model w/o reference", function (assert) {
var oProperty = {
"$kind" : "Property"
},
oValueListMappingFoo = {CollectionPath : "foo"},
oMetadata = {
"$EntityContainer" : "value_list.Container",
"value_list.Container" : {
"$kind" : "EntityContainer",
"VH_BusinessPartnerSet" : {
"$kind" : "EntitySet",
"$Type" : "value_list.VH_BusinessPartner"
}
},
"value_list.VH_BusinessPartner" : {
"$kind" : "Entity",
"Country" : oProperty
},
"$Annotations" : {
"value_list.VH_BusinessPartner/Country" : {
"@com.sap.vocabularies.Common.v1.ValueListMapping#foo" :
oValueListMappingFoo,
"@com.sap.vocabularies.Common.v1.ValueListMapping#bar" :
{CollectionPath : "bar"}
}
}
},
oModel = new ODataModel({
serviceUrl : "/Foo/ValueListService/",
synchronizationMode : "None"
}),
oMetaModelMock = this.mock(oModel.getMetaModel()),
sPropertyPath = "/VH_BusinessPartnerSet('0100000000')/Country";
oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(oMetadata));
// code under test
return oModel.getMetaModel().requestValueListInfo(sPropertyPath).then(function (oResult) {
assert.strictEqual(oResult.foo.$model, oModel);
assert.strictEqual(oResult.bar.$model, oModel);
assert.notOk("$model" in oValueListMappingFoo);
delete oResult.foo.$model;
delete oResult.bar.$model;
assert.deepEqual(oResult, {
"foo" : {CollectionPath : "foo"},
"bar" : {CollectionPath : "bar"}
});
});
});
//*********************************************************************************************
[false, true].forEach(function (bDuplicate) {
var sTitle = "requestValueListInfo: fixed values: duplicate=" + bDuplicate;
QUnit.test(sTitle, function (assert) {
var oValueListMapping = {CollectionPath : "foo"},
oAnnotations = {
"@com.sap.vocabularies.Common.v1.ValueListWithFixedValues" : true,
"@com.sap.vocabularies.Common.v1.ValueListMapping#foo" : oValueListMapping
},
oMetadata = {
"$EntityContainer" : "value_list.Container",
"value_list.Container" : {
"$kind" : "EntityContainer",
"VH_BusinessPartnerSet" : {
"$kind" : "EntitySet",
"$Type" : "value_list.VH_BusinessPartner"
}
},
"value_list.VH_BusinessPartner" : {
"$kind" : "Entity",
"Country" : {}
},
"$Annotations" : {
"value_list.VH_BusinessPartner/Country" : oAnnotations
}
},
oModel = new ODataModel({
serviceUrl : "/Foo/ValueListService/",
synchronizationMode : "None"
}),
sPropertyPath = "/VH_BusinessPartnerSet('42')/Country";
if (bDuplicate) {
oAnnotations["@com.sap.vocabularies.Common.v1.ValueListMapping#bar"] = {};
}
this.mock(oModel.getMetaModel()).expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(oMetadata));
// code under test
return oModel.getMetaModel().requestValueListInfo(sPropertyPath)
.then(function (oResult) {
assert.notOk(bDuplicate);
assert.strictEqual(oResult[""].$model, oModel);
delete oResult[""].$model;
assert.deepEqual(oResult, {
"" : {CollectionPath : "foo"}
});
}, function (oError) {
assert.ok(bDuplicate);
assert.strictEqual(oError.message, "Annotation "
+ "'com.sap.vocabularies.Common.v1.ValueListWithFixedValues' but multiple "
+ "'com.sap.vocabularies.Common.v1.ValueListMapping' for property "
+ sPropertyPath);
});
});
});
//*********************************************************************************************
QUnit.test("requestValueListInfo: property in cross-service reference", function (assert) {
var sMappingUrl = "../ValueListService/$metadata",
oModel = new ODataModel({
serviceUrl : "/Foo/DataService/",
synchronizationMode : "None"
}),
oMetaModelMock = this.mock(oModel.getMetaModel()),
oProperty = {
"$kind" : "Property"
},
oMetadata = {
"$Version" : "4.0",
"$Reference" : {
"/Foo/EpmSample/$metadata" : {
"$Include" : ["zui5_epm_sample."]
}
},
"$EntityContainer" : "base.Container",
"base.Container" : {
"BusinessPartnerList" : {
"$kind" : "EntitySet",
"$Type" : "base.BusinessPartner"
}
},
"base.BusinessPartner" : {
"$kind" : "EntityType",
"BP_2_PRODUCT" : {
"$kind" : "NavigationProperty",
"$Type" : "zui5_epm_sample.Product"
}
}
},
oMetadataProduct = {
"$Version" : "4.0",
"zui5_epm_sample.Product" : {
"$kind" : "Entity",
"Category" : oProperty
},
"zui5_epm_sample." : {
"$kind" : "Schema",
"$Annotations" : {
"zui5_epm_sample.Product/Category" : {
"@com.sap.vocabularies.Common.v1.ValueListReferences" : [sMappingUrl]
}
}
}
},
sPropertyPath = "/BusinessPartnerList('0100000000')/BP_2_PRODUCT('HT-1000')/Category",
oRequestorMock = this.mock(oModel.oMetaModel.oRequestor),
oValueListMappings = {
"" : {CollectionPath : ""}
},
oValueListModel = {sServiceUrl : sMappingUrl};
oRequestorMock.expects("read").withExactArgs("/Foo/DataService/$metadata", false, undefined)
.returns(Promise.resolve(oMetadata));
oRequestorMock.expects("read").withExactArgs("/Foo/EpmSample/$metadata")
.returns(Promise.resolve(oMetadataProduct));
oMetaModelMock.expects("getOrCreateValueListModel")
.withExactArgs(sMappingUrl)
.returns(oValueListModel);
oMetaModelMock.expects("fetchValueListMappings")
.withExactArgs(sinon.match.same(oValueListModel), "zui5_epm_sample",
sinon.match.same(oProperty))
.returns(Promise.resolve(oValueListMappings));
// code under test
return oModel.getMetaModel().requestValueListInfo(sPropertyPath).then(function (oResult) {
assert.deepEqual(oResult, {
"" : {
$model : oValueListModel,
CollectionPath : ""
}
});
});
});
//*********************************************************************************************
QUnit.test("requestValueListInfo: same qualifier in reference and local", function (assert) {
var sMappingUrl = "../ValueListService/$metadata",
oProperty = {
"$kind" : "Property"
},
oMetadata = {
"$EntityContainer" : "zui5_epm_sample.Container",
"zui5_epm_sample.Container" : {
"$kind" : "EntityContainer",
"ProductList" : {
"$kind" : "EntitySet",
"$Type" : "zui5_epm_sample.Product"
}
},
"zui5_epm_sample.Product" : {
"$kind" : "Entity",
"Category" : oProperty
},
"$Annotations" : {
"zui5_epm_sample.Product/Category" : {
"@com.sap.vocabularies.Common.v1.ValueListReferences" : [sMappingUrl],
"@com.sap.vocabularies.Common.v1.ValueListMapping#foo" : {}
}
}
},
oModel = new ODataModel({
serviceUrl : "/Foo/ValueListService/",
synchronizationMode : "None"
}),
oMetaModelMock = this.mock(oModel.getMetaModel()),
sPropertyPath = "/ProductList('HT-1000')/Category",
oValueListModel = {};
oMetaModelMock.expects("fetchEntityContainer").atLeast(1)
.returns(SyncPromise.resolve(oMetadata));
oMetaModelMock.expects("getOrCreateValueListModel")
.withExactArgs(sMappingUrl)
.returns(oValueListModel);
oMetaModelMock.expects("fetchValueListMappings")
.withExactArgs(sinon.match.same(oValueListModel), "zui5_epm_sample",
sinon.match.same(oProperty))
.returns(Promise.resolve({"foo" : {}}));
// code under test
return oModel.getMetaModel().requestValueListInfo(sPropertyPath).then(function () {
assert.ok(false);
}, function (oError) {
assert.strictEqual(oError.message,
"Annotations 'com.sap.vocabularies.Common.v1.ValueListMapping' with identical "
+ "qualifier 'foo' for property " + sPropertyPath + " in "
+ oModel.sServiceUrl + "$metadata and " + sMappingUrl);
});
});
//*********************************************************************************************
QUnit.test("fetchModule: synchronously", function (assert) {
var vModule = {};
this.mock(sap.ui).expects("require")
.withExactArgs("sap/ui/model/odata/type/Int")
.returns(vModule); // requested module already loaded
// code under test
assert.strictEqual(this.oMetaModel.fetchModule("sap.ui.model.odata.type.Int").getResult(),
vModule);
});
//*********************************************************************************************
QUnit.test("fetchModule, asynchronous", function (assert) {
var vModule = {},
sModuleName = "sap/ui/model/odata/type/Int64",
oSapUiMock = this.mock(sap.ui);
oSapUiMock.expects("require")
.withExactArgs(sModuleName)
.returns(undefined); // requested module not yet loaded
oSapUiMock.expects("require")
.withExactArgs([sModuleName], sinon.match.func)
.callsArgWithAsync(1, vModule);
// code under test
return this.oMetaModel.fetchModule("sap.ui.model.odata.type.Int64")
.then(function (oResult) {
assert.strictEqual(oResult, vModule);
});
});
//*********************************************************************************************
if (TestUtils.isRealOData()) {
//*****************************************************************************************
QUnit.test("getValueListType, requestValueListInfo: realOData", function (assert) {
var sPath = new URI(TestUtils.proxy(sSampleServiceUrl))
.absoluteTo(window.location.pathname).toString(),
oModel = new ODataModel({
serviceUrl : sPath,
synchronizationMode : "None"
}),
oMetaModel = oModel.getMetaModel(),
sPropertyPath = "/ProductList('HT-1000')/Category";
return oMetaModel.requestObject("/ProductList/").then(function () {
assert.strictEqual(oMetaModel.getValueListType(
"/com.sap.gateway.default.zui5_epm_sample.v0002.Contact/Sex"),
ValueListType.Fixed);
assert.strictEqual(oMetaModel.getValueListType(sPropertyPath),
ValueListType.Standard);
return oMetaModel.requestValueListInfo(sPropertyPath).then(function (oResult) {
var oValueListInfo = oResult[""];
assert.strictEqual(oValueListInfo.CollectionPath, "H_EPM_PD_CATS_SH_Set");
});
});
});
//*****************************************************************************************
QUnit.test("requestValueListInfo: same model w/o reference, realOData", function (assert) {
var oModel = new ODataModel({
serviceUrl : TestUtils.proxy(sSampleServiceUrl),
synchronizationMode : "None"
}),
oMetaModel = oModel.getMetaModel(),
sPropertyPath = "/ProductList/0/CurrencyCode",
oValueListMetaModel;
return oMetaModel.requestObject("/ProductList/").then(function () {
// value list in the data service
assert.strictEqual(oMetaModel.getValueListType(sPropertyPath),
ValueListType.Standard);
return oMetaModel.requestValueListInfo(sPropertyPath);
}).then(function (oValueListInfo) {
var sPropertyPath2 = "/H_TCURC_SH_Set/1/WAERS";
// value list in the value list service
oValueListMetaModel = oValueListInfo[""].$model.getMetaModel();
assert.strictEqual(oValueListMetaModel.getValueListType(sPropertyPath2),
ValueListType.Standard);
assert.strictEqual(oValueListInfo[""].CollectionPath, "H_TCURC_SH_Set");
return oValueListMetaModel.requestValueListInfo(sPropertyPath2);
}).then(function (oValueListInfo) {
assert.strictEqual(oValueListInfo[""].$model.getMetaModel(), oValueListMetaModel);
assert.strictEqual(oValueListInfo[""].CollectionPath, "TCURC_CT_Set");
});
});
}
});
//TODO getContext vs. createBindingContext; map of "singletons" vs. memory leak