/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io._
import java.lang.ref.{ReferenceQueue => JReferenceQueue, WeakReference}
import java.nio.ByteBuffer
import java.nio.channels.Channels
import java.util.Collections
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.Random
import scala.util.control.NonFatal
import com.codahale.metrics.{MetricRegistry, MetricSet}
import org.apache.spark._
import org.apache.spark.executor.DataReadMethod
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.internal.config.Network
import org.apache.spark.memory.{MemoryManager, MemoryMode}
import org.apache.spark.metrics.source.Source
import org.apache.spark.network._
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.network.client.StreamCallbackWithID
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.shuffle._
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo
import org.apache.spark.network.util.TransportConf
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.serializer.{SerializerInstance, SerializerManager}
import org.apache.spark.shuffle.{ShuffleManager, ShuffleWriteMetricsReporter}
import org.apache.spark.storage.memory._
import org.apache.spark.unsafe.Platform
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
/* Class for returning a fetched block and associated metrics. */
private[spark] class BlockResult(
val data: Iterator[Any],
val readMethod: DataReadMethod.Value,
val bytes: Long)
/**
* Abstracts away how blocks are stored and provides different ways to read the underlying block
* data. Callers should call [[dispose()]] when they're done with the block.
*/
private[spark] trait BlockData {
def toInputStream(): InputStream
/**
* Returns a Netty-friendly wrapper for the block's data.
*
* Please see `ManagedBuffer.convertToNetty()` for more details.
*/
def toNetty(): Object
def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer
def toByteBuffer(): ByteBuffer
def size: Long
def dispose(): Unit
}
private[spark] class ByteBufferBlockData(
val buffer: ChunkedByteBuffer,
val shouldDispose: Boolean) extends BlockData {
override def toInputStream(): InputStream = buffer.toInputStream(dispose = false)
override def toNetty(): Object = buffer.toNetty
override def toChunkedByteBuffer(allocator: Int => ByteBuffer): ChunkedByteBuffer = {
buffer.copy(allocator)
}
override def toByteBuffer(): ByteBuffer = buffer.toByteBuffer
override def size: Long = buffer.size
override def dispose(): Unit = {
if (shouldDispose) {
buffer.dispose()
}
}
}
/**
 * Manager running on every node (driver and executors) which provides interfaces for putting and
 * retrieving blocks, both locally and remotely, in various stores (memory, disk, and off-heap).
*
* Note that [[initialize()]] must be called before the BlockManager is usable.
*/
private[spark] class BlockManager(
executorId: String,
rpcEnv: RpcEnv,
val master: BlockManagerMaster,
val serializerManager: SerializerManager,
val conf: SparkConf,
memoryManager: MemoryManager,
mapOutputTracker: MapOutputTracker,
shuffleManager: ShuffleManager,
val blockTransferService: BlockTransferService,
securityManager: SecurityManager,
numUsableCores: Int)
extends BlockDataManager with BlockEvictionHandler with Logging {
private[spark] val externalShuffleServiceEnabled =
conf.get(config.SHUFFLE_SERVICE_ENABLED)
private val remoteReadNioBufferConversion =
conf.get(Network.NETWORK_REMOTE_READ_NIO_BUFFER_CONVERSION)
val diskBlockManager = {
// Only perform cleanup if an external service is not serving our shuffle files.
val deleteFilesOnStop =
!externalShuffleServiceEnabled || executorId == SparkContext.DRIVER_IDENTIFIER
new DiskBlockManager(conf, deleteFilesOnStop)
}
// Visible for testing
private[storage] val blockInfoManager = new BlockInfoManager
private val futureExecutionContext = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonCachedThreadPool("block-manager-future", 128))
// The actual stores where blocks are kept
private[spark] val memoryStore =
new MemoryStore(conf, blockInfoManager, serializerManager, memoryManager, this)
private[spark] val diskStore = new DiskStore(conf, diskBlockManager, securityManager)
memoryManager.setMemoryStore(memoryStore)
// Note: depending on the memory manager, `maxMemory` may actually vary over time.
// However, since we use this only for reporting and logging, what we actually want here is
// the absolute maximum value that `maxMemory` can ever possibly reach. We may need
// to revisit whether reporting this value as the "max" is intuitive to the user.
private val maxOnHeapMemory = memoryManager.maxOnHeapStorageMemory
private val maxOffHeapMemory = memoryManager.maxOffHeapStorageMemory
// Port used by the external shuffle service. In Yarn mode, this may already be
// set through the Hadoop configuration as the server is launched in the Yarn NM.
private val externalShuffleServicePort = {
val tmpPort = Utils.getSparkOrYarnConfig(conf, config.SHUFFLE_SERVICE_PORT.key,
config.SHUFFLE_SERVICE_PORT.defaultValueString).toInt
if (tmpPort == 0) {
// For testing, we set "spark.shuffle.service.port" to 0 in the yarn config, so yarn finds
// an open port. But we still need to tell our spark apps the right port to use. So
// only if the yarn config has the port set to 0 do we prefer the value in the spark config.
conf.get(config.SHUFFLE_SERVICE_PORT.key).toInt
} else {
tmpPort
}
}
var blockManagerId: BlockManagerId = _
// Address of the server that serves this executor's shuffle files. This is either an external
// service, or just our own Executor's BlockManager.
private[spark] var shuffleServerId: BlockManagerId = _
// Client to read other executors' shuffle files. This is either an external service, or just the
// standard BlockTransferService to directly connect to other Executors.
private[spark] val shuffleClient = if (externalShuffleServiceEnabled) {
val transConf = SparkTransportConf.fromSparkConf(conf, "shuffle", numUsableCores)
new ExternalShuffleClient(transConf, securityManager,
securityManager.isAuthenticationEnabled(), conf.get(config.SHUFFLE_REGISTRATION_TIMEOUT))
} else {
blockTransferService
}
// Max number of failures before this block manager refreshes the block locations from the driver
private val maxFailuresBeforeLocationRefresh =
conf.get(config.BLOCK_FAILURES_BEFORE_LOCATION_REFRESH)
private val slaveEndpoint = rpcEnv.setupEndpoint(
"BlockManagerEndpoint" + BlockManager.ID_GENERATOR.next,
new BlockManagerSlaveEndpoint(rpcEnv, this, mapOutputTracker))
// Pending re-registration action being executed asynchronously or null if none is pending.
// Accesses should synchronize on asyncReregisterLock.
private var asyncReregisterTask: Future[Unit] = null
private val asyncReregisterLock = new Object
// Fields related to peer block managers that are necessary for block replication
@volatile private var cachedPeers: Seq[BlockManagerId] = _
private val peerFetchLock = new Object
private var lastPeerFetchTimeNs = 0L
private var blockReplicationPolicy: BlockReplicationPolicy = _
// A DownloadFileManager used to track all the files of remote blocks which are above the
// specified memory threshold. Files will be deleted automatically based on weak references.
// Exposed for test
private[storage] val remoteBlockTempFileManager =
new BlockManager.RemoteBlockDownloadFileManager(this)
private val maxRemoteBlockToMem = conf.get(config.MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM)
/**
* Initializes the BlockManager with the given appId. This is not performed in the constructor as
* the appId may not be known at BlockManager instantiation time (in particular for the driver,
* where it is only learned after registration with the TaskScheduler).
*
* This method initializes the BlockTransferService and ShuffleClient, registers with the
* BlockManagerMaster, starts the BlockManagerWorker endpoint, and registers with a local shuffle
* service if configured.
*/
def initialize(appId: String): Unit = {
blockTransferService.init(this)
shuffleClient.init(appId)
blockReplicationPolicy = {
val priorityClass = conf.get(config.STORAGE_REPLICATION_POLICY)
val clazz = Utils.classForName(priorityClass)
val ret = clazz.getConstructor().newInstance().asInstanceOf[BlockReplicationPolicy]
logInfo(s"Using $priorityClass for block replication policy")
ret
}
val id =
BlockManagerId(executorId, blockTransferService.hostName, blockTransferService.port, None)
val idFromMaster = master.registerBlockManager(
id,
maxOnHeapMemory,
maxOffHeapMemory,
slaveEndpoint)
blockManagerId = if (idFromMaster != null) idFromMaster else id
shuffleServerId = if (externalShuffleServiceEnabled) {
logInfo(s"external shuffle service port = $externalShuffleServicePort")
BlockManagerId(executorId, blockTransferService.hostName, externalShuffleServicePort)
} else {
blockManagerId
}
// Register Executors' configuration with the local shuffle service, if one should exist.
if (externalShuffleServiceEnabled && !blockManagerId.isDriver) {
registerWithExternalShuffleServer()
}
logInfo(s"Initialized BlockManager: $blockManagerId")
}
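/*
 * Illustrative usage sketch (not part of the original source; the wiring below is an
 * assumption): SparkEnv constructs the BlockManager, and the driver/executor calls
 * `initialize` once the application id is known, e.g.
 *
 * {{{
 *   val bm: BlockManager = SparkEnv.get.blockManager
 *   bm.initialize(conf.getAppId)  // registers with the master and, if configured,
 *                                 // with the external shuffle service
 * }}}
 */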
def shuffleMetricsSource: Source = {
import BlockManager._
if (externalShuffleServiceEnabled) {
new ShuffleMetricsSource("ExternalShuffle", shuffleClient.shuffleMetrics())
} else {
new ShuffleMetricsSource("NettyBlockTransfer", shuffleClient.shuffleMetrics())
}
}
private def registerWithExternalShuffleServer(): Unit = {
logInfo("Registering executor with local external shuffle service.")
val shuffleConfig = new ExecutorShuffleInfo(
diskBlockManager.localDirs.map(_.toString),
diskBlockManager.subDirsPerLocalDir,
shuffleManager.getClass.getName)
val MAX_ATTEMPTS = conf.get(config.SHUFFLE_REGISTRATION_MAX_ATTEMPTS)
val SLEEP_TIME_SECS = 5
for (i <- 1 to MAX_ATTEMPTS) {
try {
// Synchronous and will throw an exception if we cannot connect.
shuffleClient.asInstanceOf[ExternalShuffleClient].registerWithShuffleServer(
shuffleServerId.host, shuffleServerId.port, shuffleServerId.executorId, shuffleConfig)
return
} catch {
case e: Exception if i < MAX_ATTEMPTS =>
logError(s"Failed to connect to external shuffle server, will retry ${MAX_ATTEMPTS - i}"
+ s" more times after waiting $SLEEP_TIME_SECS seconds...", e)
Thread.sleep(SLEEP_TIME_SECS * 1000L)
case NonFatal(e) =>
throw new SparkException("Unable to register with external shuffle server due to: " +
e.getMessage, e)
}
}
}
/**
* Report all blocks to the BlockManager again. This may be necessary if we are dropped
* by the BlockManager and come back or if we become capable of recovering blocks on disk after
* an executor crash.
*
* This function deliberately fails silently if the master returns false (indicating that
* the slave needs to re-register). The error condition will be detected again by the next
* heartbeat attempt or new block registration, and another try to re-register all blocks
* will be made then.
*/
private def reportAllBlocks(): Unit = {
logInfo(s"Reporting ${blockInfoManager.size} blocks to the master.")
for ((blockId, info) <- blockInfoManager.entries) {
val status = getCurrentBlockStatus(blockId, info)
if (info.tellMaster && !tryToReportBlockStatus(blockId, status)) {
logError(s"Failed to report $blockId to master; giving up.")
return
}
}
}
/**
* Re-register with the master and report all blocks to it. This will be called by the heartbeat
* thread if our heartbeat to the block manager indicates that we were not registered.
*
* Note that this method must be called without any BlockInfo locks held.
*/
def reregister(): Unit = {
// TODO: We might need to rate limit re-registering.
logInfo(s"BlockManager $blockManagerId re-registering with master")
master.registerBlockManager(blockManagerId, maxOnHeapMemory, maxOffHeapMemory, slaveEndpoint)
reportAllBlocks()
}
/**
* Re-register with the master sometime soon.
*/
private def asyncReregister(): Unit = {
asyncReregisterLock.synchronized {
if (asyncReregisterTask == null) {
asyncReregisterTask = Future[Unit] {
// This is a blocking action and should run in futureExecutionContext which is a cached
// thread pool
reregister()
asyncReregisterLock.synchronized {
asyncReregisterTask = null
}
}(futureExecutionContext)
}
}
}
/**
* For testing. Wait for any pending asynchronous re-registration; otherwise, do nothing.
*/
def waitForAsyncReregister(): Unit = {
val task = asyncReregisterTask
if (task != null) {
try {
ThreadUtils.awaitReady(task, Duration.Inf)
} catch {
case NonFatal(t) =>
throw new Exception("Error occurred while waiting for async. reregistration", t)
}
}
}
/**
* Interface to get local block data. Throws an exception if the block cannot be found or
* cannot be read successfully.
*/
override def getBlockData(blockId: BlockId): ManagedBuffer = {
if (blockId.isShuffle) {
shuffleManager.shuffleBlockResolver.getBlockData(blockId.asInstanceOf[ShuffleBlockId])
} else {
getLocalBytes(blockId) match {
case Some(blockData) =>
new BlockManagerManagedBuffer(blockInfoManager, blockId, blockData, true)
case None =>
// If this block manager receives a request for a block that it doesn't have then it's
// likely that the master has outdated block statuses for this block. Therefore, we send
// an RPC so that this block is marked as being unavailable from this block manager.
reportBlockStatus(blockId, BlockStatus.empty)
throw new BlockNotFoundException(blockId.toString)
}
}
}
/**
* Put the block locally, using the given storage level.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*/
override def putBlockData(
blockId: BlockId,
data: ManagedBuffer,
level: StorageLevel,
classTag: ClassTag[_]): Boolean = {
putBytes(blockId, new ChunkedByteBuffer(data.nioByteBuffer()), level)(classTag)
}
override def putBlockDataAsStream(
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[_]): StreamCallbackWithID = {
// TODO if we're going to only put the data in the disk store, we should just write it directly
// to the final location, but that would require a deeper refactor of this code. So instead
// we just write to a temp file, and call putBytes on the data in that file.
val tmpFile = diskBlockManager.createTempLocalBlock()._2
val channel = new CountingWritableChannel(
Channels.newChannel(serializerManager.wrapForEncryption(new FileOutputStream(tmpFile))))
logTrace(s"Streaming block $blockId to tmp file $tmpFile")
new StreamCallbackWithID {
override def getID: String = blockId.name
override def onData(streamId: String, buf: ByteBuffer): Unit = {
while (buf.hasRemaining) {
channel.write(buf)
}
}
override def onComplete(streamId: String): Unit = {
logTrace(s"Done receiving block $blockId, now putting into local blockManager")
// Read the contents of the downloaded file as a buffer to put into the blockManager.
// Note this is all happening inside the netty thread as soon as it reads the end of the
// stream.
channel.close()
// TODO SPARK-25035 Even if we're only going to write the data to disk after this, we end up
// using a lot of memory here. We'll read the whole file into a regular
// byte buffer and OOM. We could at least read the tmp file as a stream.
val buffer = securityManager.getIOEncryptionKey() match {
case Some(key) =>
// we need to pass in the size of the unencrypted block
val blockSize = channel.getCount
val allocator = level.memoryMode match {
case MemoryMode.ON_HEAP => ByteBuffer.allocate _
case MemoryMode.OFF_HEAP => Platform.allocateDirectBuffer _
}
new EncryptedBlockData(tmpFile, blockSize, conf, key).toChunkedByteBuffer(allocator)
case None =>
ChunkedByteBuffer.fromFile(tmpFile)
}
putBytes(blockId, buffer, level)(classTag)
tmpFile.delete()
}
override def onFailure(streamId: String, cause: Throwable): Unit = {
// the framework handles the connection itself, we just need to do local cleanup
channel.close()
tmpFile.delete()
}
}
}
/**
* Get the BlockStatus for the block identified by the given ID, if it exists.
* NOTE: This is mainly for testing.
*/
def getStatus(blockId: BlockId): Option[BlockStatus] = {
blockInfoManager.get(blockId).map { info =>
val memSize = if (memoryStore.contains(blockId)) memoryStore.getSize(blockId) else 0L
val diskSize = if (diskStore.contains(blockId)) diskStore.getSize(blockId) else 0L
BlockStatus(info.level, memSize = memSize, diskSize = diskSize)
}
}
/**
* Get the ids of existing blocks that match the given filter. Note that this will
* query the blocks stored in the disk block manager (that the block manager
* may not know of).
*/
def getMatchingBlockIds(filter: BlockId => Boolean): Seq[BlockId] = {
// The `toArray` is necessary here in order to force the list to be materialized so that we
// don't try to serialize a lazy iterator when responding to client requests.
(blockInfoManager.entries.map(_._1) ++ diskBlockManager.getAllBlocks())
.filter(filter)
.toArray
.toSeq
}
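/*
 * Illustrative sketch (hypothetical `rddId`): a filter that selects all cached partitions of a
 * given RDD, including blocks known only to the disk block manager, e.g.
 *
 * {{{
 *   val rddBlocks = blockManager.getMatchingBlockIds(_.asRDDId.exists(_.rddId == rddId))
 * }}}
 */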
/**
* Tell the master about the current storage status of a block. This will send a block update
* message reflecting the current status, *not* the desired storage level in its block info.
* For example, a block with MEMORY_AND_DISK set might have fallen out to be only on disk.
*
* droppedMemorySize exists to account for when the block is dropped from memory to disk (so
* it is still valid). This ensures that the update in the master will compensate for the
* increase in memory on the slave.
*/
private def reportBlockStatus(
blockId: BlockId,
status: BlockStatus,
droppedMemorySize: Long = 0L): Unit = {
val needReregister = !tryToReportBlockStatus(blockId, status, droppedMemorySize)
if (needReregister) {
logInfo(s"Got told to re-register updating block $blockId")
// Re-registering will report our new block for free.
asyncReregister()
}
logDebug(s"Told master about block $blockId")
}
/**
* Actually send an UpdateBlockInfo message. Returns the master's response,
* which will be true if the block was successfully recorded and false if
* the slave needs to re-register.
*/
private def tryToReportBlockStatus(
blockId: BlockId,
status: BlockStatus,
droppedMemorySize: Long = 0L): Boolean = {
val storageLevel = status.storageLevel
val inMemSize = Math.max(status.memSize, droppedMemorySize)
val onDiskSize = status.diskSize
master.updateBlockInfo(blockManagerId, blockId, storageLevel, inMemSize, onDiskSize)
}
/**
* Return the updated storage status of the block with the given ID. More specifically, if
* the block is dropped from memory and possibly added to disk, return the new storage level
* and the updated in-memory and on-disk sizes.
*/
private def getCurrentBlockStatus(blockId: BlockId, info: BlockInfo): BlockStatus = {
info.synchronized {
info.level match {
case null =>
BlockStatus.empty
case level =>
val inMem = level.useMemory && memoryStore.contains(blockId)
val onDisk = level.useDisk && diskStore.contains(blockId)
val deserialized = if (inMem) level.deserialized else false
val replication = if (inMem || onDisk) level.replication else 1
val storageLevel = StorageLevel(
useDisk = onDisk,
useMemory = inMem,
useOffHeap = level.useOffHeap,
deserialized = deserialized,
replication = replication)
val memSize = if (inMem) memoryStore.getSize(blockId) else 0L
val diskSize = if (onDisk) diskStore.getSize(blockId) else 0L
BlockStatus(storageLevel, memSize, diskSize)
}
}
}
/**
* Get locations of an array of blocks.
*/
private def getLocationBlockIds(blockIds: Array[BlockId]): Array[Seq[BlockManagerId]] = {
val startTimeNs = System.nanoTime()
val locations = master.getLocations(blockIds).toArray
logDebug(s"Got multiple block location in ${Utils.getUsedTimeNs(startTimeNs)}")
locations
}
/**
* Cleanup code run in response to a failed local read.
* Must be called while holding a read lock on the block.
*/
private def handleLocalReadFailure(blockId: BlockId): Nothing = {
releaseLock(blockId)
// Remove the missing block so that its unavailability is reported to the driver
removeBlock(blockId)
throw new SparkException(s"Block $blockId was not found even though it's read-locked")
}
/**
* Get block from local block manager as an iterator of Java objects.
*/
def getLocalValues(blockId: BlockId): Option[BlockResult] = {
logDebug(s"Getting local block $blockId")
blockInfoManager.lockForReading(blockId) match {
case None =>
logDebug(s"Block $blockId was not found")
None
case Some(info) =>
val level = info.level
logDebug(s"Level for block $blockId is $level")
val taskAttemptId = Option(TaskContext.get()).map(_.taskAttemptId())
if (level.useMemory && memoryStore.contains(blockId)) {
val iter: Iterator[Any] = if (level.deserialized) {
memoryStore.getValues(blockId).get
} else {
serializerManager.dataDeserializeStream(
blockId, memoryStore.getBytes(blockId).get.toInputStream())(info.classTag)
}
// We need to capture the current taskId in case the iterator completion is triggered
// from a different thread which does not have TaskContext set; see SPARK-18406 for
// discussion.
val ci = CompletionIterator[Any, Iterator[Any]](iter, {
releaseLock(blockId, taskAttemptId)
})
Some(new BlockResult(ci, DataReadMethod.Memory, info.size))
} else if (level.useDisk && diskStore.contains(blockId)) {
val diskData = diskStore.getBytes(blockId)
val iterToReturn: Iterator[Any] = {
if (level.deserialized) {
val diskValues = serializerManager.dataDeserializeStream(
blockId,
diskData.toInputStream())(info.classTag)
maybeCacheDiskValuesInMemory(info, blockId, level, diskValues)
} else {
val stream = maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
.map { _.toInputStream(dispose = false) }
.getOrElse { diskData.toInputStream() }
serializerManager.dataDeserializeStream(blockId, stream)(info.classTag)
}
}
val ci = CompletionIterator[Any, Iterator[Any]](iterToReturn, {
releaseLockAndDispose(blockId, diskData, taskAttemptId)
})
Some(new BlockResult(ci, DataReadMethod.Disk, info.size))
} else {
handleLocalReadFailure(blockId)
}
}
}
/**
* Get block from the local block manager as serialized bytes.
*/
def getLocalBytes(blockId: BlockId): Option[BlockData] = {
logDebug(s"Getting local block $blockId as bytes")
assert(!blockId.isShuffle, s"Unexpected ShuffleBlockId $blockId")
blockInfoManager.lockForReading(blockId).map { info => doGetLocalBytes(blockId, info) }
}
/**
* Get block from the local block manager as serialized bytes.
*
* Must be called while holding a read lock on the block.
* Releases the read lock upon exception; keeps the read lock upon successful return.
*/
private def doGetLocalBytes(blockId: BlockId, info: BlockInfo): BlockData = {
val level = info.level
logDebug(s"Level for block $blockId is $level")
// In order, try to read the serialized bytes from memory, then from disk, then fall back to
// serializing in-memory objects, and, finally, throw an exception if the block does not exist.
if (level.deserialized) {
// Try to avoid expensive serialization by reading a pre-serialized copy from disk:
if (level.useDisk && diskStore.contains(blockId)) {
// Note: we purposely do not try to put the block back into memory here. Since this branch
// handles deserialized blocks, this block may only be cached in memory as objects, not
// serialized bytes. Because the caller only requested bytes, it doesn't make sense to
// cache the block's deserialized objects since that caching may not have a payoff.
diskStore.getBytes(blockId)
} else if (level.useMemory && memoryStore.contains(blockId)) {
// The block was not found on disk, so serialize an in-memory copy:
new ByteBufferBlockData(serializerManager.dataSerializeWithExplicitClassTag(
blockId, memoryStore.getValues(blockId).get, info.classTag), true)
} else {
handleLocalReadFailure(blockId)
}
} else { // storage level is serialized
if (level.useMemory && memoryStore.contains(blockId)) {
new ByteBufferBlockData(memoryStore.getBytes(blockId).get, false)
} else if (level.useDisk && diskStore.contains(blockId)) {
val diskData = diskStore.getBytes(blockId)
maybeCacheDiskBytesInMemory(info, blockId, level, diskData)
.map(new ByteBufferBlockData(_, false))
.getOrElse(diskData)
} else {
handleLocalReadFailure(blockId)
}
}
}
/**
* Get block from remote block managers.
*
* This does not acquire a lock on this block in this JVM.
*/
private def getRemoteValues[T: ClassTag](blockId: BlockId): Option[BlockResult] = {
val ct = implicitly[ClassTag[T]]
getRemoteManagedBuffer(blockId).map { data =>
val values =
serializerManager.dataDeserializeStream(blockId, data.createInputStream())(ct)
new BlockResult(values, DataReadMethod.Network, data.size)
}
}
/**
* Return a list of locations for the given block, prioritizing the local machine since
* multiple block managers can share the same host, followed by hosts on the same rack.
*/
private def sortLocations(locations: Seq[BlockManagerId]): Seq[BlockManagerId] = {
val locs = Random.shuffle(locations)
val (preferredLocs, otherLocs) = locs.partition { loc => blockManagerId.host == loc.host }
blockManagerId.topologyInfo match {
case None => preferredLocs ++ otherLocs
case Some(_) =>
val (sameRackLocs, differentRackLocs) = otherLocs.partition {
loc => blockManagerId.topologyInfo == loc.topologyInfo
}
preferredLocs ++ sameRackLocs ++ differentRackLocs
}
}
/**
* Get block from remote block managers as a ManagedBuffer.
*/
private def getRemoteManagedBuffer(blockId: BlockId): Option[ManagedBuffer] = {
logDebug(s"Getting remote block $blockId")
require(blockId != null, "BlockId is null")
var runningFailureCount = 0
var totalFailureCount = 0
// Because all the remote blocks are registered in the driver, it is not necessary to ask
// all the slave executors to get block status.
val locationsAndStatus = master.getLocationsAndStatus(blockId)
val blockSize = locationsAndStatus.map { b =>
b.status.diskSize.max(b.status.memSize)
}.getOrElse(0L)
val blockLocations = locationsAndStatus.map(_.locations).getOrElse(Seq.empty)
// If the block size is above the threshold, we should pass our FileManager to
// BlockTransferService, which will leverage it to spill the block; if not, then a passed-in
// null value means the block will be persisted in memory.
val tempFileManager = if (blockSize > maxRemoteBlockToMem) {
remoteBlockTempFileManager
} else {
null
}
val locations = sortLocations(blockLocations)
val maxFetchFailures = locations.size
var locationIterator = locations.iterator
while (locationIterator.hasNext) {
val loc = locationIterator.next()
logDebug(s"Getting remote block $blockId from $loc")
val data = try {
blockTransferService.fetchBlockSync(
loc.host, loc.port, loc.executorId, blockId.toString, tempFileManager)
} catch {
case NonFatal(e) =>
runningFailureCount += 1
totalFailureCount += 1
if (totalFailureCount >= maxFetchFailures) {
// Give up trying any more locations. Either we've tried all of the original locations,
// or we've refreshed the list of locations from the master, and have still
// hit failures after trying locations from the refreshed list.
logWarning(s"Failed to fetch block after $totalFailureCount fetch failures. " +
s"Most recent failure cause:", e)
return None
}
logWarning(s"Failed to fetch remote block $blockId " +
s"from $loc (failed attempt $runningFailureCount)", e)
// If there is a large number of executors then the locations list can contain a
// large number of stale entries, causing a large number of retries that may
// take a significant amount of time. To get rid of these stale entries,
// we refresh the block locations after a certain number of fetch failures.
if (runningFailureCount >= maxFailuresBeforeLocationRefresh) {
locationIterator = sortLocations(master.getLocations(blockId)).iterator
logDebug(s"Refreshed locations from the driver " +
s"after ${runningFailureCount} fetch failures.")
runningFailureCount = 0
}
// This location failed, so we retry fetch from a different one by returning null here
null
}
if (data != null) {
// If the ManagedBuffer is a BlockManagerManagedBuffer, the disposal of the
// byte buffers backing it may need to be handled after reading the bytes.
// In this case, since we just fetched the bytes remotely, we do not have
// a BlockManagerManagedBuffer. The assert here is to ensure that this holds
// true (or the disposal is handled).
assert(!data.isInstanceOf[BlockManagerManagedBuffer])
return Some(data)
}
logDebug(s"The value of block $blockId is null")
}
logDebug(s"Block $blockId not found")
None
}
/**
* Get block from remote block managers as serialized bytes.
*/
def getRemoteBytes(blockId: BlockId): Option[ChunkedByteBuffer] = {
getRemoteManagedBuffer(blockId).map { data =>
// SPARK-24307 undocumented "escape-hatch" in case there are any issues in converting to
// ChunkedByteBuffer, to go back to the old code path. Can be removed post Spark 2.4 if
// the new path is stable.
if (remoteReadNioBufferConversion) {
new ChunkedByteBuffer(data.nioByteBuffer())
} else {
ChunkedByteBuffer.fromManagedBuffer(data)
}
}
}
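/*
 * Illustrative sketch (hypothetical broadcast id and piece name): callers such as the broadcast
 * implementation fetch serialized pieces from remote block managers by id, e.g.
 *
 * {{{
 *   val piece: Option[ChunkedByteBuffer] =
 *     blockManager.getRemoteBytes(BroadcastBlockId(broadcastId, "piece0"))
 * }}}
 */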
/**
* Get a block from the block manager (either local or remote).
*
* This acquires a read lock on the block if the block was stored locally and does not acquire
* any locks if the block was fetched from a remote block manager. The read lock will
* automatically be freed once the result's `data` iterator is fully consumed.
*/
def get[T: ClassTag](blockId: BlockId): Option[BlockResult] = {
val local = getLocalValues(blockId)
if (local.isDefined) {
logInfo(s"Found block $blockId locally")
return local
}
val remote = getRemoteValues[T](blockId)
if (remote.isDefined) {
logInfo(s"Found block $blockId remotely")
return remote
}
None
}
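/*
 * Illustrative usage sketch (hypothetical block id): for a local hit, the read lock is released
 * automatically once the returned iterator is fully consumed, e.g.
 *
 * {{{
 *   blockManager.get[String](RDDBlockId(0, 0)) match {
 *     case Some(result) => result.data.foreach(println)  // lock freed when iteration completes
 *     case None => // not found locally or remotely
 *   }
 * }}}
 */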
/**
* Downgrades an exclusive write lock to a shared read lock.
*/
def downgradeLock(blockId: BlockId): Unit = {
blockInfoManager.downgradeLock(blockId)
}
/**
* Release a lock on the given block with explicit TID.
* The param `taskAttemptId` should be passed in case we can't get the correct TID from
* TaskContext, for example, the input iterator of a cached RDD iterates to the end in a child
* thread.
*/
def releaseLock(blockId: BlockId, taskAttemptId: Option[Long] = None): Unit = {
blockInfoManager.unlock(blockId, taskAttemptId)
}
/**
* Registers a task with the BlockManager in order to initialize per-task bookkeeping structures.
*/
def registerTask(taskAttemptId: Long): Unit = {
blockInfoManager.registerTask(taskAttemptId)
}
/**
* Release all locks for the given task.
*
* @return the blocks whose locks were released.
*/
def releaseAllLocksForTask(taskAttemptId: Long): Seq[BlockId] = {
blockInfoManager.releaseAllLocksForTask(taskAttemptId)
}
/**
* Retrieve the given block if it exists, otherwise call the provided `makeIterator` method
* to compute the block, persist it, and return its values.
*
* @return either a BlockResult if the block was successfully cached, or an iterator if the block
* could not be cached.
*/
def getOrElseUpdate[T](
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[T],
makeIterator: () => Iterator[T]): Either[BlockResult, Iterator[T]] = {
// Attempt to read the block from local or remote storage. If it's present, then we don't need
// to go through the local-get-or-put path.
get[T](blockId)(classTag) match {
case Some(block) =>
return Left(block)
case _ =>
// Need to compute the block.
}
// Initially we hold no locks on this block.
doPutIterator(blockId, makeIterator, level, classTag, keepReadLock = true) match {
case None =>
// doPut() didn't hand work back to us, so the block already existed or was successfully
// stored. Therefore, we now hold a read lock on the block.
val blockResult = getLocalValues(blockId).getOrElse {
// Since we held a read lock between the doPut() and get() calls, the block should not
// have been evicted, so get() not returning the block indicates some internal error.
releaseLock(blockId)
throw new SparkException(s"get() failed for block $blockId even though we held a lock")
}
// We already hold a read lock on the block from the doPut() call and getLocalValues()
// acquires the lock again, so we need to call releaseLock() here so that the net number
// of lock acquisitions is 1 (since the caller will only call release() once).
releaseLock(blockId)
Left(blockResult)
case Some(iter) =>
// The put failed, likely because the data was too large to fit in memory and could not be
// dropped to disk. Therefore, we need to pass the input iterator back to the caller so
// that they can decide what to do with the values (e.g. process them without caching).
Right(iter)
}
}
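/*
 * Illustrative sketch (hypothetical `rddId`, `partitionIndex` and `compute`): the caching path
 * for an RDD partition might use getOrElseUpdate like this; Left means the block is (now)
 * cached, Right hands the values back when they could not be stored:
 *
 * {{{
 *   val blockId = RDDBlockId(rddId, partitionIndex)
 *   blockManager.getOrElseUpdate(blockId, StorageLevel.MEMORY_AND_DISK,
 *       scala.reflect.classTag[String], () => compute(partitionIndex)) match {
 *     case Left(blockResult) => blockResult.data.asInstanceOf[Iterator[String]]
 *     case Right(iter) => iter
 *   }
 * }}}
 */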
/**
* @return true if the block was stored or false if an error occurred.
*/
def putIterator[T: ClassTag](
blockId: BlockId,
values: Iterator[T],
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
require(values != null, "Values is null")
doPutIterator(blockId, () => values, level, implicitly[ClassTag[T]], tellMaster) match {
case None =>
true
case Some(iter) =>
// Caller doesn't care about the iterator values, so we can close the iterator here
// to free resources earlier
iter.close()
false
}
}
/**
* A short-circuited method to get a block writer that can write data directly to disk.
* The block will be appended to the given file. Callers should handle error
* cases.
*/
def getDiskWriter(
blockId: BlockId,
file: File,
serializerInstance: SerializerInstance,
bufferSize: Int,
writeMetrics: ShuffleWriteMetricsReporter): DiskBlockObjectWriter = {
val syncWrites = conf.get(config.SHUFFLE_SYNC)
new DiskBlockObjectWriter(file, serializerManager, serializerInstance, bufferSize,
syncWrites, writeMetrics, blockId)
}
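/*
 * Illustrative sketch (hypothetical `file`, `serInstance`, `records` and `writeMetrics`):
 * shuffle writers obtain a DiskBlockObjectWriter this way and append records to the file, e.g.
 *
 * {{{
 *   val writer = blockManager.getDiskWriter(blockId, file, serInstance, 32 * 1024, writeMetrics)
 *   records.foreach { case (k, v) => writer.write(k, v) }
 *   val segment = writer.commitAndGet()  // FileSegment describing the bytes written
 *   writer.close()
 * }}}
 */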
/**
* Put a new block of serialized bytes to the block manager.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*
* @return true if the block was stored or false if an error occurred.
*/
def putBytes[T: ClassTag](
blockId: BlockId,
bytes: ChunkedByteBuffer,
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
require(bytes != null, "Bytes is null")
doPutBytes(blockId, bytes, level, implicitly[ClassTag[T]], tellMaster)
}
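/*
 * Illustrative sketch (hypothetical `serializedData` and `id`): storing already-serialized
 * bytes, wrapped in a ChunkedByteBuffer, e.g.
 *
 * {{{
 *   val bytes = new ChunkedByteBuffer(ByteBuffer.wrap(serializedData))
 *   blockManager.putBytes(BroadcastBlockId(id, "piece0"), bytes,
 *     StorageLevel.MEMORY_AND_DISK_SER, tellMaster = true)
 * }}}
 */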
/**
* Put the given bytes according to the given level in one of the block stores, replicating
* the values if necessary.
*
* If the block already exists, this method will not overwrite it.
*
* '''Important!''' Callers must not mutate or release the data buffer underlying `bytes`. Doing
* so may corrupt or change the data stored by the `BlockManager`.
*
* @param keepReadLock if true, this method will hold the read lock when it returns (even if the
* block already exists). If false, this method will hold no locks when it
* returns.
* @return true if the block was already present or if the put succeeded, false otherwise.
*/
private def doPutBytes[T](
blockId: BlockId,
bytes: ChunkedByteBuffer,
level: StorageLevel,
classTag: ClassTag[T],
tellMaster: Boolean = true,
keepReadLock: Boolean = false): Boolean = {
doPut(blockId, level, classTag, tellMaster = tellMaster, keepReadLock = keepReadLock) { info =>
val startTimeNs = System.nanoTime()
// Since we're storing bytes, initiate the replication before storing them locally.
// This is faster as data is already serialized and ready to send.
val replicationFuture = if (level.replication > 1) {
Future {
// This is a blocking action and should run in futureExecutionContext which is a cached
// thread pool. The ByteBufferBlockData wrapper is not disposed of to avoid releasing
// buffers that are owned by the caller.
replicate(blockId, new ByteBufferBlockData(bytes, false), level, classTag)
}(futureExecutionContext)
} else {
null
}
val size = bytes.size
if (level.useMemory) {
// Put it in memory first, even if it also has useDisk set to true;
// We will drop it to disk later if the memory store can't hold it.
val putSucceeded = if (level.deserialized) {
val values =
serializerManager.dataDeserializeStream(blockId, bytes.toInputStream())(classTag)
memoryStore.putIteratorAsValues(blockId, values, classTag) match {
case Right(_) => true
case Left(iter) =>
// If putting deserialized values in memory failed, we will put the bytes directly to
// disk, so we don't need this iterator and can close it to free resources earlier.
iter.close()
false
}
} else {
val memoryMode = level.memoryMode
memoryStore.putBytes(blockId, size, memoryMode, () => {
if (memoryMode == MemoryMode.OFF_HEAP &&
bytes.chunks.exists(buffer => !buffer.isDirect)) {
bytes.copy(Platform.allocateDirectBuffer)
} else {
bytes
}
})
}
if (!putSucceeded && level.useDisk) {
logWarning(s"Persisting block $blockId to disk instead.")
diskStore.putBytes(blockId, bytes)
}
} else if (level.useDisk) {
diskStore.putBytes(blockId, bytes)
}
val putBlockStatus = getCurrentBlockStatus(blockId, info)
val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid
if (blockWasSuccessfullyStored) {
// Now that the block is in either the memory or disk store,
// tell the master about it.
info.size = size
if (tellMaster && info.tellMaster) {
reportBlockStatus(blockId, putBlockStatus)
}
addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus)
}
logDebug(s"Put block ${blockId} locally took ${Utils.getUsedTimeNs(startTimeNs)}")
if (level.replication > 1) {
// Wait for asynchronous replication to finish
try {
ThreadUtils.awaitReady(replicationFuture, Duration.Inf)
} catch {
case NonFatal(t) =>
throw new Exception("Error occurred while waiting for replication to finish", t)
}
}
if (blockWasSuccessfullyStored) {
None
} else {
Some(bytes)
}
}.isEmpty
}
/**
* Helper method used to abstract common code from [[doPutBytes()]] and [[doPutIterator()]].
*
* @param putBody a function which attempts the actual put() and returns None on success
* or Some on failure.
*/
private def doPut[T](
blockId: BlockId,
level: StorageLevel,
classTag: ClassTag[_],
tellMaster: Boolean,
keepReadLock: Boolean)(putBody: BlockInfo => Option[T]): Option[T] = {
require(blockId != null, "BlockId is null")
require(level != null && level.isValid, "StorageLevel is null or invalid")
val putBlockInfo = {
val newInfo = new BlockInfo(level, classTag, tellMaster)
if (blockInfoManager.lockNewBlockForWriting(blockId, newInfo)) {
newInfo
} else {
logWarning(s"Block $blockId already exists on this machine; not re-adding it")
if (!keepReadLock) {
// lockNewBlockForWriting returned a read lock on the existing block, so we must free it:
releaseLock(blockId)
}
return None
}
}
val startTimeNs = System.nanoTime()
var exceptionWasThrown: Boolean = true
val result: Option[T] = try {
val res = putBody(putBlockInfo)
exceptionWasThrown = false
if (res.isEmpty) {
// the block was successfully stored
if (keepReadLock) {
blockInfoManager.downgradeLock(blockId)
} else {
blockInfoManager.unlock(blockId)
}
} else {
removeBlockInternal(blockId, tellMaster = false)
logWarning(s"Putting block $blockId failed")
}
res
} catch {
// Since removeBlockInternal may throw an exception,
// we should log the exception first to show the root cause.
case NonFatal(e) =>
logWarning(s"Putting block $blockId failed due to exception $e.")
throw e
} finally {
// This cleanup is performed in a finally block rather than a `catch` to avoid having to
// catch and properly re-throw InterruptedException.
if (exceptionWasThrown) {
// If an exception was thrown then it's possible that the code in `putBody` has already
// notified the master about the availability of this block, so we need to send an update
// to remove this block location.
removeBlockInternal(blockId, tellMaster = tellMaster)
// The `putBody` code may have also added a new block status to TaskMetrics, so we need
// to cancel that out by overwriting it with an empty block status. We only do this if
// the finally block was entered via an exception because doing this unconditionally would
// cause us to send empty block statuses for every block that failed to be cached due to
// a memory shortage (which is an expected failure, unlike an uncaught exception).
addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty)
}
}
val usedTimeMs = Utils.getUsedTimeNs(startTimeNs)
if (level.replication > 1) {
logDebug(s"Putting block ${blockId} with replication took $usedTimeMs")
} else {
logDebug(s"Putting block ${blockId} without replication took ${usedTimeMs}")
}
result
}
/**
* Put the given block according to the given level in one of the block stores, replicating
* the values if necessary.
*
* If the block already exists, this method will not overwrite it.
*
* @param keepReadLock if true, this method will hold the read lock when it returns (even if the
* block already exists). If false, this method will hold no locks when it
* returns.
* @return None if the block was already present or if the put succeeded, or Some(iterator)
* if the put failed.
*/
private def doPutIterator[T](
blockId: BlockId,
iterator: () => Iterator[T],
level: StorageLevel,
classTag: ClassTag[T],
tellMaster: Boolean = true,
keepReadLock: Boolean = false): Option[PartiallyUnrolledIterator[T]] = {
doPut(blockId, level, classTag, tellMaster = tellMaster, keepReadLock = keepReadLock) { info =>
val startTimeNs = System.nanoTime()
var iteratorFromFailedMemoryStorePut: Option[PartiallyUnrolledIterator[T]] = None
// Size of the block in bytes
var size = 0L
if (level.useMemory) {
// Put it in memory first, even if it also has useDisk set to true;
// We will drop it to disk later if the memory store can't hold it.
if (level.deserialized) {
memoryStore.putIteratorAsValues(blockId, iterator(), classTag) match {
case Right(s) =>
size = s
case Left(iter) =>
// Not enough space to unroll this block; drop to disk if applicable
if (level.useDisk) {
logWarning(s"Persisting block $blockId to disk instead.")
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
serializerManager.dataSerializeStream(blockId, out, iter)(classTag)
}
size = diskStore.getSize(blockId)
} else {
iteratorFromFailedMemoryStorePut = Some(iter)
}
}
} else { // !level.deserialized
memoryStore.putIteratorAsBytes(blockId, iterator(), classTag, level.memoryMode) match {
case Right(s) =>
size = s
case Left(partiallySerializedValues) =>
// Not enough space to unroll this block; drop to disk if applicable
if (level.useDisk) {
logWarning(s"Persisting block $blockId to disk instead.")
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
partiallySerializedValues.finishWritingToStream(out)
}
size = diskStore.getSize(blockId)
} else {
iteratorFromFailedMemoryStorePut = Some(partiallySerializedValues.valuesIterator)
}
}
}
} else if (level.useDisk) {
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
serializerManager.dataSerializeStream(blockId, out, iterator())(classTag)
}
size = diskStore.getSize(blockId)
}
val putBlockStatus = getCurrentBlockStatus(blockId, info)
val blockWasSuccessfullyStored = putBlockStatus.storageLevel.isValid
if (blockWasSuccessfullyStored) {
// Now that the block is in either the memory or disk store, tell the master about it.
info.size = size
if (tellMaster && info.tellMaster) {
reportBlockStatus(blockId, putBlockStatus)
}
addUpdatedBlockStatusToTaskMetrics(blockId, putBlockStatus)
logDebug(s"Put block $blockId locally took ${Utils.getUsedTimeNs(startTimeNs)}")
if (level.replication > 1) {
val remoteStartTimeNs = System.nanoTime()
val bytesToReplicate = doGetLocalBytes(blockId, info)
// [SPARK-16550] Erase the typed classTag when using default serialization, since
// NettyBlockRpcServer crashes when deserializing repl-defined classes.
// TODO(ekl) remove this once the classloader issue on the remote end is fixed.
val remoteClassTag = if (!serializerManager.canUseKryo(classTag)) {
scala.reflect.classTag[Any]
} else {
classTag
}
try {
replicate(blockId, bytesToReplicate, level, remoteClassTag)
} finally {
bytesToReplicate.dispose()
}
logDebug(s"Put block $blockId remotely took ${Utils.getUsedTimeNs(remoteStartTimeNs)}")
}
}
assert(blockWasSuccessfullyStored == iteratorFromFailedMemoryStorePut.isEmpty)
iteratorFromFailedMemoryStorePut
}
}
/**
* Attempts to cache spilled bytes read from disk into the MemoryStore in order to speed up
* subsequent reads. This method requires the caller to hold a read lock on the block.
*
* @return a copy of the bytes from the memory store if the put succeeded, otherwise None.
* If this returns bytes from the memory store then the original disk store bytes will
* automatically be disposed and the caller should not continue to use them. Otherwise,
* if this returns None then the original disk store bytes will be unaffected.
*/
private def maybeCacheDiskBytesInMemory(
blockInfo: BlockInfo,
blockId: BlockId,
level: StorageLevel,
diskData: BlockData): Option[ChunkedByteBuffer] = {
require(!level.deserialized)
if (level.useMemory) {
// Synchronize on blockInfo to guard against a race condition where two readers both try to
// put values read from disk into the MemoryStore.
blockInfo.synchronized {
if (memoryStore.contains(blockId)) {
diskData.dispose()
Some(memoryStore.getBytes(blockId).get)
} else {
val allocator = level.memoryMode match {
case MemoryMode.ON_HEAP => ByteBuffer.allocate _
case MemoryMode.OFF_HEAP => Platform.allocateDirectBuffer _
}
val putSucceeded = memoryStore.putBytes(blockId, diskData.size, level.memoryMode, () => {
// https://issues.apache.org/jira/browse/SPARK-6076
// If the file size is bigger than the free memory, an OOM will happen. So if we
// cannot put it into the MemoryStore, the in-memory copy should not be created. That's why
// this action is put into a `() => ChunkedByteBuffer` and created lazily.
diskData.toChunkedByteBuffer(allocator)
})
if (putSucceeded) {
diskData.dispose()
Some(memoryStore.getBytes(blockId).get)
} else {
None
}
}
}
} else {
None
}
}
/**
* Attempts to cache spilled values read from disk into the MemoryStore in order to speed up
* subsequent reads. This method requires the caller to hold a read lock on the block.
*
* @return a copy of the iterator. The original iterator passed to this method should no longer
* be used after this method returns.
*/
private def maybeCacheDiskValuesInMemory[T](
blockInfo: BlockInfo,
blockId: BlockId,
level: StorageLevel,
diskIterator: Iterator[T]): Iterator[T] = {
require(level.deserialized)
val classTag = blockInfo.classTag.asInstanceOf[ClassTag[T]]
if (level.useMemory) {
// Synchronize on blockInfo to guard against a race condition where two readers both try to
// put values read from disk into the MemoryStore.
blockInfo.synchronized {
if (memoryStore.contains(blockId)) {
// Note: if we had a means to discard the disk iterator, we would do that here.
memoryStore.getValues(blockId).get
} else {
memoryStore.putIteratorAsValues(blockId, diskIterator, classTag) match {
case Left(iter) =>
// The memory store put() failed, so it returned the iterator back to us:
iter
case Right(_) =>
// The put() succeeded, so we can read the values back:
memoryStore.getValues(blockId).get
}
}
}.asInstanceOf[Iterator[T]]
} else {
diskIterator
}
}
/**
* Get peer block managers in the system.
*/
private def getPeers(forceFetch: Boolean): Seq[BlockManagerId] = {
peerFetchLock.synchronized {
val cachedPeersTtl = conf.get(config.STORAGE_CACHED_PEERS_TTL) // milliseconds
val diff = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastPeerFetchTimeNs)
val timeout = diff > cachedPeersTtl
if (cachedPeers == null || forceFetch || timeout) {
cachedPeers = master.getPeers(blockManagerId).sortBy(_.hashCode)
lastPeerFetchTimeNs = System.nanoTime()
logDebug("Fetched peers from master: " + cachedPeers.mkString("[", ",", "]"))
}
cachedPeers
}
}
/**
* Called for pro-active replenishment of blocks lost due to executor failures.
*
* @param blockId blockId being replicated
* @param existingReplicas existing block managers that have a replica
* @param maxReplicas maximum replicas needed
*/
def replicateBlock(
blockId: BlockId,
existingReplicas: Set[BlockManagerId],
maxReplicas: Int): Unit = {
logInfo(s"Using $blockManagerId to pro-actively replicate $blockId")
blockInfoManager.lockForReading(blockId).foreach { info =>
val data = doGetLocalBytes(blockId, info)
val storageLevel = StorageLevel(
useDisk = info.level.useDisk,
useMemory = info.level.useMemory,
useOffHeap = info.level.useOffHeap,
deserialized = info.level.deserialized,
replication = maxReplicas)
// We know we are called as a result of an executor removal, so we refresh the peer cache;
// this way, we won't try to replicate to a missing executor with a stale reference.
getPeers(forceFetch = true)
try {
replicate(blockId, data, storageLevel, info.classTag, existingReplicas)
} finally {
logDebug(s"Releasing lock for $blockId")
releaseLockAndDispose(blockId, data)
}
}
}
/**
* Replicate block to another node. Note that this is a blocking call that returns after
* the block has been replicated.
*/
private def replicate(
blockId: BlockId,
data: BlockData,
level: StorageLevel,
classTag: ClassTag[_],
existingReplicas: Set[BlockManagerId] = Set.empty): Unit = {
val maxReplicationFailures = conf.get(config.STORAGE_MAX_REPLICATION_FAILURE)
val tLevel = StorageLevel(
useDisk = level.useDisk,
useMemory = level.useMemory,
useOffHeap = level.useOffHeap,
deserialized = level.deserialized,
replication = 1)
val numPeersToReplicateTo = level.replication - 1
val startTime = System.nanoTime
val peersReplicatedTo = mutable.HashSet.empty ++ existingReplicas
val peersFailedToReplicateTo = mutable.HashSet.empty[BlockManagerId]
var numFailures = 0
val initialPeers = getPeers(false).filterNot(existingReplicas.contains)
var peersForReplication = blockReplicationPolicy.prioritize(
blockManagerId,
initialPeers,
peersReplicatedTo,
blockId,
numPeersToReplicateTo)
while (numFailures <= maxReplicationFailures &&
!peersForReplication.isEmpty &&
peersReplicatedTo.size < numPeersToReplicateTo) {
val peer = peersForReplication.head
try {
val onePeerStartTime = System.nanoTime
logTrace(s"Trying to replicate $blockId of ${data.size} bytes to $peer")
// This thread keeps a lock on the block, so we do not want the netty thread to unlock
// block when it finishes sending the message.
val buffer = new BlockManagerManagedBuffer(blockInfoManager, blockId, data, false,
unlockOnDeallocate = false)
blockTransferService.uploadBlockSync(
peer.host,
peer.port,
peer.executorId,
blockId,
buffer,
tLevel,
classTag)
logTrace(s"Replicated $blockId of ${data.size} bytes to $peer" +
s" in ${(System.nanoTime - onePeerStartTime).toDouble / 1e6} ms")
peersForReplication = peersForReplication.tail
peersReplicatedTo += peer
} catch {
case NonFatal(e) =>
logWarning(s"Failed to replicate $blockId to $peer, failure #$numFailures", e)
peersFailedToReplicateTo += peer
// We have a failed replication, so we get the list of peers again.
// We don't want peers we have already replicated to and the ones that
// have failed previously.
val filteredPeers = getPeers(true).filter { p =>
!peersFailedToReplicateTo.contains(p) && !peersReplicatedTo.contains(p)
}
numFailures += 1
peersForReplication = blockReplicationPolicy.prioritize(
blockManagerId,
filteredPeers,
peersReplicatedTo,
blockId,
numPeersToReplicateTo - peersReplicatedTo.size)
}
}
logDebug(s"Replicating $blockId of ${data.size} bytes to " +
s"${peersReplicatedTo.size} peer(s) took ${(System.nanoTime - startTime) / 1e6} ms")
if (peersReplicatedTo.size < numPeersToReplicateTo) {
logWarning(s"Block $blockId replicated to only " +
s"${peersReplicatedTo.size} peer(s) instead of $numPeersToReplicateTo peers")
}
logDebug(s"block $blockId replicated to ${peersReplicatedTo.mkString(", ")}")
}
/**
* Read a block consisting of a single object.
*/
def getSingle[T: ClassTag](blockId: BlockId): Option[T] = {
get[T](blockId).map(_.data.next().asInstanceOf[T])
}
/**
* Write a block consisting of a single object.
*
* @return true if the block was stored or false if the block was already stored or an
* error occurred.
*/
def putSingle[T: ClassTag](
blockId: BlockId,
value: T,
level: StorageLevel,
tellMaster: Boolean = true): Boolean = {
putIterator(blockId, Iterator(value), level, tellMaster)
}
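/*
 * Illustrative sketch (hypothetical `taskId` and `result`): a single object can be round-tripped
 * with putSingle/getSingle, e.g. for an indirect task result:
 *
 * {{{
 *   blockManager.putSingle(TaskResultBlockId(taskId), result, StorageLevel.MEMORY_AND_DISK_SER)
 *   val restored = blockManager.getSingle[Array[Byte]](TaskResultBlockId(taskId))
 * }}}
 */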
/**
* Drop a block from memory, possibly putting it on disk if applicable. Called when the memory
* store reaches its limit and needs to free up space.
*
* If `data` is not put on disk, it won't be created (the `data` thunk is only invoked when the
* block actually needs to be written to disk).
*
* The caller of this method must hold a write lock on the block before calling this method.
* This method does not release the write lock.
*
* @return the block's new effective StorageLevel.
*/
private[storage] override def dropFromMemory[T: ClassTag](
blockId: BlockId,
data: () => Either[Array[T], ChunkedByteBuffer]): StorageLevel = {
logInfo(s"Dropping block $blockId from memory")
val info = blockInfoManager.assertBlockIsLockedForWriting(blockId)
var blockIsUpdated = false
val level = info.level
// Drop to disk, if storage level requires
if (level.useDisk && !diskStore.contains(blockId)) {
logInfo(s"Writing block $blockId to disk")
data() match {
case Left(elements) =>
diskStore.put(blockId) { channel =>
val out = Channels.newOutputStream(channel)
serializerManager.dataSerializeStream(
blockId,
out,
elements.toIterator)(info.classTag.asInstanceOf[ClassTag[T]])
}
case Right(bytes) =>
diskStore.putBytes(blockId, bytes)
}
blockIsUpdated = true
}
// Actually drop from memory store
val droppedMemorySize =
if (memoryStore.contains(blockId)) memoryStore.getSize(blockId) else 0L
val blockIsRemoved = memoryStore.remove(blockId)
if (blockIsRemoved) {
blockIsUpdated = true
} else {
logWarning(s"Block $blockId could not be dropped from memory as it does not exist")
}
val status = getCurrentBlockStatus(blockId, info)
if (info.tellMaster) {
reportBlockStatus(blockId, status, droppedMemorySize)
}
if (blockIsUpdated) {
addUpdatedBlockStatusToTaskMetrics(blockId, status)
}
status.storageLevel
}
/**
* Remove all blocks belonging to the given RDD.
*
* @return The number of blocks removed.
*/
def removeRdd(rddId: Int): Int = {
// TODO: Avoid a linear scan by creating another mapping of RDD.id to blocks.
logInfo(s"Removing RDD $rddId")
val blocksToRemove = blockInfoManager.entries.flatMap(_._1.asRDDId).filter(_.rddId == rddId)
blocksToRemove.foreach { blockId => removeBlock(blockId, tellMaster = false) }
blocksToRemove.size
}
/**
* Remove all blocks belonging to the given broadcast.
*/
def removeBroadcast(broadcastId: Long, tellMaster: Boolean): Int = {
logDebug(s"Removing broadcast $broadcastId")
val blocksToRemove = blockInfoManager.entries.map(_._1).collect {
case bid @ BroadcastBlockId(`broadcastId`, _) => bid
}
blocksToRemove.foreach { blockId => removeBlock(blockId, tellMaster) }
blocksToRemove.size
}
/**
* Remove a block from both memory and disk.
*/
def removeBlock(blockId: BlockId, tellMaster: Boolean = true): Unit = {
logDebug(s"Removing block $blockId")
blockInfoManager.lockForWriting(blockId) match {
case None =>
// The block has already been removed; do nothing.
logWarning(s"Asked to remove block $blockId, which does not exist")
case Some(info) =>
removeBlockInternal(blockId, tellMaster = tellMaster && info.tellMaster)
addUpdatedBlockStatusToTaskMetrics(blockId, BlockStatus.empty)
}
}
/**
* Internal version of [[removeBlock()]] which assumes that the caller already holds a write
* lock on the block.
*/
private def removeBlockInternal(blockId: BlockId, tellMaster: Boolean): Unit = {
// Removals are idempotent in disk store and memory store. At worst, we get a warning.
val removedFromMemory = memoryStore.remove(blockId)
val removedFromDisk = diskStore.remove(blockId)
if (!removedFromMemory && !removedFromDisk) {
logWarning(s"Block $blockId could not be removed as it was not found on disk or in memory")
}
blockInfoManager.removeBlock(blockId)
if (tellMaster) {
reportBlockStatus(blockId, BlockStatus.empty)
}
}
private def addUpdatedBlockStatusToTaskMetrics(blockId: BlockId, status: BlockStatus): Unit = {
if (conf.get(config.TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES)) {
Option(TaskContext.get()).foreach { c =>
c.taskMetrics().incUpdatedBlockStatuses(blockId -> status)
}
}
}
def releaseLockAndDispose(
blockId: BlockId,
data: BlockData,
taskAttemptId: Option[Long] = None): Unit = {
releaseLock(blockId, taskAttemptId)
data.dispose()
}
def stop(): Unit = {
blockTransferService.close()
if (shuffleClient ne blockTransferService) {
// Closing should be idempotent, but maybe not for the NioBlockTransferService.
shuffleClient.close()
}
remoteBlockTempFileManager.stop()
diskBlockManager.stop()
rpcEnv.stop(slaveEndpoint)
blockInfoManager.clear()
memoryStore.clear()
futureExecutionContext.shutdownNow()
logInfo("BlockManager stopped")
}
}
private[spark] object BlockManager {
private val ID_GENERATOR = new IdGenerator
def blockIdsToLocations(
blockIds: Array[BlockId],
env: SparkEnv,
blockManagerMaster: BlockManagerMaster = null): Map[BlockId, Seq[String]] = {
// blockManagerMaster != null is used in tests
assert(env != null || blockManagerMaster != null)
val blockLocations: Seq[Seq[BlockManagerId]] = if (blockManagerMaster == null) {
env.blockManager.getLocationBlockIds(blockIds)
} else {
blockManagerMaster.getLocations(blockIds)
}
val blockManagers = new HashMap[BlockId, Seq[String]]
for (i <- 0 until blockIds.length) {
blockManagers(blockIds(i)) = blockLocations(i).map { loc =>
ExecutorCacheTaskLocation(loc.host, loc.executorId).toString
}
}
blockManagers.toMap
}
private class ShuffleMetricsSource(
override val sourceName: String,
metricSet: MetricSet) extends Source {
override val metricRegistry = new MetricRegistry
metricRegistry.registerAll(metricSet)
}
class RemoteBlockDownloadFileManager(blockManager: BlockManager)
extends DownloadFileManager with Logging {
// lazy because SparkEnv is set after this
lazy val encryptionKey = SparkEnv.get.securityManager.getIOEncryptionKey()
private class ReferenceWithCleanup(
file: DownloadFile,
referenceQueue: JReferenceQueue[DownloadFile]
) extends WeakReference[DownloadFile](file, referenceQueue) {
val filePath = file.path()
def cleanUp(): Unit = {
logDebug(s"Clean up file $filePath")
if (!file.delete()) {
logDebug(s"Fail to delete file $filePath")
}
}
}
private val referenceQueue = new JReferenceQueue[DownloadFile]
private val referenceBuffer = Collections.newSetFromMap[ReferenceWithCleanup](
new ConcurrentHashMap)
private val POLL_TIMEOUT = 1000
@volatile private var stopped = false
private val cleaningThread = new Thread() { override def run() { keepCleaning() } }
cleaningThread.setDaemon(true)
cleaningThread.setName("RemoteBlock-temp-file-clean-thread")
cleaningThread.start()
override def createTempFile(transportConf: TransportConf): DownloadFile = {
val file = blockManager.diskBlockManager.createTempLocalBlock()._2
encryptionKey match {
case Some(key) =>
          // Encryption is enabled, so the data we read off the network is already decrypted;
          // we need to re-encrypt it when writing to disk. Note that the data may have been
          // encrypted when it was cached on disk on the remote side, but it has already been
          // decrypted by now (see EncryptedBlockData).
new EncryptedDownloadFile(file, key)
case None =>
new SimpleDownloadFile(file, transportConf)
}
}
override def registerTempFileToClean(file: DownloadFile): Boolean = {
referenceBuffer.add(new ReferenceWithCleanup(file, referenceQueue))
}
def stop(): Unit = {
stopped = true
cleaningThread.interrupt()
cleaningThread.join()
}
private def keepCleaning(): Unit = {
while (!stopped) {
try {
Option(referenceQueue.remove(POLL_TIMEOUT))
.map(_.asInstanceOf[ReferenceWithCleanup])
.foreach { ref =>
referenceBuffer.remove(ref)
ref.cleanUp()
}
} catch {
case _: InterruptedException =>
// no-op
case NonFatal(e) =>
logError("Error in cleaning thread", e)
}
}
}
}
/**
* A DownloadFile that encrypts data when it is written, and decrypts when it's read.
*/
private class EncryptedDownloadFile(
file: File,
key: Array[Byte]) extends DownloadFile {
private val env = SparkEnv.get
override def delete(): Boolean = file.delete()
override def openForWriting(): DownloadFileWritableChannel = {
new EncryptedDownloadWritableChannel()
}
override def path(): String = file.getAbsolutePath
private class EncryptedDownloadWritableChannel extends DownloadFileWritableChannel {
private val countingOutput: CountingWritableChannel = new CountingWritableChannel(
Channels.newChannel(env.serializerManager.wrapForEncryption(new FileOutputStream(file))))
override def closeAndRead(): ManagedBuffer = {
countingOutput.close()
val size = countingOutput.getCount
new EncryptedManagedBuffer(new EncryptedBlockData(file, size, env.conf, key))
}
override def write(src: ByteBuffer): Int = countingOutput.write(src)
override def isOpen: Boolean = countingOutput.isOpen()
override def close(): Unit = countingOutput.close()
}
}
}
|
WindCanDie/spark
|
core/src/main/scala/org/apache/spark/storage/BlockManager.scala
|
Scala
|
apache-2.0
| 70,722
|
package org.littlewings.javaee7.batch
import java.io.Serializable
import javax.batch.api.chunk.ItemReader
import javax.enterprise.context.Dependent
import javax.inject.Named
import org.jboss.logging.Logger
@Dependent
@Named("MyItemReader")
class MyItemReader extends ItemReader {
private val logger: Logger = Logger.getLogger(getClass)
private val languages: Iterator[String] =
List("Java", "Scala", "Groovy", "Clojure", "Kotlin", "Perl", "Ruby", "Python", "PHP", "C").iterator
override def open(checkpoint: Serializable): Unit =
logger.infof("open.")
override def readItem(): AnyRef = {
logger.infof("readItem.")
if (languages.hasNext) languages.next
else null
}
override def checkpointInfo(): Serializable = {
logger.infof("checkpoint.")
null
}
override def close(): Unit =
logger.infof("close.")
}
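// Hedged usage sketch (not part of the original file): drives MyItemReader the way a chunk
// step would, i.e. open, then readItem until it returns null, then close. The object name
// and the drain helper are hypothetical additions for illustration only.
object MyItemReaderSketch {
  def drain(): List[String] = {
    val reader = new MyItemReader
    reader.open(null)
    // readItem yields the remaining languages one by one and returns null once exhausted.
    val items = Iterator.continually(reader.readItem())
      .takeWhile(_ != null)
      .collect { case s: String => s }
      .toList
    reader.close()
    items
  }
}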
|
kazuhira-r/javaee7-scala-examples
|
jbatch-chunked-with-tx/src/main/scala/org/littlewings/javaee7/batch/MyItemReader.scala
|
Scala
|
mit
| 858
|
/*
* Copyright 2015 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.feature.serialization
import java.util.{Collections => JCollections, Map => JMap, UUID}
import com.vividsolutions.jts.geom.Geometry
import org.geotools.factory.Hints
import org.locationtech.geomesa.feature.serialization.AbstractWriter.NULL_MARKER_STR
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes._
/** Combines all readers.
*
*/
trait AbstractReader[Reader]
extends PrimitiveReader[Reader]
with NullableReader[Reader]
with CollectionReader[Reader]
with GeometryReader[Reader]
with HintKeyReader[Reader] {
def readUUID: DatumReader[Reader, UUID] = (in: Reader) => {
val mostSignificantBits = readLong(in)
val leastSignificantBits = readLong(in)
new UUID(mostSignificantBits, leastSignificantBits)
}
/** A [[DatumReader]] which reads a class name and then an object of that class. If the class name is a null marker
* then ``null`` will be returned.
*/
def readGeneric(version: Version): DatumReader[Reader, AnyRef] = (reader) => {
val className = readString(reader)
if (className == NULL_MARKER_STR) {
null
} else {
val clazz = Class.forName(className)
selectReader(clazz, version).apply(reader)
}
}
/**
   * A [[DatumReader]] for reading a map where the keys and values may be of any type. The map may not be null.
   * The reader will call ``readArrayStart(reader)`` and then, for each entry, read up to four items: the key's
   * class name, the key, the value's class name, and the value.
*/
def readGenericMap(version: Version): DatumReader[Reader, JMap[AnyRef, AnyRef]] = (reader) => {
var toRead = readArrayStart(reader)
val map = new java.util.HashMap[AnyRef, AnyRef](toRead)
while (toRead > 0) {
val key = readGeneric(version).apply(reader)
val value = readGeneric(version).apply(reader)
map.put(key, value)
toRead -= 1
}
map
}
/**
* @param cls the [[Class]] of the object to be read
   * @return a [[DatumReader]] capable of reading objects of the given ``cls``
*/
def selectReader(cls: Class[_], version: Version,
metadata: JMap[_ <: AnyRef, _ <: AnyRef] = JCollections.emptyMap(),
isNullable: isNullableFn = notNullable): DatumReader[Reader, AnyRef] = {
val reader: DatumReader[Reader, AnyRef] = {
if (classOf[java.lang.String].isAssignableFrom(cls)) readString
else if (classOf[java.lang.Integer].isAssignableFrom(cls)) readInt
else if (classOf[java.lang.Long].isAssignableFrom(cls)) readLong
else if (classOf[java.lang.Float].isAssignableFrom(cls)) readFloat
else if (classOf[java.lang.Double].isAssignableFrom(cls)) readDouble
else if (classOf[java.lang.Boolean].isAssignableFrom(cls)) readBoolean
else if (classOf[java.util.Date].isAssignableFrom(cls)) readDate
else if (classOf[UUID].isAssignableFrom(cls)) readUUID
else if (classOf[Geometry].isAssignableFrom(cls)) selectGeometryReader(version)
else if (classOf[Hints.Key].isAssignableFrom(cls)) readHintKey
else if (classOf[java.util.List[_]].isAssignableFrom(cls)) {
val elemClass = metadata.get(USER_DATA_LIST_TYPE).asInstanceOf[Class[_]]
val elemReader = selectReader(elemClass, version, isNullable = isNullable)
readList(elemReader)
}
else if (classOf[java.util.Map[_, _]].isAssignableFrom(cls)) {
val keyClass = metadata.get(USER_DATA_MAP_KEY_TYPE).asInstanceOf[Class[_]]
val valueClass = metadata.get(USER_DATA_MAP_VALUE_TYPE).asInstanceOf[Class[_]]
val keyDecoding = selectReader(keyClass, version, isNullable = isNullable)
val valueDecoding = selectReader(valueClass, version, isNullable = isNullable)
readMap(keyDecoding, valueDecoding)
}
else throw new IllegalArgumentException("Unsupported class: " + cls)
}.asInstanceOf[DatumReader[Reader, AnyRef]]
if (isNullable(cls)) {
readNullable(reader)
} else {
reader
}
}
}
|
jnh5y/geomesa
|
geomesa-feature/src/main/scala/org/locationtech/geomesa/feature/serialization/AbstractReader.scala
|
Scala
|
apache-2.0
| 4,571
|
package rpgboss.editor.uibase
import scala.swing._
import scala.swing.event._
import java.awt.event.MouseEvent
import javax.swing.UIManager
import javax.swing.BorderFactory
import java.awt.Font
import scala.collection.mutable.ArrayBuffer
import rpgboss.editor.Internationalized._
import scala.reflect.ClassTag
import javax.swing.border.BevelBorder
import rpgboss.editor.util.MouseUtil
class InlineWidgetWrapper(
parent: InlineWidgetArrayEditor[_],
var index: Int,
widget: Component)
extends BoxPanel(Orientation.Horizontal) {
border = BorderFactory.createBevelBorder(BevelBorder.RAISED)
val deleteButton = new Button(Action(getMessage("Delete")) {
parent.deleteElement(index)
})
contents += widget
contents += deleteButton
listenTo(mouse.clicks)
reactions += {
case e: MouseClicked =>
requestFocus()
if (MouseUtil.isRightClick(e)) {
val menu = new RpgPopupMenu {
contents += new MenuItem(Action(getMessage("Insert_Above") + "...") {
parent.addAction(index)
})
parent.genericEditAction.map { editAction =>
contents += new MenuItem(Action(getMessage("Edit") + "...") {
editAction(index)
})
}
contents += new MenuItem(Action(getMessage("Delete")) {
parent.deleteElement(index)
})
}
menu.show(this, e.point.x, e.point.y)
} else if (e.clicks == 2) {
parent.genericEditAction.map(editAction => editAction(index))
}
}
}
/**
* Used to edit an array with a set of inline widgets. This differs from the
* ArrayEditingPanel derived classes in a few ways:
* - The editing takes place in an in-line rich widget instead of in a separate
* pane or dialog box.
* - There is no explicit resizing of the array other than by adding or
* deleting specific elements.
* - This is used for arrays with a few elements rather than many.
* @param onUpdate Called when the contents change.
*/
abstract class InlineWidgetArrayEditor[T: ClassTag](
owner: Window,
initial: Array[T],
onUpdate: (Array[T]) => Unit)
extends BoxPanel(Orientation.Vertical) {
def title: String
def addAction(index: Int)
def newInlineWidget(elementModel: T): Component
def genericEditAction: Option[Int => Unit] = None
def getAddPanel(): Option[Component] = {
val panel = new BoxPanel(Orientation.Horizontal) {
contents += Swing.HGlue
contents += new Button(Action(getMessage("Add") + "...") {
addAction(model.length)
})
contents += Swing.HGlue
}
Some(panel)
}
border = BorderFactory.createTitledBorder(title)
val model = ArrayBuffer(initial : _*)
def sendUpdate() = onUpdate(model.toArray)
def newWrappedInlineWidget(index: Int, elementModel: T) = {
new InlineWidgetWrapper(this, index, newInlineWidget(elementModel))
}
val arrayPanel = new BoxPanel(Orientation.Vertical) {
background = UIManager.getColor("TextArea.background")
listenTo(mouse.clicks)
reactions += {
case e: MouseClicked =>
if (MouseUtil.isRightClick(e)) {
val menu = new RpgPopupMenu {
contents +=
new MenuItem(Action(getMessage("Add") + "...") {
addAction(model.length)
})
}
menu.show(this, e.point.x, e.point.y)
} else if (e.clicks == 2) {
addAction(model.length)
}
}
}
for ((element, i) <- model.zipWithIndex) {
arrayPanel.contents += newWrappedInlineWidget(i, element)
}
getAddPanel.map(arrayPanel.contents += _)
arrayPanel.contents += Swing.VGlue
def preferredWidth = 200
val scrollPane = new ScrollPane {
preferredSize = new Dimension(preferredWidth, 200)
contents = arrayPanel
horizontalScrollBarPolicy = ScrollPane.BarPolicy.Never
verticalScrollBarPolicy = ScrollPane.BarPolicy.Always
}
contents += scrollPane
def revalidateAndRepaint() = {
scrollPane.revalidate()
scrollPane.repaint()
}
def insertElement(index: Int, element: T) = {
assert(index >= 0)
assert(index <= model.length)
model.insert(index, element)
sendUpdate()
// Insert a new panel.
arrayPanel.contents.insert(index, newWrappedInlineWidget(index, element))
// Update the index of all the event panels following this one.
for (i <- (index + 1) until model.length) {
arrayPanel.contents(i).asInstanceOf[InlineWidgetWrapper].index += 1
}
revalidateAndRepaint()
}
def deleteElement(index: Int) = {
assert(index >= 0)
assert(index < model.length)
model.remove(index)
sendUpdate()
arrayPanel.contents.remove(index)
for (i <- index until model.length) {
arrayPanel.contents(i).asInstanceOf[InlineWidgetWrapper].index -= 1
}
revalidateAndRepaint()
}
}
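/**
 * Hedged sketch (not part of the original file): the smallest concrete subclass, showing which
 * members a user of InlineWidgetArrayEditor must supply (title, addAction, newInlineWidget).
 * The class name, the Label widget, and the empty-string default inserted on add are
 * hypothetical choices for illustration only.
 */
class StringArrayEditor(
  owner: Window,
  initial: Array[String],
  onUpdate: (Array[String]) => Unit)
  extends InlineWidgetArrayEditor[String](owner, initial, onUpdate) {
  override def title = "Strings"
  // Render each element as a plain label; a real editor would use an editable widget.
  override def newInlineWidget(elementModel: String): Component = new Label(elementModel)
  // Insert an empty string at the requested index; a real editor would prompt the user.
  override def addAction(index: Int): Unit = insertElement(index, "")
}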
|
hendrik-weiler/rpgboss
|
editor/src/main/scala/rpgboss/editor/uibase/InlineWidgetArrayEditor.scala
|
Scala
|
agpl-3.0
| 4,863
|
package com.github.blemale.scaffeine
import java.util.concurrent.Executor
object DirectExecutor extends Executor {
override def execute(command: Runnable): Unit = command.run()
}
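// Hedged usage sketch (not part of the original file): DirectExecutor runs the submitted
// Runnable synchronously on the calling thread, so the flag below is already set when
// execute() returns. The object name is hypothetical.
object DirectExecutorSketch {
  def main(args: Array[String]): Unit = {
    var ran = false
    DirectExecutor.execute(new Runnable { override def run(): Unit = ran = true })
    assert(ran, "the task should have run synchronously on the calling thread")
  }
}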
|
blemale/scaffeine
|
src/test/scala/com/github/blemale/scaffeine/DirectExecutor.scala
|
Scala
|
apache-2.0
| 183
|
package com.ebay.neutrino.datasource
import com.ebay.neutrino.config.{LoadBalancer, Configuration}
import com.typesafe.config.Config
/**
* Created by blpaul on 2/24/2016.
*/
trait DataSource {
  // Load the configuration from the datasource and build the LoadBalancer.
def load() : LoadBalancer
}
class FileReader extends DataSource {
override def load(): LoadBalancer = {
val results = Configuration.load("/etc/neutrino/slb.conf", "resolvers")
LoadBalancer(results)
}
}
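/**
 * Hedged sketch (not part of the original file): the same loading logic as FileReader, but with
 * the configuration path and key passed in instead of hard-coded. The class name and parameters
 * are hypothetical; it only reuses the Configuration.load and LoadBalancer calls shown above.
 */
class PathReader(path: String, key: String = "resolvers") extends DataSource {
  override def load(): LoadBalancer = {
    val results = Configuration.load(path, key)
    LoadBalancer(results)
  }
}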
|
eBay/Neutrino
|
src/main/scala/com/ebay/neutrino/datasource/DataSource.scala
|
Scala
|
apache-2.0
| 447
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.analysis.TestRelations.testRelation2
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.internal.SQLConf
class SubstituteUnresolvedOrdinalsSuite extends AnalysisTest {
private lazy val a = testRelation2.output(0)
private lazy val b = testRelation2.output(1)
test("unresolved ordinal should not be unresolved") {
// Expression OrderByOrdinal is unresolved.
assert(!UnresolvedOrdinal(0).resolved)
}
test("order by ordinal") {
// Tests order by ordinal, apply single rule.
val plan = testRelation2.orderBy(Literal(1).asc, Literal(2).asc)
comparePlans(
SubstituteUnresolvedOrdinals.apply(plan),
testRelation2.orderBy(UnresolvedOrdinal(1).asc, UnresolvedOrdinal(2).asc))
// Tests order by ordinal, do full analysis
checkAnalysis(plan, testRelation2.orderBy(a.asc, b.asc))
// order by ordinal can be turned off by config
withSQLConf(SQLConf.ORDER_BY_ORDINAL.key -> "false") {
comparePlans(
SubstituteUnresolvedOrdinals.apply(plan),
testRelation2.orderBy(Literal(1).asc, Literal(2).asc))
}
}
test("group by ordinal") {
// Tests group by ordinal, apply single rule.
val plan2 = testRelation2.groupBy(Literal(1), Literal(2))('a, 'b)
comparePlans(
SubstituteUnresolvedOrdinals.apply(plan2),
testRelation2.groupBy(UnresolvedOrdinal(1), UnresolvedOrdinal(2))('a, 'b))
// Tests group by ordinal, do full analysis
checkAnalysis(plan2, testRelation2.groupBy(a, b)(a, b))
// group by ordinal can be turned off by config
withSQLConf(SQLConf.GROUP_BY_ORDINAL.key -> "false") {
comparePlans(
SubstituteUnresolvedOrdinals.apply(plan2),
testRelation2.groupBy(Literal(1), Literal(2))('a, 'b))
}
}
}
|
mahak/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/SubstituteUnresolvedOrdinalsSuite.scala
|
Scala
|
apache-2.0
| 2,766
|
package net.sf.latexdraw.glib.models.impl
import scala.collection.JavaConversions.asScalaBuffer
import net.sf.latexdraw.glib.models.interfaces.IGroup
import net.sf.latexdraw.glib.models.interfaces.IPoint
import net.sf.latexdraw.glib.models.interfaces.IStandardGrid
/**
* This trait encapsulates the code of the group related to the support of standard grids.<br>
* <br>
* This file is part of LaTeXDraw.<br>
* Copyright (c) 2005-2013 Arnaud BLOUIN<br>
* <br>
* LaTeXDraw is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any later version.
* <br>
* LaTeXDraw is distributed without any warranty; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.<br>
* <br>
* 2012-04-17<br>
* @author Arnaud BLOUIN
* @since 3.0
*/
protected trait LGroupStdGrid extends IGroup {
/** May return the first stdGrid of the group. */
private def firstIStdGrid = getShapes.find{shape => shape.isInstanceOf[IStandardGrid] }
override def getGridMinX() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridMinX
case _ => Double.NaN
}
override def getGridMaxX() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridMaxX
case _ => Double.NaN
}
override def getGridMinY() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridMinY
case _ => Double.NaN
}
override def getGridMaxY() =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridMaxY
case _ => Double.NaN
}
override def getLabelsSize() : Int =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getLabelsSize
case _ => -1
}
override def setLabelsSize(labelsSize : Int) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setLabelsSize(labelsSize)
}
}
override def setGridEndX(x : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setGridEndX(x)
}
}
override def setGridEndY(y : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setGridEndY(y)
}
}
override def isXLabelSouth() : Boolean =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].isXLabelSouth
case _ => false
}
override def setXLabelSouth(isXLabelSouth : Boolean) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setXLabelSouth(isXLabelSouth)
}
}
override def isYLabelWest() : Boolean =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].isYLabelWest
case _ => false
}
override def setYLabelWest(isYLabelWest : Boolean) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setYLabelWest(isYLabelWest)
}
}
override def getGridStartX() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridStartX
case _ => Double.NaN
}
override def getGridStartY() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridStartY
case _ => Double.NaN
}
override def setGridStart(x : Double, y : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setGridStart(x, y)
}
}
override def getGridEndX() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridEndX
case _ => Double.NaN
}
override def getGridEndY() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridEndY
case _ => Double.NaN
}
override def setGridEnd(x : Double, y : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setGridEnd(x, y)
}
}
override def getOriginX() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getOriginX
case _ => Double.NaN
}
override def getOriginY() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getOriginY
case _ => Double.NaN
}
override def setOrigin(x : Double, y : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setOrigin(x, y)
}
}
override def setGridStartY(y : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setGridStartY(y)
}
}
override def setGridStartX(x : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setGridStartX(x)
}
}
override def setOriginX(x : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setOriginX(x)
}
}
override def setOriginY(y : Double) {
getShapes.foreach{shape =>
if(shape.isInstanceOf[IStandardGrid])
shape.asInstanceOf[IStandardGrid].setOriginY(y)
}
}
override def getStep() : Double =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getStep
case _ => Double.NaN
}
override def getGridStart() : IPoint =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridStart
case _ => null
}
override def getGridEnd() : IPoint =
firstIStdGrid match {
case Some(stdGrid) => stdGrid.asInstanceOf[IStandardGrid].getGridEnd
case _ => null
}
}
|
arnobl/latexdraw-mutants
|
GUImutants/original/net.sf.latexdraw/src/main/net/sf/latexdraw/glib/models/impl/LGroupStdGrid.scala
|
Scala
|
gpl-2.0
| 5,989
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl._
import org.scalatest.{FlatSpec, Matchers}
import scala.math.abs
@com.intel.analytics.bigdl.tags.Parallel
class TanhSpec extends FlatSpec with Matchers {
"A Tanh Module " should "generate correct output and grad" in {
val module = new Tanh[Double]()
val input = Tensor[Double](2, 2, 2)
input(Array(1, 1, 1)) = -0.17020166106522
input(Array(1, 1, 2)) = 0.57785657607019
input(Array(1, 2, 1)) = -1.3404131438583
input(Array(1, 2, 2)) = 1.0938102817163
input(Array(2, 1, 1)) = 1.120370157063
input(Array(2, 1, 2)) = -1.5014141565189
input(Array(2, 2, 1)) = 0.3380249235779
input(Array(2, 2, 2)) = -0.625677742064
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput(Array(1, 1, 1)) = 0.79903302760795
gradOutput(Array(1, 1, 2)) = 0.019753993256018
gradOutput(Array(1, 2, 1)) = 0.63136631483212
gradOutput(Array(1, 2, 2)) = 0.29849314852618
gradOutput(Array(2, 1, 1)) = 0.94380705454387
gradOutput(Array(2, 1, 2)) = 0.030344664584845
gradOutput(Array(2, 2, 1)) = 0.33804601291195
gradOutput(Array(2, 2, 2)) = 0.8807330634445
val expectedOutput = Tensor[Double](2, 2, 2)
expectedOutput(Array(1, 1, 1)) = -0.16857698275003
expectedOutput(Array(1, 1, 2)) = 0.52110579963112
expectedOutput(Array(1, 2, 1)) = -0.87177144344863
expectedOutput(Array(1, 2, 2)) = 0.79826462420686
expectedOutput(Array(2, 1, 1)) = 0.80769763073281
expectedOutput(Array(2, 1, 2)) = -0.90540347425835
expectedOutput(Array(2, 2, 1)) = 0.32571298952384
expectedOutput(Array(2, 2, 2)) = -0.55506882753488
val expectedGrad = Tensor[Double](2, 2, 2)
expectedGrad(Array(1, 1, 1)) = 0.77632594793144
expectedGrad(Array(1, 1, 2)) = 0.014389771607755
expectedGrad(Array(1, 2, 1)) = 0.15153710218424
expectedGrad(Array(1, 2, 2)) = 0.1082854310036
expectedGrad(Array(2, 1, 1)) = 0.32809049064441
expectedGrad(Array(2, 1, 2)) = 0.0054694603766104
expectedGrad(Array(2, 2, 1)) = 0.3021830658283
expectedGrad(Array(2, 2, 2)) = 0.6093779706637
val inputOrg = input.clone()
val gradOutputOrg = gradOutput.clone()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
expectedOutput.map(output, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6);
v1
})
expectedGrad.map(gradInput, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6);
v1
})
assert(input == inputOrg)
assert(gradOutput == gradOutputOrg)
}
"A Tanh Module " should "be good in gradient check" in {
val module = new Tanh[Double]()
val input = Tensor[Double](2, 2, 2).rand()
val checker = new GradientChecker(1e-4, 1e-2)
checker.checkLayer[Double](module, input) should be(true)
}
}
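// Hedged sketch (not part of the original spec): the expected gradients above follow the
// identity d/dx tanh(x) = 1 - tanh(x)^2, i.e. gradInput = gradOutput * (1 - output^2). This
// standalone check compares the analytic derivative with a central finite difference at one
// of the sample inputs. The object name is hypothetical.
object TanhGradientSketch {
  def main(args: Array[String]): Unit = {
    val x = 0.57785657607019
    val analytic = 1.0 - math.tanh(x) * math.tanh(x)
    val eps = 1e-6
    val numeric = (math.tanh(x + eps) - math.tanh(x - eps)) / (2 * eps)
    assert(abs(analytic - numeric) < 1e-6)
  }
}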
|
jenniew/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/TanhSpec.scala
|
Scala
|
apache-2.0
| 3,469
|
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
val readme = sc.textFile("README.md")
val wc = readme.flatMap(_.split("[^a-zA-Z]+")).filter(_.nonEmpty).map(_.toLowerCase).countByValue.materialize
val scioResult = sc.run().waitUntilDone()
val w = scioResult.tap(wc).value.maxBy(_._2)._1
println(s"SUCCESS: [$w]")
|
spotify/scio
|
scio-repl/src/it/resources/word-count-repl.scala
|
Scala
|
apache-2.0
| 864
|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.driver
import com.stratio.crossdata.test.BaseXDTest
class DriverStandaloneIT extends BaseXDTest {
import com.stratio.crossdata.driver.test.Utils._
driverFactories foreach { case (factory, description) =>
implicit val ctx = DriverTestContext(factory)
"Crossdata driver" should s"fail with a timeout when there is no server $description" in {
the [RuntimeException] thrownBy {
factory.newSession()
} should have message s"Cannot establish connection to XDServer: timed out after ${Driver.InitializationTimeout}"
}
}
}
|
darroyocazorla/crossdata
|
testsIT/src/test/scala/com/stratio/crossdata/driver/DriverStandaloneIT.scala
|
Scala
|
apache-2.0
| 1,216
|
import leon.lang._
import leon.lang.synthesis._
import leon.annotation._
object Numerals {
sealed abstract class Num
case object Z extends Num
case class S(pred: Num) extends Num
def value(n: Num): BigInt = {
n match {
case Z => BigInt(0)
case S(p) => 1 + value(p)
}
} ensuring (_ >= 0)
def add(x: Num, y: Num): Num = (x match {
case Z => y
case S(p) => add(p, S(y))
}) ensuring (value(_) == value(x) + value(y))
def distinct(x: Num, y: Num): Num = {
choose { (r : Num) =>
r != x && r != y
}
}
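  // Hedged sketch (not part of the original testcase): concrete numerals exercising the
  // encoding, e.g. value(three) == BigInt(3) and value(add(two, three)) == BigInt(5).
  def two: Num = S(S(Z))
  def three: Num = add(two, S(Z))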
}
|
epfl-lara/leon
|
testcases/web/synthesis/19_UnaryNumerals_Distinct.scala
|
Scala
|
gpl-3.0
| 561
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, Every, One, Many}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import scala.collection.JavaConverters._
import Matchers._
class EveryShouldContainOneOfLogicalAndSpec extends Spec {
val invertedStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
val invertedListOfStringEquality =
new Equality[Every[String]] {
def areEqual(a: Every[String], b: Any): Boolean = a != b
}
val upperCaseStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a.toUpperCase == b
}
val upperCaseListOfStringEquality =
new Equality[Every[String]] {
def areEqual(a: Every[String], b: Any): Boolean = a.map(_.toUpperCase) == b
}
private def upperCase(value: Any): Any =
value match {
case l: Every[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
//ADDITIONAL//
val fileName: String = "EveryShouldContainOneOfLogicalAndSpec.scala"
object `an Every` {
val fumList: Every[String] = Every("fum")
val toList: Every[String] = Every("to")
object `when used with (contain oneOf (..) and contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain oneOf ("fee", "fie", "foe", "fum") and contain oneOf("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (contain oneOf ("happy", "birthday", "to", "you") and contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain oneOf ("fee", "fie", "foe", "fum") and contain oneOf ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e2, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\"") + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and contain oneOf ("FIE", "FEE", "FUM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (contain oneOf ("fee", "fie", "foe", "fum") and contain oneOf ("FEE", "FIE", "FOE", "FUM"))
}
checkMessageStackDepth(e1, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and (contain oneOf ("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e2, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and contain oneOf ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain oneOf ("fee", "fie", "foe", "fum") and contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and contain oneOf ("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
(fumList should (contain oneOf (" FEE ", " FIE ", " FOE ", " FUM ") and contain oneOf (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain oneOf ("fee", "fie", "foe", "fie", "fum") and contain oneOf("fie", "fee", "fum", "foe"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (contain oneOf ("fie", "fee", "fum", "foe") and contain oneOf("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (equal (..) and contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (equal (fumList) and contain oneOf("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) and contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (equal (fumList) and contain oneOf ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (equal (fumList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))
}
checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (equal (fumList) and (contain oneOf ("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (equal (toList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (equal (fumList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (equal (toList) and contain oneOf ("fee", "fie", "foe", "fum"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
(fumList should (equal (toList) and contain oneOf (" FEE ", " FIE ", " FOE ", " FUM "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (equal (fumList) and contain oneOf("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (be (..) and contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (be_== (fumList) and contain oneOf("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) and contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (be_== (fumList) and contain oneOf ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (be_== (fumList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))
}
checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (be_== (fumList) and (contain oneOf ("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (be_== (fumList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (be_== (fumList) and contain oneOf ("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (be_== (toList) and contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
(fumList should (be_== (fumList) and contain oneOf (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (be_== (fumList) and contain oneOf("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (contain oneOf (..) and be (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain oneOf("fie", "fee", "fum", "foe") and be_== (fumList))
val e1 = intercept[TestFailedException] {
fumList should (contain oneOf ("fee", "fie", "foe", "fum") and be_== (toList))
}
checkMessageStackDepth(e1, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\"") + ", but " + Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and be_== (fumList))
}
checkMessageStackDepth(e2, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and be_== (fumList))
val e1 = intercept[TestFailedException] {
fumList should (contain oneOf ("fie", "fee", "fum", "foe") and be_== (toList))
}
checkMessageStackDepth(e1, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fie\\", \\"fee\\", \\"fum\\", \\"foe\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain oneOf ("fie", "fee", "fum", "foe") and (be_== (fumList)))
}
checkMessageStackDepth(e2, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fie\\", \\"fee\\", \\"fum\\", \\"foe\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and be_== (fumList))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain oneOf ("fie", "fee", "fum", "foe") and be_== (fumList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fie\\", \\"fee\\", \\"fum\\", \\"foe\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (contain oneOf ("FEE", "FIE", "FOE", "FUM") and be_== (toList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", but " + Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
(fumList should (contain oneOf (" FEE ", " FIE ", " FOE ", " FUM ") and be_== (fumList))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain oneOf("fee", "fie", "foe", "fie", "fum") and be_== (fumList))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (not contain oneOf (..) and not contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not contain oneOf ("FEE", "FIE", "FOE", "FUM") and not contain oneOf("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (not contain oneOf ("fee", "fie", "foe", "fum") and not contain oneOf ("FEE", "FIE", "FOE", "FUM"))
}
checkMessageStackDepth(e1, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not contain oneOf ("FEE", "FIE", "FOE", "FUM") and not contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e2, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not contain oneOf ("fee", "fie", "foe", "fum") and not contain oneOf ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (not contain oneOf ("FEE", "FIE", "FOE", "FUM") and not contain oneOf ("fee", "fie", "foe", "fum"))
}
val e2 = intercept[TestFailedException] {
fumList should (not contain oneOf ("fee", "fie", "foe", "fum") and (not contain oneOf ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e2, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\"") + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not contain oneOf ("fee", "fie", "foe", "fum") and not contain oneOf ("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not contain oneOf ("FEE", "FIE", "FOE", "FUM") and not contain oneOf ("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (not contain oneOf ("fee", "fie", "foe", "fum") and not contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("didNotContainOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\"") + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
(fumList should (contain oneOf (" FEE ", " FIE ", " FOE ", " FUM ") and contain oneOf (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not contain oneOf ("fee", "fie", "foe", "fie", "fum") and not contain oneOf("FEE", "FIE", "FOE", "FUM"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (not contain oneOf ("FEE", "FIE", "FOE", "FUM") and not contain oneOf("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (not equal (..) and not contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not equal (toList) and not contain oneOf("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) and not contain oneOf ("FEE", "FIE", "FOE", "FUM"))
}
checkMessageStackDepth(e1, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not equal (toList) and not contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e2, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not equal (toList) and not contain oneOf ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) and not contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not equal (toList) and (not contain oneOf ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e2, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not equal (fumList) and not contain oneOf ("fee", "fie", "foe", "fum"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not equal (fumList) and not contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("didNotEqual", decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (not equal (toList) and not contain oneOf ("fee", "fie", "foe", "fum"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("equaled", decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
(fumList should (not contain oneOf (" FEE ", " FIE ", " FOE ", " FUU ") and not contain oneOf (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not equal (toList) and not contain oneOf("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (not be (..) and not contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not be_== (toList) and not contain oneOf("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) and not contain oneOf ("FEE", "FIE", "FOE", "FUM"))
}
checkMessageStackDepth(e1, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not be_== (toList) and not contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e2, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not be_== (toList) and not contain oneOf ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) and not contain oneOf ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not be_== (toList) and (not contain oneOf ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e2, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not be_== (toList) and not contain oneOf ("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not be_== (toList) and not contain oneOf ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources("wasNotEqualTo", decorateToStringValue(fumList), decorateToStringValue(toList)) + ", but " + Resources("containedOneOfElements", decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (not be_== (fumList) and not contain oneOf ("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, Resources("wasEqualTo", decorateToStringValue(fumList), decorateToStringValue(fumList)), fileName, thisLineNumber - 2)
(fumList should (not contain oneOf (" FEE ", " FIE ", " FOE ", " FUU ") and not contain oneOf (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not be_== (toList) and not contain oneOf("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
}
object `every of Everys` {
val list1s: Every[Every[Int]] = Every(Every(1), Every(1), Every(1))
val lists: Every[Every[Int]] = Every(Every(1), Every(1), Every(2))
val hiLists: Every[Every[String]] = Every(Every("hi"), Every("hi"), Every("hi"))
val toLists: Every[Every[String]] = Every(Every("to"), Every("to"), Every("to"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \\n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \\n" +
"in " + decorateToStringValue(left)
object `when used with (contain oneOf (..) and contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (contain oneOf (3, 2, 1) and contain oneOf (1, 3, 4))
atLeast (2, lists) should (contain oneOf (3, 1, 5) and contain oneOf (1, 3, 4))
atMost (2, lists) should (contain oneOf (3, 2, 8) and contain oneOf (2, 3, 4))
no (lists) should (contain oneOf (3, 6, 9) and contain oneOf (3, 4, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (contain oneOf (1, 6, 8) and contain oneOf (1, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, FailureMessages("didNotContainOneOfElements", lists(2), UnquotedString("1, 6, 8")), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (lists) should (contain oneOf (1, 2, 8) and contain oneOf (1, 3, 4))
}
checkMessageStackDepth(e2, allErrMsg(2, FailureMessages("containedOneOfElements", lists(2), UnquotedString("1, 2, 8")) + ", but " + FailureMessages("didNotContainOneOfElements", lists(2), UnquotedString("1, 3, 4")), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (contain oneOf ("hi", "hello") and contain oneOf ("ho", "hey", "howdy"))
}
          checkMessageStackDepth(e3, allErrMsg(0, FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"hi\", \"hello\"")) + ", but " + FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"ho\", \"hey\", \"howdy\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (lists) should (contain oneOf (1, 2, 8) and contain oneOf (1, 3, 4))
}
checkMessageStackDepth(e4, allErrMsg(2, FailureMessages("containedOneOfElements", lists(2), UnquotedString("1, 2, 8")) + ", but " + FailureMessages("didNotContainOneOfElements", lists(2), UnquotedString("1, 3, 4")), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain oneOf ("HI", "HE") and contain oneOf ("HI", "HE"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (contain oneOf ("hi", "he") and contain oneOf ("HI", "HE"))
}
          checkMessageStackDepth(e1, allErrMsg(0, FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"hi\", \"he\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (contain oneOf ("HI", "HE") and contain oneOf ("hi", "he"))
}
          checkMessageStackDepth(e2, allErrMsg(0, FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")) + ", but " + FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"hi\", \"he\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (contain oneOf ("HI", "HE") and contain oneOf ("HI", "HE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (contain oneOf ("hi", "he") and contain oneOf ("HI", "HE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
          checkMessageStackDepth(e1, allErrMsg(0, FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"hi\", \"he\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (contain oneOf ("HI", "HE") and contain oneOf ("hi", "he"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
          checkMessageStackDepth(e2, allErrMsg(0, FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")) + ", but " + FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"hi\", \"he\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain oneOf (3, 2, 2, 1) and contain oneOf (1, 3, 4))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain oneOf (1, 3, 4) and contain oneOf (3, 2, 2, 1))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (be (..) and contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (be_== (One(1)) and contain oneOf (1, 3, 4))
atLeast (2, lists) should (be_== (One(1)) and contain oneOf (1, 3, 4))
atMost (2, lists) should (be_== (One(1)) and contain oneOf (2, 3, 4))
no (lists) should (be_== (One(8)) and contain oneOf (3, 4, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (be_== (One(1)) and contain oneOf (1, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(One(2)) + " was not equal to " + decorateToStringValue(One(1)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (list1s) should (be_== (One(1)) and contain oneOf (2, 3, 8))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One(1)) + " was equal to " + decorateToStringValue(One(1)) + ", but " + FailureMessages("didNotContainOneOfElements", list1s(0), UnquotedString("2, 3, 8")), thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (be_== (One("hi")) and contain oneOf ("ho", "hey", "howdy"))
}
          checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(One("hi")) + " was equal to " + decorateToStringValue(One("hi")) + ", but " + FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"ho\", \"hey\", \"howdy\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (list1s) should (be_== (One(1)) and contain oneOf (2, 3, 8))
}
checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(One(1)) + " was equal to " + decorateToStringValue(One(1)) + ", but " + FailureMessages("didNotContainOneOfElements", list1s(0), UnquotedString("2, 3, 8")), thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (be_== (One("hi")) and contain oneOf ("HI", "HE"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (be_== (One("ho")) and contain oneOf ("hi", "he"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One("hi")) + " was not equal to " + decorateToStringValue(One("ho")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (be_== (One("hi")) and contain oneOf ("hi", "he"))
}
          checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One("hi")) + " was equal to " + decorateToStringValue(One("hi")) + ", but " + FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"hi\", \"he\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (be_== (One("hi")) and contain oneOf ("HI", "HE"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (be_== (One("ho")) and contain oneOf ("hi", "he"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One("hi")) + " was not equal to " + decorateToStringValue(One("ho")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (be_== (One("hi")) and contain oneOf ("hi", "he"))) (decided by upperCaseStringEquality)
}
          checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One("hi")) + " was equal to " + decorateToStringValue(One("hi")) + ", but " + FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"hi\", \"he\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (be_== (One(1)) and contain oneOf (3, 2, 2, 1))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (not contain oneOf (..) and not contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not contain oneOf (3, 2, 8) and not contain oneOf (8, 3, 4))
atLeast (2, lists) should (not contain oneOf (3, 8, 5) and not contain oneOf (8, 3, 4))
atMost (2, lists) should (not contain oneOf (3, 6, 8) and contain oneOf (5, 3, 4))
no (lists) should (not contain oneOf (1, 2, 9) and not contain oneOf (2, 1, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (not contain oneOf (2, 6, 8) and not contain oneOf (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, FailureMessages("containedOneOfElements", lists(2), UnquotedString("2, 6, 8")), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (lists) should (not contain oneOf (3, 6, 8) and not contain oneOf (2, 3, 4))
}
checkMessageStackDepth(e2, allErrMsg(2, FailureMessages("didNotContainOneOfElements", lists(2), UnquotedString("3, 6, 8")) + ", but " + FailureMessages("containedOneOfElements", lists(2), UnquotedString("2, 3, 4")), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (not contain oneOf ("hi", "hello") and not contain oneOf ("ho", "hey", "howdy"))
}
          checkMessageStackDepth(e3, allErrMsg(0, FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"hi\", \"hello\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (hiLists) should (not contain oneOf ("ho", "hey", "howdy") and not contain oneOf ("hi", "hello"))
}
          checkMessageStackDepth(e4, allErrMsg(0, FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"ho\", \"hey\", \"howdy\"")) + ", but " + FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"hi\", \"hello\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not contain oneOf ("hi", "he") and not contain oneOf ("hi", "he"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not contain oneOf ("HI", "HE") and not contain oneOf ("HO", "HEY", "HOWDY"))
}
          checkMessageStackDepth(e1, allErrMsg(0, FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (not contain oneOf ("HO", "HEY", "HOWDY") and not contain oneOf ("HI", "HE"))
}
          checkMessageStackDepth(e2, allErrMsg(0, FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"HO\", \"HEY\", \"HOWDY\"")) + ", but " + FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not contain oneOf ("hi", "he") and not contain oneOf ("hi", "he"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not contain oneOf ("HI", "HE") and not contain oneOf ("HO", "HEY", "HOWDY"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
          checkMessageStackDepth(e1, allErrMsg(0, FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (not contain oneOf ("HO", "HEY", "HOWDY") and not contain oneOf ("HI", "HE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
          checkMessageStackDepth(e2, allErrMsg(0, FailureMessages("didNotContainOneOfElements", hiLists(0), UnquotedString("\"HO\", \"HEY\", \"HOWDY\"")) + ", but " + FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain oneOf (3, 2, 2, 1) and not contain oneOf (8, 3, 4))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain oneOf (8, 3, 4) and not contain oneOf (3, 2, 2, 1))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources("oneOfDuplicate")))
}
}
object `when used with (not be (..) and not contain oneOf (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not be_== (One(2)) and not contain oneOf (8, 3, 4))
atLeast (2, lists) should (not be_== (One(3)) and not contain oneOf (8, 3, 4))
atMost (2, lists) should (not be_== (One(3)) and contain oneOf (5, 3, 4))
no (list1s) should (not be_== (One(1)) and not contain oneOf (2, 1, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (not be_== (One(2)) and not contain oneOf (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(One(2)) + " was equal to " + decorateToStringValue(One(2)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (lists) should (not be_== (One(3)) and not contain oneOf (2, 3, 4))
}
checkMessageStackDepth(e2, allErrMsg(2, decorateToStringValue(One(2)) + " was not equal to " + decorateToStringValue(One(3)) + ", but " + FailureMessages("containedOneOfElements", lists(2), UnquotedString("2, 3, 4")), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (not be_== (One("hi")) and not contain oneOf ("ho", "hey", "howdy"))
}
checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(One("hi")) + " was equal to " + decorateToStringValue(One("hi")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (hiLists) should (not be_== (One("ho")) and not contain oneOf ("hi", "hello"))
}
          checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(One("hi")) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"hi\", \"hello\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not be_== (One("ho")) and not contain oneOf ("hi", "he"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not be_== (One("hi")) and not contain oneOf ("HI", "HE"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One("hi")) + " was equal to " + decorateToStringValue(One("hi")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (not be_== (One("ho")) and not contain oneOf ("HI", "HE"))
}
          checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One("hi")) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not be_== (One("ho")) and not contain oneOf ("hi", "he"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not be_== (One("hi")) and not contain oneOf ("HI", "HE"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One("hi")) + " was equal to " + decorateToStringValue(One("hi")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (not be_== (One("ho")) and not contain oneOf ("HI", "HE"))) (decided by upperCaseStringEquality)
}
          checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(One("hi")) + " was not equal to " + decorateToStringValue(One("ho")) + ", but " + FailureMessages("containedOneOfElements", hiLists(0), UnquotedString("\"HI\", \"HE\"")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not be_== (One(2)) and not contain oneOf (3, 2, 2, 1))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources("oneOfDuplicate")))
}
}
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalatest/EveryShouldContainOneOfLogicalAndSpec.scala
|
Scala
|
apache-2.0
| 49,773
|
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.util
/**
* Mutable integer map based on open-addressing. Primary use-case is computing
* a count for the number of times a particular value was encountered.
*
* @param noData
* Value to use to represent no data in the array. This value should not
* be used in the input.
* @param capacity
* Initial capacity guideline. The actual size of the underlying buffer
* will be the next prime >= `capacity`. Default is 10.
*/
class IntIntHashMap(noData: Int, capacity: Int = 10) {
private[this] var keys = newArray(capacity)
private[this] var values = new Array[Int](keys.length)
private[this] var used = 0
private[this] var cutoff = computeCutoff(keys.length)
// Set at 50% capacity to get reasonable tradeoff between performance and
// memory use. See IntIntMap benchmark.
private def computeCutoff(n: Int): Int = math.max(3, n / 2)
private def hash(k: Int, length: Int): Int = {
Hash.absOrZero(Hash.lowbias32(k)) % length
}
private def newArray(n: Int): Array[Int] = {
val tmp = new Array[Int](PrimeFinder.nextPrime(n))
var i = 0
while (i < tmp.length) {
tmp(i) = noData
i += 1
}
tmp
}
private def resize(): Unit = {
val tmpKS = newArray(keys.length * 2)
val tmpVS = new Array[Int](tmpKS.length)
var i = 0
while (i < keys.length) {
val k = keys(i)
if (k != noData) put(tmpKS, tmpVS, k, values(i))
i += 1
}
keys = tmpKS
values = tmpVS
cutoff = computeCutoff(tmpKS.length)
}
private def put(ks: Array[Int], vs: Array[Int], k: Int, v: Int): Boolean = {
var pos = hash(k, ks.length)
var posV = ks(pos)
while (posV != noData && posV != k) {
pos = (pos + 1) % ks.length
posV = ks(pos)
}
ks(pos) = k
vs(pos) = v
posV == noData
}
/**
* Put an integer key/value pair into the map. The key, `k`, should not be
* equivalent to the `noData` value used for this map. If an entry with the
* same key already exists, then the value will be overwritten.
*/
def put(k: Int, v: Int): Unit = {
if (used >= cutoff) resize()
if (put(keys, values, k, v)) used += 1
}
/**
* Get the value associated with key, `k`. If no value is present, then the
* `dflt` value will be returned.
*/
def get(k: Int, dflt: Int): Int = {
var pos = hash(k, keys.length)
while (true) {
val prev = keys(pos)
if (prev == noData)
return dflt
else if (prev == k)
return values(pos)
else
pos = (pos + 1) % keys.length
}
dflt
}
/**
* Add one to the count associated with `k`. If the key is not already in the
* map a new entry will be created with a count of 1.
*/
def increment(k: Int): Unit = increment(k, 1)
/**
* Add `amount` to the count associated with `k`. If the key is not already in the
* map a new entry will be created with a count of `amount`.
*/
def increment(k: Int, amount: Int): Unit = {
if (used >= cutoff) resize()
var pos = hash(k, keys.length)
var prev = keys(pos)
while (prev != noData && prev != k) {
pos = (pos + 1) % keys.length
prev = keys(pos)
}
keys(pos) = k
values(pos) += amount
if (prev == noData) used += 1
}
  /** Execute `f` for each key/value pair in the map. */
def foreach(f: (Int, Int) => Unit): Unit = {
var i = 0
while (i < keys.length) {
val k = keys(i)
if (k != noData) f(k, values(i))
i += 1
}
}
  /** Return the number of entries in the map. This is a constant time operation. */
def size: Int = used
  /** Converts this map to an immutable Map[Int, Int]. Used mostly for debugging and tests. */
def toMap: Map[Int, Int] = {
val builder = Map.newBuilder[Int, Int]
foreach { (k, v) =>
builder += k -> v
}
builder.result()
}
}
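// --- Illustrative usage sketch (not part of the upstream Atlas source) ---
// A minimal example of the counting use-case described in the class docs, assuming the
// keys are non-negative so that -1 can safely serve as the noData sentinel.
object IntIntHashMapExample {
  def main(args: Array[String]): Unit = {
    val counts = new IntIntHashMap(noData = -1, capacity = 16)
    val samples = Array(3, 7, 3, 3, 9, 7)
    samples.foreach(v => counts.increment(v)) // add one per occurrence
    counts.increment(9, 4)                    // or bump a key by an arbitrary amount
    println(counts.get(3, 0))                 // 3: the value 3 was seen three times
    println(counts.get(42, 0))                // 0: missing keys fall back to the default
    println(counts.toMap)                     // Map(3 -> 3, 7 -> 2, 9 -> 5), iteration order may vary
  }
}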
|
brharrington/atlas
|
atlas-core/src/main/scala/com/netflix/atlas/core/util/IntIntHashMap.scala
|
Scala
|
apache-2.0
| 4,482
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.util.Random
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasSeed
import org.apache.spark.ml.util._
import org.apache.spark.sql.types.StructType
/**
* :: Experimental ::
*
* Model produced by [[MinHashLSH]], where multiple hash functions are stored. Each hash function
* is picked from the following family of hash functions, where a_i and b_i are randomly chosen
* integers less than prime:
 * `h_i(x) = ((x \cdot a_i + b_i) \mod prime)`
*
* This hash family is approximately min-wise independent according to the reference.
*
* Reference:
* Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear permutations."
* Electronic Journal of Combinatorics 7 (2000): R26.
*
* @param randCoefficients Pairs of random coefficients. Each pair is used by one hash function.
*/
@Experimental
@Since("2.1.0")
class MinHashLSHModel private[ml](
override val uid: String,
private[ml] val randCoefficients: Array[(Int, Int)])
extends LSHModel[MinHashLSHModel] {
@Since("2.1.0")
override protected[ml] val hashFunction: Vector => Array[Vector] = {
elems: Vector => {
      require(elems.numNonzeros > 0, "Must have at least 1 non-zero entry.")
val elemsList = elems.toSparse.indices.toList
val hashValues = randCoefficients.map { case (a, b) =>
elemsList.map { elem: Int =>
((1 + elem) * a + b) % MinHashLSH.HASH_PRIME
}.min.toDouble
}
// TODO: Output vectors of dimension numHashFunctions in SPARK-18450
hashValues.map(Vectors.dense(_))
}
}
@Since("2.1.0")
override protected[ml] def keyDistance(x: Vector, y: Vector): Double = {
val xSet = x.toSparse.indices.toSet
val ySet = y.toSparse.indices.toSet
val intersectionSize = xSet.intersect(ySet).size.toDouble
val unionSize = xSet.size + ySet.size - intersectionSize
    assert(unionSize > 0, "The union of two input sets must have at least 1 element")
1 - intersectionSize / unionSize
}
@Since("2.1.0")
override protected[ml] def hashDistance(x: Seq[Vector], y: Seq[Vector]): Double = {
// Since it's generated by hashing, it will be a pair of dense vectors.
// TODO: This hashDistance function requires more discussion in SPARK-18454
x.zip(y).map(vectorPair =>
vectorPair._1.toArray.zip(vectorPair._2.toArray).count(pair => pair._1 != pair._2)
).min
}
@Since("2.1.0")
override def copy(extra: ParamMap): this.type = defaultCopy(extra)
@Since("2.1.0")
override def write: MLWriter = new MinHashLSHModel.MinHashLSHModelWriter(this)
}
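// --- Illustrative sketch (not part of the upstream Spark source) ---
// Computes the hash family from the class docs by hand for a set given by the non-zero
// indices of a sparse binary vector: each hash function keeps the minimum of
// ((1 + elem) * a + b) mod HASH_PRIME over the elements (hashFunction above shifts each
// index by one before applying a * x + b). It also mirrors keyDistance with a plain
// Jaccard-distance helper. The coefficient values are arbitrary stand-ins for a model's
// randCoefficients.
object MinHashSketch {
  def minHash(indices: Seq[Int], coefficients: Seq[(Int, Int)]): Seq[Double] =
    coefficients.map { case (a, b) =>
      indices.map(elem => ((1 + elem) * a + b) % MinHashLSH.HASH_PRIME).min.toDouble
    }
  // Jaccard distance of two index sets: 1 - |x intersect y| / |x union y|.
  def jaccardDistance(x: Set[Int], y: Set[Int]): Double = {
    val intersectionSize = x.intersect(y).size.toDouble
    1.0 - intersectionSize / (x.size + y.size - intersectionSize)
  }
  def main(args: Array[String]): Unit = {
    val coefficients = Seq((12345, 678), (54321, 98))
    println(minHash(Seq(2, 3, 5), coefficients))         // one min-hash value per hash function
    println(jaccardDistance(Set(2, 3, 5), Set(3, 5, 7))) // 0.5: overlap of 2 out of 4 elements
  }
}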
/**
* :: Experimental ::
*
* LSH class for Jaccard distance.
*
* The input can be dense or sparse vectors, but it is more efficient if it is sparse. For example,
* `Vectors.sparse(10, Array((2, 1.0), (3, 1.0), (5, 1.0)))`
* means there are 10 elements in the space. This set contains elements 2, 3, and 5. Also, any
* input vector must have at least 1 non-zero index, and all non-zero values are
* treated as binary "1" values.
*
* References:
* <a href="https://en.wikipedia.org/wiki/MinHash">Wikipedia on MinHash</a>
*/
@Experimental
@Since("2.1.0")
class MinHashLSH(override val uid: String) extends LSH[MinHashLSHModel] with HasSeed {
@Since("2.1.0")
override def setInputCol(value: String): this.type = super.setInputCol(value)
@Since("2.1.0")
override def setOutputCol(value: String): this.type = super.setOutputCol(value)
@Since("2.1.0")
override def setNumHashTables(value: Int): this.type = super.setNumHashTables(value)
@Since("2.1.0")
def this() = {
this(Identifiable.randomUID("mh-lsh"))
}
/** @group setParam */
@Since("2.1.0")
def setSeed(value: Long): this.type = set(seed, value)
@Since("2.1.0")
override protected[ml] def createRawLSHModel(inputDim: Int): MinHashLSHModel = {
require(inputDim <= MinHashLSH.HASH_PRIME,
s"The input vector dimension $inputDim exceeds the threshold ${MinHashLSH.HASH_PRIME}.")
val rand = new Random($(seed))
val randCoefs: Array[(Int, Int)] = Array.fill($(numHashTables)) {
(1 + rand.nextInt(MinHashLSH.HASH_PRIME - 1), rand.nextInt(MinHashLSH.HASH_PRIME - 1))
}
new MinHashLSHModel(uid, randCoefs)
}
@Since("2.1.0")
override def transformSchema(schema: StructType): StructType = {
SchemaUtils.checkColumnType(schema, $(inputCol), new VectorUDT)
validateAndTransformSchema(schema)
}
@Since("2.1.0")
override def copy(extra: ParamMap): this.type = defaultCopy(extra)
}
@Since("2.1.0")
object MinHashLSH extends DefaultParamsReadable[MinHashLSH] {
// A large prime smaller than sqrt(2^63 − 1)
private[ml] val HASH_PRIME = 2038074743
@Since("2.1.0")
override def load(path: String): MinHashLSH = super.load(path)
}
@Since("2.1.0")
object MinHashLSHModel extends MLReadable[MinHashLSHModel] {
@Since("2.1.0")
override def read: MLReader[MinHashLSHModel] = new MinHashLSHModelReader
@Since("2.1.0")
override def load(path: String): MinHashLSHModel = super.load(path)
private[MinHashLSHModel] class MinHashLSHModelWriter(instance: MinHashLSHModel)
extends MLWriter {
private case class Data(randCoefficients: Array[Int])
override protected def saveImpl(path: String): Unit = {
DefaultParamsWriter.saveMetadata(instance, path, sc)
val data = Data(instance.randCoefficients.flatMap(tuple => Array(tuple._1, tuple._2)))
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class MinHashLSHModelReader extends MLReader[MinHashLSHModel] {
/** Checked against metadata when loading model */
private val className = classOf[MinHashLSHModel].getName
override def load(path: String): MinHashLSHModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.parquet(dataPath).select("randCoefficients").head()
val randCoefficients = data.getAs[Seq[Int]](0).grouped(2)
.map(tuple => (tuple(0), tuple(1))).toArray
val model = new MinHashLSHModel(metadata.uid, randCoefficients)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
}
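// --- Illustrative usage sketch (not part of the upstream Spark source) ---
// End-to-end use of the estimator/model pair above, assuming a local SparkSession is
// available. Each row encodes a set as a sparse binary vector as described in the
// MinHashLSH docs; the column names, seed, and 0.8 distance threshold are arbitrary
// choices made for this example.
object MinHashLSHExample {
  def main(args: Array[String]): Unit = {
    val spark = org.apache.spark.sql.SparkSession.builder()
      .master("local[*]")
      .appName("MinHashLSHExample")
      .getOrCreate()
    import spark.implicits._
    val df = Seq(
      (0, Vectors.sparse(10, Seq((2, 1.0), (3, 1.0), (5, 1.0)))),
      (1, Vectors.sparse(10, Seq((3, 1.0), (5, 1.0), (7, 1.0)))),
      (2, Vectors.sparse(10, Seq((0, 1.0), (1, 1.0), (9, 1.0))))
    ).toDF("id", "features")
    val mh = new MinHashLSH()
      .setNumHashTables(3)
      .setInputCol("features")
      .setOutputCol("hashes")
      .setSeed(12345L)
    val model = mh.fit(df)
    model.transform(df).show(truncate = false)                                        // adds the "hashes" column
    model.approxSimilarityJoin(df, df, 0.8, "JaccardDistance").show(truncate = false) // self-join on approximate Jaccard distance
    spark.stop()
  }
}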
|
ZxlAaron/mypros
|
mllib/src/main/scala/org/apache/spark/ml/feature/MinHashLSH.scala
|
Scala
|
apache-2.0
| 7,303
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import com.yammer.metrics.core.Metric
import kafka.api.{ApiVersion, KAFKA_2_6_IV0}
import kafka.common.UnexpectedAppendOffsetException
import kafka.log.{Defaults => _, _}
import kafka.metrics.KafkaYammerMetrics
import kafka.server._
import kafka.server.checkpoints.OffsetCheckpoints
import kafka.utils._
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.errors.{ApiException, InconsistentTopicIdException, NotLeaderOrFollowerException, OffsetNotAvailableException, OffsetOutOfRangeException}
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.ListOffsetsRequest
import org.apache.kafka.common.utils.SystemTime
import org.apache.kafka.common.{IsolationLevel, TopicPartition, Uuid}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import org.mockito.ArgumentMatchers
import org.mockito.ArgumentMatchers.{any, anyString}
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import java.nio.ByteBuffer
import java.util.Optional
import java.util.concurrent.{CountDownLatch, Semaphore}
import kafka.server.epoch.LeaderEpochFileCache
import scala.jdk.CollectionConverters._
class PartitionTest extends AbstractPartitionTest {
@Test
def testLastFetchedOffsetValidation(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
def append(leaderEpoch: Int, count: Int): Unit = {
val recordArray = (1 to count).map { i =>
new SimpleRecord(s"$i".getBytes)
}
val records = MemoryRecords.withRecords(0L, CompressionType.NONE, leaderEpoch,
recordArray: _*)
log.appendAsLeader(records, leaderEpoch = leaderEpoch)
}
append(leaderEpoch = 0, count = 2) // 0
append(leaderEpoch = 3, count = 3) // 2
append(leaderEpoch = 3, count = 3) // 5
append(leaderEpoch = 4, count = 5) // 8
append(leaderEpoch = 7, count = 1) // 13
append(leaderEpoch = 9, count = 3) // 14
assertEquals(17L, log.logEndOffset)
val leaderEpoch = 10
val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true)
def epochEndOffset(epoch: Int, endOffset: Long): FetchResponseData.EpochEndOffset = {
new FetchResponseData.EpochEndOffset()
.setEpoch(epoch)
.setEndOffset(endOffset)
}
def read(lastFetchedEpoch: Int, fetchOffset: Long): LogReadInfo = {
partition.readRecords(
Optional.of(lastFetchedEpoch),
fetchOffset,
currentLeaderEpoch = Optional.of(leaderEpoch),
maxBytes = Int.MaxValue,
fetchIsolation = FetchLogEnd,
fetchOnlyFromLeader = true,
minOneMessage = true
)
}
def assertDivergence(
divergingEpoch: FetchResponseData.EpochEndOffset,
readInfo: LogReadInfo
): Unit = {
assertEquals(Some(divergingEpoch), readInfo.divergingEpoch)
assertEquals(0, readInfo.fetchedData.records.sizeInBytes)
}
def assertNoDivergence(readInfo: LogReadInfo): Unit = {
assertEquals(None, readInfo.divergingEpoch)
}
assertDivergence(epochEndOffset(epoch = 0, endOffset = 2), read(lastFetchedEpoch = 2, fetchOffset = 5))
    assertDivergence(epochEndOffset(epoch = 0, endOffset = 2), read(lastFetchedEpoch = 0, fetchOffset = 4))
assertDivergence(epochEndOffset(epoch = 4, endOffset = 13), read(lastFetchedEpoch = 6, fetchOffset = 6))
assertDivergence(epochEndOffset(epoch = 4, endOffset = 13), read(lastFetchedEpoch = 5, fetchOffset = 9))
assertDivergence(epochEndOffset(epoch = 10, endOffset = 17), read(lastFetchedEpoch = 10, fetchOffset = 18))
assertNoDivergence(read(lastFetchedEpoch = 0, fetchOffset = 2))
assertNoDivergence(read(lastFetchedEpoch = 7, fetchOffset = 14))
assertNoDivergence(read(lastFetchedEpoch = 9, fetchOffset = 17))
assertNoDivergence(read(lastFetchedEpoch = 10, fetchOffset = 17))
// Reads from epochs larger than we know about should cause an out of range error
assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 11, fetchOffset = 5))
// Move log start offset to the middle of epoch 3
log.updateHighWatermark(log.logEndOffset)
log.maybeIncrementLogStartOffset(newLogStartOffset = 5L, ClientRecordDeletion)
assertDivergence(epochEndOffset(epoch = 2, endOffset = 5), read(lastFetchedEpoch = 2, fetchOffset = 8))
assertNoDivergence(read(lastFetchedEpoch = 0, fetchOffset = 5))
assertNoDivergence(read(lastFetchedEpoch = 3, fetchOffset = 5))
assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 0, fetchOffset = 0))
// Fetch offset lower than start offset should throw OffsetOutOfRangeException
log.maybeIncrementLogStartOffset(newLogStartOffset = 10, ClientRecordDeletion)
assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 5, fetchOffset = 6)) // diverging
assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 3, fetchOffset = 6)) // not diverging
}
@Test
def testMakeLeaderUpdatesEpochCache(): Unit = {
val leaderEpoch = 8
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
log.appendAsLeader(MemoryRecords.withRecords(0L, CompressionType.NONE, 0,
new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes)
), leaderEpoch = 0)
log.appendAsLeader(MemoryRecords.withRecords(0L, CompressionType.NONE, 5,
new SimpleRecord("k3".getBytes, "v3".getBytes),
new SimpleRecord("k4".getBytes, "v4".getBytes)
), leaderEpoch = 5)
assertEquals(4, log.logEndOffset)
val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true)
assertEquals(Some(4), partition.leaderLogIfLocal.map(_.logEndOffset))
val epochEndOffset = partition.lastOffsetForLeaderEpoch(currentLeaderEpoch = Optional.of[Integer](leaderEpoch),
leaderEpoch = leaderEpoch, fetchOnlyFromLeader = true)
assertEquals(4, epochEndOffset.endOffset)
assertEquals(leaderEpoch, epochEndOffset.leaderEpoch)
}
// Verify that partition.removeFutureLocalReplica() and partition.maybeReplaceCurrentWithFutureReplica() can run concurrently
@Test
def testMaybeReplaceCurrentWithFutureReplica(): Unit = {
val latch = new CountDownLatch(1)
logManager.maybeUpdatePreferredLogDir(topicPartition, logDir1.getAbsolutePath)
partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, None)
logManager.maybeUpdatePreferredLogDir(topicPartition, logDir2.getAbsolutePath)
partition.maybeCreateFutureReplica(logDir2.getAbsolutePath, offsetCheckpoints)
val thread1 = new Thread {
override def run(): Unit = {
latch.await()
partition.removeFutureLocalReplica()
}
}
val thread2 = new Thread {
override def run(): Unit = {
latch.await()
partition.maybeReplaceCurrentWithFutureReplica()
}
}
thread1.start()
thread2.start()
latch.countDown()
thread1.join()
thread2.join()
assertEquals(None, partition.futureLog)
}
// Verify that partition.makeFollower() and partition.appendRecordsToFollowerOrFutureReplica() can run concurrently
@Test
def testMakeFollowerWithWithFollowerAppendRecords(): Unit = {
val appendSemaphore = new Semaphore(0)
val mockTime = new MockTime()
partition = new Partition(
topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = ApiVersion.latestVersion,
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
logManager,
alterIsrManager) {
override def createLog(isNew: Boolean, isFutureReplica: Boolean, offsetCheckpoints: OffsetCheckpoints, topicId: Option[Uuid]): UnifiedLog = {
val log = super.createLog(isNew, isFutureReplica, offsetCheckpoints, None)
val logDirFailureChannel = new LogDirFailureChannel(1)
val segments = new LogSegments(log.topicPartition)
val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(log.dir, log.topicPartition, logDirFailureChannel, log.config.recordVersion, "")
val maxTransactionTimeoutMs = 5 * 60 * 1000
val maxProducerIdExpirationMs = 60 * 60 * 1000
val producerStateManager = new ProducerStateManager(
log.topicPartition,
log.dir,
maxTransactionTimeoutMs,
maxProducerIdExpirationMs,
mockTime
)
val offsets = new LogLoader(
log.dir,
log.topicPartition,
log.config,
mockTime.scheduler,
mockTime,
logDirFailureChannel,
hadCleanShutdown = true,
segments = segments,
logStartOffsetCheckpoint = 0L,
recoveryPointCheckpoint = 0L,
leaderEpochCache,
producerStateManager
).load()
val localLog = new LocalLog(log.dir, log.config, segments, offsets.recoveryPoint,
offsets.nextOffsetMetadata, mockTime.scheduler, mockTime, log.topicPartition,
logDirFailureChannel)
new SlowLog(log, offsets.logStartOffset, localLog, leaderEpochCache, producerStateManager, appendSemaphore)
}
}
partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, None)
val appendThread = new Thread {
override def run(): Unit = {
val records = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes)),
baseOffset = 0)
partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false)
}
}
appendThread.start()
TestUtils.waitUntilTrue(() => appendSemaphore.hasQueuedThreads, "follower log append is not called.")
val partitionState = new LeaderAndIsrPartitionState()
.setControllerEpoch(0)
.setLeader(2)
.setLeaderEpoch(1)
.setIsr(List[Integer](0, 1, 2, brokerId).asJava)
.setZkVersion(1)
.setReplicas(List[Integer](0, 1, 2, brokerId).asJava)
.setIsNew(false)
assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None))
appendSemaphore.release()
appendThread.join()
assertEquals(2L, partition.localLogOrException.logEndOffset)
assertEquals(2L, partition.leaderReplicaIdOpt.get)
}
@Test
// Verify that replacement works when the replicas have the same log end offset but different base offsets in the
// active segment
def testMaybeReplaceCurrentWithFutureReplicaDifferentBaseOffsets(): Unit = {
logManager.maybeUpdatePreferredLogDir(topicPartition, logDir1.getAbsolutePath)
partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, None)
logManager.maybeUpdatePreferredLogDir(topicPartition, logDir2.getAbsolutePath)
partition.maybeCreateFutureReplica(logDir2.getAbsolutePath, offsetCheckpoints)
// Write records with duplicate keys to current replica and roll at offset 6
val currentLog = partition.log.get
currentLog.appendAsLeader(MemoryRecords.withRecords(0L, CompressionType.NONE, 0,
new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k1".getBytes, "v2".getBytes),
new SimpleRecord("k1".getBytes, "v3".getBytes),
new SimpleRecord("k2".getBytes, "v4".getBytes),
new SimpleRecord("k2".getBytes, "v5".getBytes),
new SimpleRecord("k2".getBytes, "v6".getBytes)
), leaderEpoch = 0)
currentLog.roll()
currentLog.appendAsLeader(MemoryRecords.withRecords(0L, CompressionType.NONE, 0,
new SimpleRecord("k3".getBytes, "v7".getBytes),
new SimpleRecord("k4".getBytes, "v8".getBytes)
), leaderEpoch = 0)
// Write to the future replica as if the log had been compacted, and do not roll the segment
val buffer = ByteBuffer.allocate(1024)
val builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, 0)
builder.appendWithOffset(2L, new SimpleRecord("k1".getBytes, "v3".getBytes))
builder.appendWithOffset(5L, new SimpleRecord("k2".getBytes, "v6".getBytes))
builder.appendWithOffset(6L, new SimpleRecord("k3".getBytes, "v7".getBytes))
builder.appendWithOffset(7L, new SimpleRecord("k4".getBytes, "v8".getBytes))
val futureLog = partition.futureLocalLogOrException
futureLog.appendAsFollower(builder.build())
assertTrue(partition.maybeReplaceCurrentWithFutureReplica())
}
@Test
def testFetchOffsetSnapshotEpochValidationForLeader(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true)
def assertSnapshotError(expectedError: Errors, currentLeaderEpoch: Optional[Integer]): Unit = {
try {
partition.fetchOffsetSnapshot(currentLeaderEpoch, fetchOnlyFromLeader = true)
assertEquals(Errors.NONE, expectedError)
} catch {
case error: ApiException => assertEquals(expectedError, Errors.forException(error))
}
}
assertSnapshotError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1))
assertSnapshotError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1))
assertSnapshotError(Errors.NONE, Optional.of(leaderEpoch))
assertSnapshotError(Errors.NONE, Optional.empty())
}
@Test
def testFetchOffsetSnapshotEpochValidationForFollower(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = false)
def assertSnapshotError(expectedError: Errors,
currentLeaderEpoch: Optional[Integer],
fetchOnlyLeader: Boolean): Unit = {
try {
partition.fetchOffsetSnapshot(currentLeaderEpoch, fetchOnlyFromLeader = fetchOnlyLeader)
assertEquals(Errors.NONE, expectedError)
} catch {
case error: ApiException => assertEquals(expectedError, Errors.forException(error))
}
}
assertSnapshotError(Errors.NONE, Optional.of(leaderEpoch), fetchOnlyLeader = false)
assertSnapshotError(Errors.NONE, Optional.empty(), fetchOnlyLeader = false)
assertSnapshotError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = false)
assertSnapshotError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = false)
assertSnapshotError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.of(leaderEpoch), fetchOnlyLeader = true)
assertSnapshotError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.empty(), fetchOnlyLeader = true)
assertSnapshotError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = true)
assertSnapshotError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = true)
}
@Test
def testOffsetForLeaderEpochValidationForLeader(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true)
def assertLastOffsetForLeaderError(error: Errors, currentLeaderEpochOpt: Optional[Integer]): Unit = {
val endOffset = partition.lastOffsetForLeaderEpoch(currentLeaderEpochOpt, 0,
fetchOnlyFromLeader = true)
assertEquals(error.code, endOffset.errorCode)
}
assertLastOffsetForLeaderError(Errors.NONE, Optional.empty())
assertLastOffsetForLeaderError(Errors.NONE, Optional.of(leaderEpoch))
assertLastOffsetForLeaderError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1))
assertLastOffsetForLeaderError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1))
}
@Test
def testOffsetForLeaderEpochValidationForFollower(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = false)
def assertLastOffsetForLeaderError(error: Errors,
currentLeaderEpochOpt: Optional[Integer],
fetchOnlyLeader: Boolean): Unit = {
val endOffset = partition.lastOffsetForLeaderEpoch(currentLeaderEpochOpt, 0,
fetchOnlyFromLeader = fetchOnlyLeader)
assertEquals(error.code, endOffset.errorCode)
}
assertLastOffsetForLeaderError(Errors.NONE, Optional.empty(), fetchOnlyLeader = false)
assertLastOffsetForLeaderError(Errors.NONE, Optional.of(leaderEpoch), fetchOnlyLeader = false)
assertLastOffsetForLeaderError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = false)
assertLastOffsetForLeaderError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = false)
assertLastOffsetForLeaderError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.empty(), fetchOnlyLeader = true)
assertLastOffsetForLeaderError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.of(leaderEpoch), fetchOnlyLeader = true)
assertLastOffsetForLeaderError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = true)
assertLastOffsetForLeaderError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = true)
}
@Test
def testReadRecordEpochValidationForLeader(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true)
def assertReadRecordsError(error: Errors,
currentLeaderEpochOpt: Optional[Integer]): Unit = {
try {
partition.readRecords(
lastFetchedEpoch = Optional.empty(),
fetchOffset = 0L,
currentLeaderEpoch = currentLeaderEpochOpt,
maxBytes = 1024,
fetchIsolation = FetchLogEnd,
fetchOnlyFromLeader = true,
minOneMessage = false)
if (error != Errors.NONE)
fail(s"Expected readRecords to fail with error $error")
} catch {
case e: Exception =>
assertEquals(error, Errors.forException(e))
}
}
assertReadRecordsError(Errors.NONE, Optional.empty())
assertReadRecordsError(Errors.NONE, Optional.of(leaderEpoch))
assertReadRecordsError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1))
assertReadRecordsError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1))
}
@Test
def testReadRecordEpochValidationForFollower(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = false)
def assertReadRecordsError(error: Errors,
currentLeaderEpochOpt: Optional[Integer],
fetchOnlyLeader: Boolean): Unit = {
try {
partition.readRecords(
lastFetchedEpoch = Optional.empty(),
fetchOffset = 0L,
currentLeaderEpoch = currentLeaderEpochOpt,
maxBytes = 1024,
fetchIsolation = FetchLogEnd,
fetchOnlyFromLeader = fetchOnlyLeader,
minOneMessage = false)
if (error != Errors.NONE)
fail(s"Expected readRecords to fail with error $error")
} catch {
case e: Exception =>
assertEquals(error, Errors.forException(e))
}
}
assertReadRecordsError(Errors.NONE, Optional.empty(), fetchOnlyLeader = false)
assertReadRecordsError(Errors.NONE, Optional.of(leaderEpoch), fetchOnlyLeader = false)
assertReadRecordsError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = false)
assertReadRecordsError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = false)
assertReadRecordsError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.empty(), fetchOnlyLeader = true)
assertReadRecordsError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.of(leaderEpoch), fetchOnlyLeader = true)
assertReadRecordsError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = true)
assertReadRecordsError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = true)
}
@Test
def testFetchOffsetForTimestampEpochValidationForLeader(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true)
def assertFetchOffsetError(error: Errors,
currentLeaderEpochOpt: Optional[Integer]): Unit = {
try {
partition.fetchOffsetForTimestamp(0L,
isolationLevel = None,
currentLeaderEpoch = currentLeaderEpochOpt,
fetchOnlyFromLeader = true)
if (error != Errors.NONE)
fail(s"Expected readRecords to fail with error $error")
} catch {
case e: Exception =>
assertEquals(error, Errors.forException(e))
}
}
assertFetchOffsetError(Errors.NONE, Optional.empty())
assertFetchOffsetError(Errors.NONE, Optional.of(leaderEpoch))
assertFetchOffsetError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1))
assertFetchOffsetError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1))
}
@Test
def testFetchOffsetForTimestampEpochValidationForFollower(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = false)
def assertFetchOffsetError(error: Errors,
currentLeaderEpochOpt: Optional[Integer],
fetchOnlyLeader: Boolean): Unit = {
try {
partition.fetchOffsetForTimestamp(0L,
isolationLevel = None,
currentLeaderEpoch = currentLeaderEpochOpt,
fetchOnlyFromLeader = fetchOnlyLeader)
if (error != Errors.NONE)
fail(s"Expected readRecords to fail with error $error")
} catch {
case e: Exception =>
assertEquals(error, Errors.forException(e))
}
}
assertFetchOffsetError(Errors.NONE, Optional.empty(), fetchOnlyLeader = false)
assertFetchOffsetError(Errors.NONE, Optional.of(leaderEpoch), fetchOnlyLeader = false)
assertFetchOffsetError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = false)
assertFetchOffsetError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = false)
assertFetchOffsetError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.empty(), fetchOnlyLeader = true)
assertFetchOffsetError(Errors.NOT_LEADER_OR_FOLLOWER, Optional.of(leaderEpoch), fetchOnlyLeader = true)
assertFetchOffsetError(Errors.FENCED_LEADER_EPOCH, Optional.of(leaderEpoch - 1), fetchOnlyLeader = true)
assertFetchOffsetError(Errors.UNKNOWN_LEADER_EPOCH, Optional.of(leaderEpoch + 1), fetchOnlyLeader = true)
}
@Test
def testFetchLatestOffsetIncludesLeaderEpoch(): Unit = {
val leaderEpoch = 5
val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true)
val timestampAndOffsetOpt = partition.fetchOffsetForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP,
isolationLevel = None,
currentLeaderEpoch = Optional.empty(),
fetchOnlyFromLeader = true)
assertTrue(timestampAndOffsetOpt.isDefined)
val timestampAndOffset = timestampAndOffsetOpt.get
assertEquals(leaderEpoch, timestampAndOffset.leaderEpoch.get)
}
/**
* This test checks that after a new leader election, we don't answer any ListOffsetsRequest until
* the HW of the new leader has caught up to its startLogOffset for this epoch. From a client
* perspective this helps guarantee monotonic offsets
*
* @see <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-207%3A+Offsets+returned+by+ListOffsetsResponse+should+be+monotonically+increasing+even+during+a+partition+leader+change">KIP-207</a>
*/
@Test
def testMonotonicOffsetsAfterLeaderChange(): Unit = {
val controllerEpoch = 3
val leader = brokerId
val follower1 = brokerId + 1
val follower2 = brokerId + 2
val replicas = List(leader, follower1, follower2)
val isr = List[Integer](leader, follower2).asJava
val leaderEpoch = 8
val batch1 = TestUtils.records(records = List(
new SimpleRecord(10, "k1".getBytes, "v1".getBytes),
new SimpleRecord(11,"k2".getBytes, "v2".getBytes)))
val batch2 = TestUtils.records(records = List(new SimpleRecord("k3".getBytes, "v1".getBytes),
new SimpleRecord(20,"k4".getBytes, "v2".getBytes),
new SimpleRecord(21,"k5".getBytes, "v3".getBytes)))
val leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(leader)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas.map(Int.box).asJava)
.setIsNew(true)
assertTrue(partition.makeLeader(leaderState, offsetCheckpoints, None), "Expected first makeLeader() to return 'leader changed'")
assertEquals(leaderEpoch, partition.getLeaderEpoch, "Current leader epoch")
assertEquals(Set[Integer](leader, follower2), partition.isrState.isr, "ISR")
val requestLocal = RequestLocal.withThreadConfinedCaching
    // after the makeLeader() call, the partition should know about all the replicas
// append records with initial leader epoch
partition.appendRecordsToLeader(batch1, origin = AppendOrigin.Client, requiredAcks = 0, requestLocal)
partition.appendRecordsToLeader(batch2, origin = AppendOrigin.Client, requiredAcks = 0, requestLocal)
assertEquals(partition.localLogOrException.logStartOffset, partition.localLogOrException.highWatermark,
"Expected leader's HW not move")
// let the follower in ISR move leader's HW to move further but below LEO
def updateFollowerFetchState(followerId: Int, fetchOffsetMetadata: LogOffsetMetadata): Unit = {
partition.updateFollowerFetchState(
followerId,
followerFetchOffsetMetadata = fetchOffsetMetadata,
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = partition.localLogOrException.logEndOffset)
}
def fetchOffsetsForTimestamp(timestamp: Long, isolation: Option[IsolationLevel]): Either[ApiException, Option[TimestampAndOffset]] = {
try {
Right(partition.fetchOffsetForTimestamp(
timestamp = timestamp,
isolationLevel = isolation,
currentLeaderEpoch = Optional.of(partition.getLeaderEpoch),
fetchOnlyFromLeader = true
))
} catch {
case e: ApiException => Left(e)
}
}
updateFollowerFetchState(follower1, LogOffsetMetadata(0))
updateFollowerFetchState(follower1, LogOffsetMetadata(2))
updateFollowerFetchState(follower2, LogOffsetMetadata(0))
updateFollowerFetchState(follower2, LogOffsetMetadata(2))
// Simulate successful ISR update
alterIsrManager.completeIsrUpdate(2)
// At this point, the leader has gotten 5 writes, but followers have only fetched two
assertEquals(2, partition.localLogOrException.highWatermark)
// Get the LEO
fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, None) match {
case Right(Some(offsetAndTimestamp)) => assertEquals(5, offsetAndTimestamp.offset)
case Right(None) => fail("Should have seen some offsets")
case Left(e) => fail("Should not have seen an error")
}
// Get the HW
fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(IsolationLevel.READ_UNCOMMITTED)) match {
case Right(Some(offsetAndTimestamp)) => assertEquals(2, offsetAndTimestamp.offset)
case Right(None) => fail("Should have seen some offsets")
case Left(e) => fail("Should not have seen an error")
}
    // Get an offset beyond the HW by timestamp, expect None
assertEquals(Right(None), fetchOffsetsForTimestamp(30, Some(IsolationLevel.READ_UNCOMMITTED)))
// Make into a follower
val followerState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(follower2)
.setLeaderEpoch(leaderEpoch + 1)
.setIsr(isr)
.setZkVersion(4)
.setReplicas(replicas.map(Int.box).asJava)
.setIsNew(false)
assertTrue(partition.makeFollower(followerState, offsetCheckpoints, None))
    // Back to leader; this resets the startLogOffset for this epoch (to 2), so we're now in the fault condition
val newLeaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(leader)
.setLeaderEpoch(leaderEpoch + 2)
.setIsr(isr)
.setZkVersion(5)
.setReplicas(replicas.map(Int.box).asJava)
.setIsNew(false)
assertTrue(partition.makeLeader(newLeaderState, offsetCheckpoints, None))
// Try to get offsets as a client
fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(IsolationLevel.READ_UNCOMMITTED)) match {
case Right(Some(offsetAndTimestamp)) => fail("Should have failed with OffsetNotAvailable")
case Right(None) => fail("Should have seen an error")
case Left(e: OffsetNotAvailableException) => // ok
case Left(e: ApiException) => fail(s"Expected OffsetNotAvailableException, got $e")
}
// If request is not from a client, we skip the check
fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, None) match {
case Right(Some(offsetAndTimestamp)) => assertEquals(5, offsetAndTimestamp.offset)
case Right(None) => fail("Should have seen some offsets")
case Left(e: ApiException) => fail(s"Got ApiException $e")
}
// If we request the earliest timestamp, we skip the check
fetchOffsetsForTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Some(IsolationLevel.READ_UNCOMMITTED)) match {
case Right(Some(offsetAndTimestamp)) => assertEquals(0, offsetAndTimestamp.offset)
case Right(None) => fail("Should have seen some offsets")
case Left(e: ApiException) => fail(s"Got ApiException $e")
}
// If we request an offset by timestamp earlier than the HW, we are ok
fetchOffsetsForTimestamp(11, Some(IsolationLevel.READ_UNCOMMITTED)) match {
case Right(Some(offsetAndTimestamp)) =>
assertEquals(1, offsetAndTimestamp.offset)
assertEquals(11, offsetAndTimestamp.timestamp)
case Right(None) => fail("Should have seen some offsets")
case Left(e: ApiException) => fail(s"Got ApiException $e")
}
// Request an offset by timestamp beyond the HW, get an error now since we're in a bad state
fetchOffsetsForTimestamp(100, Some(IsolationLevel.READ_UNCOMMITTED)) match {
case Right(Some(offsetAndTimestamp)) => fail("Should have failed")
case Right(None) => fail("Should have failed")
case Left(e: OffsetNotAvailableException) => // ok
case Left(e: ApiException) => fail(s"Should have seen OffsetNotAvailableException, saw $e")
}
    // Next fetch from replicas moves the HW up to 5 (the LEO)
updateFollowerFetchState(follower1, LogOffsetMetadata(5))
updateFollowerFetchState(follower2, LogOffsetMetadata(5))
// Simulate successful ISR update
alterIsrManager.completeIsrUpdate(6)
// Error goes away
fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(IsolationLevel.READ_UNCOMMITTED)) match {
case Right(Some(offsetAndTimestamp)) => assertEquals(5, offsetAndTimestamp.offset)
case Right(None) => fail("Should have seen some offsets")
case Left(e: ApiException) => fail(s"Got ApiException $e")
}
    // Now we see None instead of an error for an out-of-range timestamp
assertEquals(Right(None), fetchOffsetsForTimestamp(100, Some(IsolationLevel.READ_UNCOMMITTED)))
}
@Test
def testAppendRecordsAsFollowerBelowLogStartOffset(): Unit = {
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val log = partition.localLogOrException
val initialLogStartOffset = 5L
partition.truncateFullyAndStartAt(initialLogStartOffset, isFuture = false)
assertEquals(initialLogStartOffset, log.logEndOffset,
s"Log end offset after truncate fully and start at $initialLogStartOffset:")
assertEquals(initialLogStartOffset, log.logStartOffset,
s"Log start offset after truncate fully and start at $initialLogStartOffset:")
    // verify that we cannot append records whose offset range does not contain the log start offset, even if the log is empty
assertThrows(classOf[UnexpectedAppendOffsetException], () =>
// append one record with offset = 3
partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 3L), isFuture = false)
)
assertEquals(initialLogStartOffset, log.logEndOffset,
s"Log end offset should not change after failure to append")
    // verify that we can append records that contain the log start offset, even when the first
    // offset < log start offset, if the log is empty
val newLogStartOffset = 4L
val records = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes),
new SimpleRecord("k3".getBytes, "v3".getBytes)),
baseOffset = newLogStartOffset)
partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false)
assertEquals(7L, log.logEndOffset, s"Log end offset after append of 3 records with base offset $newLogStartOffset:")
assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset after append of 3 records with base offset $newLogStartOffset:")
// and we can append more records after that
partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 7L), isFuture = false)
assertEquals(8L, log.logEndOffset, s"Log end offset after append of 1 record at offset 7:")
assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset not expected to change:")
// but we cannot append to offset < log start if the log is not empty
val records2 = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes)),
baseOffset = 3L)
assertThrows(classOf[UnexpectedAppendOffsetException], () => partition.appendRecordsToFollowerOrFutureReplica(records2, isFuture = false))
assertEquals(8L, log.logEndOffset, s"Log end offset should not change after failure to append")
// we still can append to next offset
partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 8L), isFuture = false)
assertEquals(9L, log.logEndOffset, s"Log end offset after append of 1 record at offset 8:")
assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset not expected to change:")
}
@Test
def testListOffsetIsolationLevels(): Unit = {
val controllerEpoch = 0
val leaderEpoch = 5
val replicas = List[Integer](brokerId, brokerId + 1).asJava
val isr = replicas
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
assertTrue(partition.makeLeader(new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed")
assertEquals(leaderEpoch, partition.getLeaderEpoch)
val records = createTransactionalRecords(List(
new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes),
new SimpleRecord("k3".getBytes, "v3".getBytes)),
baseOffset = 0L)
partition.appendRecordsToLeader(records, origin = AppendOrigin.Client, requiredAcks = 0, RequestLocal.withThreadConfinedCaching)
def fetchLatestOffset(isolationLevel: Option[IsolationLevel]): TimestampAndOffset = {
val res = partition.fetchOffsetForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP,
isolationLevel = isolationLevel,
currentLeaderEpoch = Optional.empty(),
fetchOnlyFromLeader = true)
assertTrue(res.isDefined)
res.get
}
def fetchEarliestOffset(isolationLevel: Option[IsolationLevel]): TimestampAndOffset = {
val res = partition.fetchOffsetForTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP,
isolationLevel = isolationLevel,
currentLeaderEpoch = Optional.empty(),
fetchOnlyFromLeader = true)
assertTrue(res.isDefined)
res.get
}
assertEquals(3L, fetchLatestOffset(isolationLevel = None).offset)
assertEquals(0L, fetchLatestOffset(isolationLevel = Some(IsolationLevel.READ_UNCOMMITTED)).offset)
assertEquals(0L, fetchLatestOffset(isolationLevel = Some(IsolationLevel.READ_COMMITTED)).offset)
partition.log.get.updateHighWatermark(1L)
assertEquals(3L, fetchLatestOffset(isolationLevel = None).offset)
assertEquals(1L, fetchLatestOffset(isolationLevel = Some(IsolationLevel.READ_UNCOMMITTED)).offset)
assertEquals(0L, fetchLatestOffset(isolationLevel = Some(IsolationLevel.READ_COMMITTED)).offset)
assertEquals(0L, fetchEarliestOffset(isolationLevel = None).offset)
assertEquals(0L, fetchEarliestOffset(isolationLevel = Some(IsolationLevel.READ_UNCOMMITTED)).offset)
assertEquals(0L, fetchEarliestOffset(isolationLevel = Some(IsolationLevel.READ_COMMITTED)).offset)
}
@Test
def testGetReplica(): Unit = {
assertEquals(None, partition.log)
assertThrows(classOf[NotLeaderOrFollowerException], () =>
partition.localLogOrException
)
}
@Test
def testAppendRecordsToFollowerWithNoReplicaThrowsException(): Unit = {
assertThrows(classOf[NotLeaderOrFollowerException], () =>
partition.appendRecordsToFollowerOrFutureReplica(
createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 0L), isFuture = false)
)
}
@Test
def testMakeFollowerWithNoLeaderIdChange(): Unit = {
// Start off as follower
var partitionState = new LeaderAndIsrPartitionState()
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(1)
.setIsr(List[Integer](0, 1, 2, brokerId).asJava)
.setZkVersion(1)
.setReplicas(List[Integer](0, 1, 2, brokerId).asJava)
.setIsNew(false)
partition.makeFollower(partitionState, offsetCheckpoints, None)
    // Request with the same leader and a newer epoch, do become-follower steps
partitionState = new LeaderAndIsrPartitionState()
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(4)
.setIsr(List[Integer](0, 1, 2, brokerId).asJava)
.setZkVersion(1)
.setReplicas(List[Integer](0, 1, 2, brokerId).asJava)
.setIsNew(false)
assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None))
// Request with same leader and same epoch, skip become-follower steps
partitionState = new LeaderAndIsrPartitionState()
.setControllerEpoch(0)
.setLeader(1)
.setLeaderEpoch(4)
.setIsr(List[Integer](0, 1, 2, brokerId).asJava)
.setZkVersion(1)
.setReplicas(List[Integer](0, 1, 2, brokerId).asJava)
assertFalse(partition.makeFollower(partitionState, offsetCheckpoints, None))
}
@Test
def testFollowerDoesNotJoinISRUntilCaughtUpToOffsetWithinCurrentLeaderEpoch(): Unit = {
val controllerEpoch = 3
val leader = brokerId
val follower1 = brokerId + 1
val follower2 = brokerId + 2
val replicas = List[Integer](leader, follower1, follower2).asJava
val isr = List[Integer](leader, follower2).asJava
val leaderEpoch = 8
val batch1 = TestUtils.records(records = List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes)))
val batch2 = TestUtils.records(records = List(new SimpleRecord("k3".getBytes, "v1".getBytes),
new SimpleRecord("k4".getBytes, "v2".getBytes),
new SimpleRecord("k5".getBytes, "v3".getBytes)))
val batch3 = TestUtils.records(records = List(new SimpleRecord("k6".getBytes, "v1".getBytes),
new SimpleRecord("k7".getBytes, "v2".getBytes)))
val leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(leader)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(true)
assertTrue(partition.makeLeader(leaderState, offsetCheckpoints, None), "Expected first makeLeader() to return 'leader changed'")
assertEquals(leaderEpoch, partition.getLeaderEpoch, "Current leader epoch")
assertEquals(Set[Integer](leader, follower2), partition.isrState.isr, "ISR")
val requestLocal = RequestLocal.withThreadConfinedCaching
    // after the makeLeader() call, the partition should know about all the replicas
// append records with initial leader epoch
val lastOffsetOfFirstBatch = partition.appendRecordsToLeader(batch1, origin = AppendOrigin.Client,
requiredAcks = 0, requestLocal).lastOffset
partition.appendRecordsToLeader(batch2, origin = AppendOrigin.Client, requiredAcks = 0, requestLocal)
    assertEquals(partition.localLogOrException.logStartOffset, partition.log.get.highWatermark, "Expected leader's HW not to move")
    // let the follower in the ISR move the leader's HW further, but still below the LEO
def updateFollowerFetchState(followerId: Int, fetchOffsetMetadata: LogOffsetMetadata): Unit = {
partition.updateFollowerFetchState(
followerId,
followerFetchOffsetMetadata = fetchOffsetMetadata,
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = partition.localLogOrException.logEndOffset)
}
updateFollowerFetchState(follower2, LogOffsetMetadata(0))
updateFollowerFetchState(follower2, LogOffsetMetadata(lastOffsetOfFirstBatch))
assertEquals(lastOffsetOfFirstBatch, partition.log.get.highWatermark, "Expected leader's HW")
// current leader becomes follower and then leader again (without any new records appended)
val followerState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(follower2)
.setLeaderEpoch(leaderEpoch + 1)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(false)
partition.makeFollower(followerState, offsetCheckpoints, None)
val newLeaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(leader)
.setLeaderEpoch(leaderEpoch + 2)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(false)
assertTrue(partition.makeLeader(newLeaderState, offsetCheckpoints, None),
"Expected makeLeader() to return 'leader changed' after makeFollower()")
val currentLeaderEpochStartOffset = partition.localLogOrException.logEndOffset
// append records with the latest leader epoch
partition.appendRecordsToLeader(batch3, origin = AppendOrigin.Client, requiredAcks = 0, requestLocal)
// fetch from follower not in ISR from log start offset should not add this follower to ISR
updateFollowerFetchState(follower1, LogOffsetMetadata(0))
updateFollowerFetchState(follower1, LogOffsetMetadata(lastOffsetOfFirstBatch))
assertEquals(Set[Integer](leader, follower2), partition.isrState.isr, "ISR")
// fetch from the follower not in ISR from start offset of the current leader epoch should
// add this follower to ISR
updateFollowerFetchState(follower1, LogOffsetMetadata(currentLeaderEpochStartOffset))
    // The expansion is only pending: the committed ISR is unchanged until the AlterIsr request completes
assertEquals(Set[Integer](leader, follower2), partition.isrState.isr, "ISR")
assertEquals(Set[Integer](leader, follower1, follower2), partition.isrState.maximalIsr, "ISR")
assertEquals(alterIsrManager.isrUpdates.head.leaderAndIsr.isr.toSet,
Set(leader, follower1, follower2), "AlterIsr")
}
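  // Builds a MemoryRecords batch at the given base offset (and optional partition leader epoch)
  // with LOG_APPEND_TIME timestamps.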
def createRecords(records: Iterable[SimpleRecord], baseOffset: Long, partitionLeaderEpoch: Int = 0): MemoryRecords = {
val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
val builder = MemoryRecords.builder(
buf, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.LOG_APPEND_TIME,
baseOffset, time.milliseconds, partitionLeaderEpoch)
records.foreach(builder.append)
builder.build()
}
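  // Builds a transactional MemoryRecords batch starting at the given base offset; used to
  // exercise the READ_COMMITTED isolation level in the ListOffsets tests.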
def createTransactionalRecords(records: Iterable[SimpleRecord],
baseOffset: Long): MemoryRecords = {
val producerId = 1L
val producerEpoch = 0.toShort
val baseSequence = 0
val isTransactional = true
val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
val builder = MemoryRecords.builder(buf, CompressionType.NONE, baseOffset, producerId,
producerEpoch, baseSequence, isTransactional)
records.foreach(builder.append)
builder.build()
}
/**
   * Test for the AtMinIsr partition state. We set the partition replica set size to 3, but only one replica is in the ISR.
   * Since the default minIsr configuration is 1, the partition should be at min ISR (isAtMinIsr = true).
*/
@Test
def testAtMinIsr(): Unit = {
val controllerEpoch = 3
val leader = brokerId
val follower1 = brokerId + 1
val follower2 = brokerId + 2
val replicas = List[Integer](leader, follower1, follower2).asJava
val isr = List[Integer](leader).asJava
val leaderEpoch = 8
assertFalse(partition.isAtMinIsr)
// Make isr set to only have leader to trigger AtMinIsr (default min isr config is 1)
val leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(leader)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(true)
partition.makeLeader(leaderState, offsetCheckpoints, None)
assertTrue(partition.isAtMinIsr)
}
@Test
def testUpdateFollowerFetchState(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 6, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = List[Integer](brokerId, remoteBrokerId).asJava
val isr = replicas
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
val initializeTimeMs = time.milliseconds()
assertTrue(partition.makeLeader(
new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(true),
offsetCheckpoints, None), "Expected become leader transition to succeed")
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
time.sleep(500)
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(3),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 6L)
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(3L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
time.sleep(500)
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(6L),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 6L)
assertEquals(time.milliseconds(), remoteReplica.lastCaughtUpTimeMs)
assertEquals(6L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
}
@Test
def testIsrExpansion(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = List(brokerId, remoteBrokerId)
val isr = List[Integer](brokerId).asJava
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
assertTrue(partition.makeLeader(
new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas.map(Int.box).asJava)
.setIsNew(true),
offsetCheckpoints, None), "Expected become leader transition to succeed")
assertEquals(Set(brokerId), partition.isrState.isr)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(3),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 6L)
assertEquals(Set(brokerId), partition.isrState.isr)
assertEquals(3L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 6L)
assertEquals(alterIsrManager.isrUpdates.size, 1)
val isrItem = alterIsrManager.isrUpdates.head
assertEquals(isrItem.leaderAndIsr.isr, List(brokerId, remoteBrokerId))
assertEquals(Set(brokerId), partition.isrState.isr)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
assertEquals(10L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
// Complete the ISR expansion
alterIsrManager.completeIsrUpdate(2)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
assertEquals(isrChangeListener.expands.get, 1)
assertEquals(isrChangeListener.shrinks.get, 0)
assertEquals(isrChangeListener.failures.get, 0)
}
@Test
def testIsrNotExpandedIfUpdateFails(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = List[Integer](brokerId, remoteBrokerId).asJava
val isr = List[Integer](brokerId).asJava
partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None)
assertTrue(partition.makeLeader(
new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(true),
offsetCheckpoints, None), "Expected become leader transition to succeed")
assertEquals(Set(brokerId), partition.isrState.isr)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 10L)
// Follower state is updated, but the ISR has not expanded
assertEquals(Set(brokerId), partition.inSyncReplicaIds)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
assertEquals(alterIsrManager.isrUpdates.size, 1)
assertEquals(10L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
// Simulate failure callback
alterIsrManager.failIsrUpdate(Errors.INVALID_UPDATE_VERSION)
// Still no ISR change
assertEquals(Set(brokerId), partition.inSyncReplicaIds)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
assertEquals(alterIsrManager.isrUpdates.size, 0)
assertEquals(isrChangeListener.expands.get, 0)
assertEquals(isrChangeListener.shrinks.get, 0)
assertEquals(isrChangeListener.failures.get, 1)
}
@Test
def testRetryShrinkIsr(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId, remoteBrokerId)
val topicId = Uuid.randomUuid()
assertTrue(makeLeader(
topicId = Some(topicId),
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
// Sleep enough time to shrink the ISR
time.sleep(partition.replicaLagTimeMaxMs + 1)
// Try to shrink the ISR
partition.maybeShrinkIsr()
assertEquals(alterIsrManager.isrUpdates.size, 1)
assertEquals(alterIsrManager.isrUpdates.head.leaderAndIsr.isr, List(brokerId))
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
// The shrink fails and we retry
alterIsrManager.failIsrUpdate(Errors.NETWORK_EXCEPTION)
assertEquals(0, isrChangeListener.shrinks.get)
assertEquals(1, isrChangeListener.failures.get)
assertEquals(1, partition.getZkVersion)
assertEquals(alterIsrManager.isrUpdates.size, 1)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
assertEquals(0L, partition.localLogOrException.highWatermark)
// The shrink succeeds after retrying
alterIsrManager.completeIsrUpdate(newZkVersion = 2)
assertEquals(1, isrChangeListener.shrinks.get)
assertEquals(2, partition.getZkVersion)
assertEquals(alterIsrManager.isrUpdates.size, 0)
assertEquals(Set(brokerId), partition.isrState.isr)
assertEquals(Set(brokerId), partition.isrState.maximalIsr)
assertEquals(log.logEndOffset, partition.localLogOrException.highWatermark)
}
@Test
def testMaybeShrinkIsr(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId, remoteBrokerId)
val initializeTimeMs = time.milliseconds()
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
// On initialization, the replica is considered caught up and should not be removed
partition.maybeShrinkIsr()
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
// If enough time passes without a fetch update, the ISR should shrink
time.sleep(partition.replicaLagTimeMaxMs + 1)
// Shrink the ISR
partition.maybeShrinkIsr()
assertEquals(0, isrChangeListener.shrinks.get)
assertEquals(alterIsrManager.isrUpdates.size, 1)
assertEquals(alterIsrManager.isrUpdates.head.leaderAndIsr.isr, List(brokerId))
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
assertEquals(0L, partition.localLogOrException.highWatermark)
// After the ISR shrink completes, the ISR state should be updated and the
// high watermark should be advanced
alterIsrManager.completeIsrUpdate(newZkVersion = 2)
assertEquals(1, isrChangeListener.shrinks.get)
assertEquals(2, partition.getZkVersion)
assertEquals(alterIsrManager.isrUpdates.size, 0)
assertEquals(Set(brokerId), partition.isrState.isr)
assertEquals(Set(brokerId), partition.isrState.maximalIsr)
assertEquals(log.logEndOffset, partition.localLogOrException.highWatermark)
}
@Test
def testAlterIsrLeaderAndIsrRace(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId, remoteBrokerId)
val initializeTimeMs = time.milliseconds()
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
// Shrink the ISR
time.sleep(partition.replicaLagTimeMaxMs + 1)
partition.maybeShrinkIsr()
assertTrue(partition.isrState.isInflight)
// Become leader again, reset the ISR state
assertFalse(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 2,
isNew = false
))
assertEquals(0L, partition.localLogOrException.highWatermark)
assertFalse(partition.isrState.isInflight, "ISR should be committed and not inflight")
// Try the shrink again, should not submit until AlterIsr response arrives
time.sleep(partition.replicaLagTimeMaxMs + 1)
partition.maybeShrinkIsr()
assertFalse(partition.isrState.isInflight, "ISR should still be committed and not inflight")
// Complete the AlterIsr update and now we can make modifications again
alterIsrManager.completeIsrUpdate(10)
partition.maybeShrinkIsr()
assertTrue(partition.isrState.isInflight, "ISR should be pending a shrink")
}
@Test
def testShouldNotShrinkIsrIfPreviousFetchIsCaughtUp(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId, remoteBrokerId)
val initializeTimeMs = time.milliseconds()
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
// There is a short delay before the first fetch. The follower is not yet caught up to the log end.
time.sleep(5000)
val firstFetchTimeMs = time.milliseconds()
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(5),
followerStartOffset = 0L,
followerFetchTimeMs = firstFetchTimeMs,
leaderEndOffset = 10L)
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(5L, partition.localLogOrException.highWatermark)
assertEquals(5L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
// Some new data is appended, but the follower catches up to the old end offset.
// The total elapsed time from initialization is larger than the max allowed replica lag.
time.sleep(5001)
seedLogData(log, numRecords = 5, leaderEpoch = leaderEpoch)
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 15L)
assertEquals(firstFetchTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(10L, partition.localLogOrException.highWatermark)
assertEquals(10L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
// The ISR should not be shrunk because the follower has caught up with the leader at the
// time of the first fetch.
partition.maybeShrinkIsr()
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
assertEquals(alterIsrManager.isrUpdates.size, 0)
}
@Test
def testShouldNotShrinkIsrIfFollowerCaughtUpToLogEnd(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId, remoteBrokerId)
val initializeTimeMs = time.milliseconds()
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
// The follower catches up to the log end immediately.
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 10L)
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(10L, partition.localLogOrException.highWatermark)
assertEquals(10L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
// Sleep longer than the max allowed follower lag
time.sleep(30001)
// The ISR should not be shrunk because the follower is caught up to the leader's log end
partition.maybeShrinkIsr()
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.isr)
assertEquals(alterIsrManager.isrUpdates.size, 0)
}
@Test
def testIsrNotShrunkIfUpdateFails(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId, remoteBrokerId)
val initializeTimeMs = time.milliseconds()
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(initializeTimeMs, remoteReplica.lastCaughtUpTimeMs)
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
time.sleep(30001)
    // Enqueue an AlterIsr that will fail
partition.maybeShrinkIsr()
assertEquals(Set(brokerId, remoteBrokerId), partition.inSyncReplicaIds)
assertEquals(alterIsrManager.isrUpdates.size, 1)
assertEquals(0L, partition.localLogOrException.highWatermark)
// Simulate failure callback
alterIsrManager.failIsrUpdate(Errors.INVALID_UPDATE_VERSION)
// Ensure ISR hasn't changed
assertEquals(partition.isrState.getClass, classOf[PendingShrinkIsr])
assertEquals(Set(brokerId, remoteBrokerId), partition.inSyncReplicaIds)
assertEquals(alterIsrManager.isrUpdates.size, 0)
assertEquals(0L, partition.localLogOrException.highWatermark)
}
@Test
def testAlterIsrUnknownTopic(): Unit = {
handleAlterIsrFailure(Errors.UNKNOWN_TOPIC_OR_PARTITION,
(brokerId: Int, remoteBrokerId: Int, partition: Partition) => {
assertEquals(partition.isrState.isr, Set(brokerId))
assertEquals(partition.isrState.maximalIsr, Set(brokerId, remoteBrokerId))
assertEquals(alterIsrManager.isrUpdates.size, 0)
})
}
@Test
def testAlterIsrInvalidVersion(): Unit = {
handleAlterIsrFailure(Errors.INVALID_UPDATE_VERSION,
(brokerId: Int, remoteBrokerId: Int, partition: Partition) => {
assertEquals(partition.isrState.isr, Set(brokerId))
assertEquals(partition.isrState.maximalIsr, Set(brokerId, remoteBrokerId))
assertEquals(alterIsrManager.isrUpdates.size, 0)
})
}
@Test
def testAlterIsrUnexpectedError(): Unit = {
handleAlterIsrFailure(Errors.UNKNOWN_SERVER_ERROR,
(brokerId: Int, remoteBrokerId: Int, partition: Partition) => {
// We retry these
assertEquals(partition.isrState.isr, Set(brokerId))
assertEquals(partition.isrState.maximalIsr, Set(brokerId, remoteBrokerId))
assertEquals(alterIsrManager.isrUpdates.size, 1)
})
}
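  // Drives an ISR expansion, fails the resulting AlterIsr request with the given error, and lets
  // the caller assert on the partition state via the callback.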
def handleAlterIsrFailure(error: Errors, callback: (Int, Int, Partition) => Unit): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val remoteBrokerId = brokerId + 1
val replicas = Seq(brokerId, remoteBrokerId)
val isr = Seq(brokerId)
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(10L, partition.localLogOrException.highWatermark)
val remoteReplica = partition.getReplica(remoteBrokerId).get
assertEquals(LogOffsetMetadata.UnknownOffsetMetadata.messageOffset, remoteReplica.logEndOffset)
assertEquals(UnifiedLog.UnknownOffset, remoteReplica.logStartOffset)
// This will attempt to expand the ISR
partition.updateFollowerFetchState(remoteBrokerId,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 10L)
// Follower state is updated, but the ISR has not expanded
assertEquals(Set(brokerId), partition.inSyncReplicaIds)
assertEquals(Set(brokerId, remoteBrokerId), partition.isrState.maximalIsr)
assertEquals(alterIsrManager.isrUpdates.size, 1)
assertEquals(10L, remoteReplica.logEndOffset)
assertEquals(0L, remoteReplica.logStartOffset)
// Failure
alterIsrManager.failIsrUpdate(error)
callback(brokerId, remoteBrokerId, partition)
}
@Test
def testSingleInFlightAlterIsr(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val follower1 = brokerId + 1
val follower2 = brokerId + 2
val follower3 = brokerId + 3
val replicas = Seq(brokerId, follower1, follower2, follower3)
val isr = Seq(brokerId, follower1, follower2)
doNothing().when(delayedOperations).checkAndCompleteAll()
assertTrue(makeLeader(
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
// Expand ISR
partition.updateFollowerFetchState(
followerId = follower3,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 10
)
assertEquals(Set(brokerId, follower1, follower2), partition.isrState.isr)
assertEquals(Set(brokerId, follower1, follower2, follower3), partition.isrState.maximalIsr)
// One AlterIsr request in-flight
assertEquals(alterIsrManager.isrUpdates.size, 1)
// Try to modify ISR again, should do nothing
time.sleep(partition.replicaLagTimeMaxMs + 1)
partition.maybeShrinkIsr()
assertEquals(alterIsrManager.isrUpdates.size, 1)
}
@Test
def testZkIsrManagerAsyncCallback(): Unit = {
// We need a real scheduler here so that the ISR write lock works properly
val scheduler = new KafkaScheduler(1, "zk-isr-test")
scheduler.startup()
val kafkaZkClient = mock(classOf[KafkaZkClient])
doAnswer(_ => (true, 2))
.when(kafkaZkClient)
.conditionalUpdatePath(anyString(), any(), ArgumentMatchers.eq(1), any())
val zkIsrManager = AlterIsrManager(scheduler, time, kafkaZkClient)
zkIsrManager.start()
val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = KAFKA_2_6_IV0, // shouldn't matter, but set this to a ZK isr version
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
logManager,
zkIsrManager)
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 10, leaderEpoch = 4)
val controllerEpoch = 0
val leaderEpoch = 5
val follower1 = brokerId + 1
val follower2 = brokerId + 2
val follower3 = brokerId + 3
val replicas = Seq(brokerId, follower1, follower2, follower3)
val isr = Seq(brokerId, follower1, follower2)
doNothing().when(delayedOperations).checkAndCompleteAll()
assertTrue(makeLeader(
partition = partition,
topicId = None,
controllerEpoch = controllerEpoch,
leaderEpoch = leaderEpoch,
isr = isr,
replicas = replicas,
zkVersion = 1,
isNew = true
))
assertEquals(0L, partition.localLogOrException.highWatermark)
// Expand ISR
partition.updateFollowerFetchState(
followerId = follower3,
followerFetchOffsetMetadata = LogOffsetMetadata(10),
followerStartOffset = 0L,
followerFetchTimeMs = time.milliseconds(),
leaderEndOffset = 10
)
    // Wait for the asynchronous ZK callback to commit the ISR state, to avoid racing with the assertion below
TestUtils.waitUntilTrue(() => !partition.isrState.isInflight, "Expected ISR state to be committed", 100)
partition.isrState match {
case committed: CommittedIsr => assertEquals(Set(brokerId, follower1, follower2, follower3), committed.isr)
case _ => fail("Expected a committed ISR following Zk expansion")
}
scheduler.shutdown()
}
@Test
def testUseCheckpointToInitializeHighWatermark(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, topicId = None)
seedLogData(log, numRecords = 6, leaderEpoch = 5)
when(offsetCheckpoints.fetch(logDir1.getAbsolutePath, topicPartition))
.thenReturn(Some(4L))
val controllerEpoch = 3
val replicas = List[Integer](brokerId, brokerId + 1).asJava
val leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(6)
.setIsr(replicas)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(false)
partition.makeLeader(leaderState, offsetCheckpoints, None)
assertEquals(4, partition.localLogOrException.highWatermark)
}
@Test
def testTopicIdAndPartitionMetadataFileForLeader(): Unit = {
val controllerEpoch = 3
val leaderEpoch = 5
val topicId = Uuid.randomUuid()
val replicas = List[Integer](brokerId, brokerId + 1).asJava
val leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(replicas)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(false)
partition.makeLeader(leaderState, offsetCheckpoints, Some(topicId))
checkTopicId(topicId, partition)
// Create new Partition object for same topicPartition
val partition2 = new Partition(topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = ApiVersion.latestVersion,
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
logManager,
alterIsrManager)
// partition2 should not yet be associated with the log, but should be able to get ID
assertTrue(partition2.topicId.isDefined)
assertEquals(topicId, partition2.topicId.get)
assertFalse(partition2.log.isDefined)
// Calling makeLeader with a new topic ID should not overwrite the old topic ID. We should get an InconsistentTopicIdException.
// This scenario should not occur, since the topic ID check will fail.
assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeLeader(leaderState, offsetCheckpoints, Some(Uuid.randomUuid())))
// Calling makeLeader with no topic ID should not overwrite the old topic ID. We should get the original log.
partition2.makeLeader(leaderState, offsetCheckpoints, None)
checkTopicId(topicId, partition2)
}
@Test
def testTopicIdAndPartitionMetadataFileForFollower(): Unit = {
val controllerEpoch = 3
val leaderEpoch = 5
val topicId = Uuid.randomUuid()
val replicas = List[Integer](brokerId, brokerId + 1).asJava
val leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(replicas)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(false)
partition.makeFollower(leaderState, offsetCheckpoints, Some(topicId))
checkTopicId(topicId, partition)
// Create new Partition object for same topicPartition
val partition2 = new Partition(topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = ApiVersion.latestVersion,
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
logManager,
alterIsrManager)
// partition2 should not yet be associated with the log, but should be able to get ID
assertTrue(partition2.topicId.isDefined)
assertEquals(topicId, partition2.topicId.get)
assertFalse(partition2.log.isDefined)
// Calling makeFollower with a new topic ID should not overwrite the old topic ID. We should get an InconsistentTopicIdException.
// This scenario should not occur, since the topic ID check will fail.
assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeFollower(leaderState, offsetCheckpoints, Some(Uuid.randomUuid())))
// Calling makeFollower with no topic ID should not overwrite the old topic ID. We should get the original log.
partition2.makeFollower(leaderState, offsetCheckpoints, None)
checkTopicId(topicId, partition2)
}
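  // Asserts that the partition, its log, and the on-disk partition metadata file all report the
  // expected topic ID.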
def checkTopicId(expectedTopicId: Uuid, partition: Partition): Unit = {
assertTrue(partition.topicId.isDefined)
assertEquals(expectedTopicId, partition.topicId.get)
assertTrue(partition.log.isDefined)
val log = partition.log.get
assertEquals(expectedTopicId, log.topicId.get)
assertTrue(log.partitionMetadataFile.exists())
assertEquals(expectedTopicId, log.partitionMetadataFile.read().topicId)
}
@Test
def testAddAndRemoveMetrics(): Unit = {
val metricsToCheck = List(
"UnderReplicated",
"UnderMinIsr",
"InSyncReplicasCount",
"ReplicasCount",
"LastStableOffsetLag",
"AtMinIsr")
def getMetric(metric: String): Option[Metric] = {
KafkaYammerMetrics.defaultRegistry().allMetrics().asScala.filter { case (metricName, _) =>
metricName.getName == metric && metricName.getType == "Partition"
}.headOption.map(_._2)
}
assertTrue(metricsToCheck.forall(getMetric(_).isDefined))
Partition.removeMetrics(topicPartition)
assertEquals(Set(), KafkaYammerMetrics.defaultRegistry().allMetrics().asScala.keySet.filter(_.getType == "Partition"))
}
@Test
def testUnderReplicatedPartitionsCorrectSemantics(): Unit = {
val controllerEpoch = 3
val replicas = List[Integer](brokerId, brokerId + 1, brokerId + 2).asJava
val isr = List[Integer](brokerId, brokerId + 1).asJava
var leaderState = new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(6)
.setIsr(isr)
.setZkVersion(1)
.setReplicas(replicas)
.setIsNew(false)
partition.makeLeader(leaderState, offsetCheckpoints, None)
assertTrue(partition.isUnderReplicated)
leaderState = leaderState.setIsr(replicas)
partition.makeLeader(leaderState, offsetCheckpoints, None)
assertFalse(partition.isUnderReplicated)
}
@Test
def testUpdateAssignmentAndIsr(): Unit = {
val topicPartition = new TopicPartition("test", 1)
val partition = new Partition(
topicPartition, 1000, ApiVersion.latestVersion, 0,
new SystemTime(), mock(classOf[IsrChangeListener]), mock(classOf[DelayedOperations]),
mock(classOf[MetadataCache]), mock(classOf[LogManager]), mock(classOf[AlterIsrManager]))
val replicas = Seq(0, 1, 2, 3)
val isr = Set(0, 1, 2, 3)
val adding = Seq(4, 5)
val removing = Seq(1, 2)
// Test with ongoing reassignment
partition.updateAssignmentAndIsr(replicas, isr, adding, removing)
assertTrue(partition.assignmentState.isInstanceOf[OngoingReassignmentState], "The assignmentState is not OngoingReassignmentState")
assertEquals(replicas, partition.assignmentState.replicas)
assertEquals(isr, partition.isrState.isr)
assertEquals(adding, partition.assignmentState.asInstanceOf[OngoingReassignmentState].addingReplicas)
assertEquals(removing, partition.assignmentState.asInstanceOf[OngoingReassignmentState].removingReplicas)
assertEquals(Seq(1, 2, 3), partition.remoteReplicas.map(_.brokerId))
// Test with simple assignment
val replicas2 = Seq(0, 3, 4, 5)
val isr2 = Set(0, 3, 4, 5)
partition.updateAssignmentAndIsr(replicas2, isr2, Seq.empty, Seq.empty)
assertTrue(partition.assignmentState.isInstanceOf[SimpleAssignmentState], "The assignmentState is not SimpleAssignmentState")
assertEquals(replicas2, partition.assignmentState.replicas)
assertEquals(isr2, partition.isrState.isr)
assertEquals(Seq(3, 4, 5), partition.remoteReplicas.map(_.brokerId))
}
/**
   * Test that when the log is being initialized, its config remains untouched after initialization is done.
*/
@Test
def testLogConfigNotDirty(): Unit = {
logManager.shutdown()
val spyConfigRepository = spy(configRepository)
logManager = TestUtils.createLogManager(
logDirs = Seq(logDir1, logDir2), defaultConfig = logConfig, configRepository = spyConfigRepository,
cleanerConfig = CleanerConfig(enableCleaner = false), time = time)
val spyLogManager = spy(logManager)
val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = ApiVersion.latestVersion,
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
spyLogManager,
alterIsrManager)
partition.createLog(isNew = true, isFutureReplica = false, offsetCheckpoints, topicId = None)
    // Validate that initializingLog and finishedInitializingLog were called
verify(spyLogManager).initializingLog(ArgumentMatchers.eq(topicPartition))
verify(spyLogManager).finishedInitializingLog(ArgumentMatchers.eq(topicPartition), ArgumentMatchers.any())
// We should retrieve configs only once
verify(spyConfigRepository, times(1)).topicConfig(topicPartition.topic())
}
/**
   * Test that when the log is being initialized, its config gets reloaded if the topic config is changed
   * before initialization is done.
*/
@Test
def testLogConfigDirtyAsTopicUpdated(): Unit = {
logManager.shutdown()
val spyConfigRepository = spy(configRepository)
logManager = TestUtils.createLogManager(
logDirs = Seq(logDir1, logDir2), defaultConfig = logConfig, configRepository = spyConfigRepository,
cleanerConfig = CleanerConfig(enableCleaner = false), time = time)
val spyLogManager = spy(logManager)
doAnswer((_: InvocationOnMock) => {
logManager.initializingLog(topicPartition)
logManager.topicConfigUpdated(topicPartition.topic())
}).when(spyLogManager).initializingLog(ArgumentMatchers.eq(topicPartition))
val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = ApiVersion.latestVersion,
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
spyLogManager,
alterIsrManager)
partition.createLog(isNew = true, isFutureReplica = false, offsetCheckpoints, topicId = None)
    // Validate that initializingLog and finishedInitializingLog were called
verify(spyLogManager).initializingLog(ArgumentMatchers.eq(topicPartition))
verify(spyLogManager).finishedInitializingLog(ArgumentMatchers.eq(topicPartition), ArgumentMatchers.any())
    // We should retrieve configs twice: once before the log is created, and a second time when
    // we find the log config is dirty and refresh it.
verify(spyConfigRepository, times(2)).topicConfig(topicPartition.topic())
}
/**
   * Test that when the log is being initialized, its config gets reloaded if the broker config is changed
   * before initialization is done.
*/
@Test
def testLogConfigDirtyAsBrokerUpdated(): Unit = {
logManager.shutdown()
val spyConfigRepository = spy(configRepository)
logManager = TestUtils.createLogManager(
logDirs = Seq(logDir1, logDir2), defaultConfig = logConfig, configRepository = spyConfigRepository,
cleanerConfig = CleanerConfig(enableCleaner = false), time = time)
logManager.startup(Set.empty)
val spyLogManager = spy(logManager)
doAnswer((_: InvocationOnMock) => {
logManager.initializingLog(topicPartition)
logManager.brokerConfigUpdated()
}).when(spyLogManager).initializingLog(ArgumentMatchers.eq(topicPartition))
val partition = new Partition(topicPartition,
replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs,
interBrokerProtocolVersion = ApiVersion.latestVersion,
localBrokerId = brokerId,
time,
isrChangeListener,
delayedOperations,
metadataCache,
spyLogManager,
alterIsrManager)
partition.createLog(isNew = true, isFutureReplica = false, offsetCheckpoints, topicId = None)
    // Validate that initializingLog and finishedInitializingLog were called
verify(spyLogManager).initializingLog(ArgumentMatchers.eq(topicPartition))
verify(spyLogManager).finishedInitializingLog(ArgumentMatchers.eq(topicPartition), ArgumentMatchers.any())
    // We should retrieve configs twice: once before the log is created, and a second time when
    // we find the log config is dirty and refresh it.
verify(spyConfigRepository, times(2)).topicConfig(topicPartition.topic())
}
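  // Creates the local log if needed, transitions the partition to leader with the given state,
  // and sanity-checks the resulting leader epoch, ISR and ZK version before returning whether
  // leadership actually changed.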
private def makeLeader(
topicId: Option[Uuid],
controllerEpoch: Int,
leaderEpoch: Int,
isr: Seq[Int],
replicas: Seq[Int],
zkVersion: Int,
isNew: Boolean,
partition: Partition = partition
): Boolean = {
partition.createLogIfNotExists(
isNew = isNew,
isFutureReplica = false,
offsetCheckpoints,
topicId
)
val newLeader = partition.makeLeader(
new LeaderAndIsrPartitionState()
.setControllerEpoch(controllerEpoch)
.setLeader(brokerId)
.setLeaderEpoch(leaderEpoch)
.setIsr(isr.map(Int.box).asJava)
.setZkVersion(zkVersion)
.setReplicas(replicas.map(Int.box).asJava)
.setIsNew(isNew),
offsetCheckpoints,
topicId
)
assertTrue(partition.isLeader)
assertFalse(partition.isrState.isInflight)
assertEquals(topicId, partition.topicId)
assertEquals(leaderEpoch, partition.getLeaderEpoch)
assertEquals(isr.toSet, partition.isrState.isr)
assertEquals(isr.toSet, partition.isrState.maximalIsr)
assertEquals(zkVersion, partition.getZkVersion)
newLeader
}
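  // Appends numRecords single-record batches to the log as leader with the given epoch.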
private def seedLogData(log: UnifiedLog, numRecords: Int, leaderEpoch: Int): Unit = {
for (i <- 0 until numRecords) {
val records = MemoryRecords.withRecords(0L, CompressionType.NONE, leaderEpoch,
new SimpleRecord(s"k$i".getBytes, s"v$i".getBytes))
log.appendAsLeader(records, leaderEpoch)
}
}
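  // A UnifiedLog whose follower appends block on a semaphore, used by tests that need to simulate
  // slow appends.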
private class SlowLog(
log: UnifiedLog,
logStartOffset: Long,
localLog: LocalLog,
leaderEpochCache: Option[LeaderEpochFileCache],
producerStateManager: ProducerStateManager,
appendSemaphore: Semaphore
) extends UnifiedLog(
logStartOffset,
localLog,
new BrokerTopicStats,
log.producerIdExpirationCheckIntervalMs,
leaderEpochCache,
producerStateManager,
_topicId = None,
keepPartitionMetadataFile = true) {
override def appendAsFollower(records: MemoryRecords): LogAppendInfo = {
appendSemaphore.acquire()
val appendInfo = super.appendAsFollower(records)
appendInfo
}
}
}
|
TiVo/kafka
|
core/src/test/scala/unit/kafka/cluster/PartitionTest.scala
|
Scala
|
apache-2.0
| 88,036
|
package com.twitter.finagle.loadbalancer
import com.twitter.finagle.{Address, ClientConnection, Service}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.DefaultTimer
import com.twitter.util.{Duration, Future, Time}
import java.util.concurrent.atomic.AtomicInteger
private trait Server extends EndpointFactory[Unit, Unit] {
val address = Address.Failed(new Exception)
def remake() = {}
/**
* The maximum amount of concurrent load observed.
*/
def maxLoad: Long
/**
   * The total number of requests that this server received.
*/
def count: Long
}
/**
* Creates a ServiceFactory that applies a latency profile to Services
* it creates.
*/
private object ServerFactory {
/**
* Creates a [[Server]] with the given `id` and applies `nextLatency`
* latency for each request.
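   *
   * A hypothetical usage sketch (the `sr` StatsReceiver and the fixed 5 ms latency are assumed
   * here purely for illustration):
   * {{{
   * val server = ServerFactory("server-0", () => Duration.fromMilliseconds(5), sr.scope("server-0"))
   * }}}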
*/
def apply(
id: String,
nextLatency: () => Duration,
sr: StatsReceiver
) = new Server {
private val _load = new AtomicInteger(0)
private val _maxLoad = new AtomicInteger(0)
private val _numRequests = new AtomicInteger(0)
private val service = new Service[Unit, Unit] {
val numRequests = sr.counter("count")
val gauges = Seq(
sr.addGauge("load") { _load.get() },
sr.addGauge("maxload") { _maxLoad.get() }
)
def apply(req: Unit) = {
synchronized {
val l = _load.incrementAndGet()
if (l > _maxLoad.get()) _maxLoad.set(l)
}
numRequests.incr()
_numRequests.incrementAndGet()
Future.sleep(nextLatency())(DefaultTimer).ensure {
_load.decrementAndGet()
}
}
}
def maxLoad = _maxLoad.get().toLong
def count = _numRequests.get().toLong
def apply(conn: ClientConnection) = Future.value(service)
def close(deadline: Time) = Future.Done
override def toString = id
}
}
|
koshelev/finagle
|
finagle-benchmark/src/main/scala/com/twitter/finagle/loadbalancer/ServerFactory.scala
|
Scala
|
apache-2.0
| 1,876
|
package org.psesd.srx.shared.core.exceptions
/** SIF request or response body does not match specified content type.
*
* @version 1.0
* @since 1.0
* @author Stephen Pugmire (iTrellis, LLC)
**/
class SifContentTypeInvalidException(val description: String) extends IllegalArgumentException(
description
)
|
PSESD/srx-shared-core
|
src/main/scala/org/psesd/srx/shared/core/exceptions/SifContentTypeInvalidException.scala
|
Scala
|
mit
| 316
|
import leon.annotation._
import leon.lang._
import leon.lang.synthesis._
object AddressBookMake {
case class Address[A](info: A, priv: Boolean)
sealed abstract class AddressList[A] {
def size: BigInt = {
this match {
case Nil() => BigInt(0)
case Cons(head, tail) => BigInt(1) + tail.size
}
} ensuring { res => res >= 0 }
def content: Set[Address[A]] = this match {
case Nil() => Set[Address[A]]()
case Cons(addr, l1) => Set(addr) ++ l1.content
}
}
case class Cons[A](a: Address[A], tail: AddressList[A]) extends AddressList[A]
case class Nil[A]() extends AddressList[A]
def allPersonal[A](l: AddressList[A]): Boolean = l match {
case Nil() => true
case Cons(a, l1) =>
if (a.priv) allPersonal(l1)
else false
}
def allBusiness[A](l: AddressList[A]): Boolean = l match {
case Nil() => true
case Cons(a, l1) =>
if (a.priv) false
else allBusiness(l1)
}
case class AddressBook[A](business: AddressList[A], personal: AddressList[A]) {
def size: BigInt = business.size + personal.size
def content: Set[Address[A]] = business.content ++ personal.content
def invariant = {
allPersonal(personal) && allBusiness(business)
}
}
def makeAddressBook[A](as: AddressList[A]): AddressBook[A] = {
choose( (res: AddressBook[A]) => res.content == as.content && res.invariant )
/* as match {
case Nil() => AddressBook[A](Nil[A](), Nil[A]())
case Cons(x, xs) =>
val AddressBook(b, p) = makeAddressBook(xs)
if(x.priv) AddressBook(b, Cons(x, p))
else AddressBook(Cons(x, b), p)
}
} ensuring {
res => res.content == as.content && res.invariant */
}
}
|
epfl-lara/leon
|
testcases/synthesis/etienne-thesis/AddressBook/Make.scala
|
Scala
|
gpl-3.0
| 1,737
|
/*
* Copyright (c) 2014 François Cabrol.
*
* This file is part of MURAL.
*
* MURAL is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* MURAL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with MURAL. If not, see <http://www.gnu.org/licenses/>.
*/
package com.cabrol.francois.mural.generator.rulebased.method
import com.cabrol.francois.libjamu.musictheory.entity.note.Note
import com.cabrol.francois.mural.generator.rulebased.parameters.Parameters
object Methods extends Enumeration {
val RULESBASED = Method()
}
sealed case class Method() extends GenerationMethod {
private def getObject : GenerationMethod = this match {
case Methods.RULESBASED => new RuleBased
    case _ => throw new Error("The method does not exist")
}
override def generateSequence(parameters: Parameters): List[Note] = getObject.generateSequence(parameters)
}
|
francoiscabrol/MURAL
|
src/main/scala/com/cabrol/francois/mural/generator/rulebased/method/Methods.scala
|
Scala
|
gpl-3.0
| 1,353
|
package com.sksamuel.scapegoat.inspections.math
import com.sksamuel.scapegoat.InspectionTest
/** @author Matic Potočnik */
class UseCbrtTest extends InspectionTest {
override val inspections = Seq(new UseCbrt)
"using pow instead of cbrt" - {
"should report warning" in {
val code = """object Test {
val a = 2
scala.math.pow(2, 1/3d)
math.pow(2, 1/3d)
scala.math.pow(2, 1/3f)
math.pow(2, 1/3f)
Math.pow(2, 1/3d)
StrictMath.pow(2, 1/3d)
} """.stripMargin
compileCodeSnippet(code)
compiler.scapegoat.feedback.warnings.size shouldBe 6
}
}
}
|
sksamuel/scapegoat
|
src/test/scala/com/sksamuel/scapegoat/inspections/math/UseCbrtTest.scala
|
Scala
|
apache-2.0
| 764
|
package models
import scala.collection.immutable
import org.specs2.mutable.Specification
import play.api.test._
import play.api.test.Helpers._
import com.ruimo.scoins.Scoping._
import helpers.InjectorSupport
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.db.Database
import java.time.Instant
class TransactionSummarySpec extends Specification with InjectorSupport {
def date(s: String): Instant = Instant.ofEpochMilli(java.sql.Date.valueOf(s).getTime)
"TransactionSummary" should {
"Can list summary" in {
implicit val app: Application = GuiceApplicationBuilder().configure(inMemoryDatabase()).build()
val localeInfo = inject[LocaleInfoRepo]
val currencyInfo = inject[CurrencyRegistry]
inject[Database].withConnection { implicit conn =>
val tax1 = inject[TaxRepo].createNew
val tax2 = inject[TaxRepo].createNew
inject[TaxHistoryRepo].createNew(tax1, TaxType.OUTER_TAX, BigDecimal("5"), date("9999-12-31"))
inject[TaxHistoryRepo].createNew(tax2, TaxType.INNER_TAX, BigDecimal("5"), date("9999-12-31"))
val user1 = inject[StoreUserRepo].create(
"name1", "first1", None, "last1", "email1", 123L, 234L, UserRole.NORMAL, Some("companyName")
)
val user2 = inject[StoreUserRepo].create(
"name2", "first2", None, "last2", "email2", 123L, 234L, UserRole.NORMAL, Some("companyName2")
)
import localeInfo.Ja
val site1 = inject[SiteRepo].createNew(Ja, "商店1")
val site2 = inject[SiteRepo].createNew(Ja, "商店2")
val cat1 = inject[CategoryRepo].createNew(Map(Ja -> "植木"))
val item1 = inject[ItemRepo].createNew(cat1)
val item2 = inject[ItemRepo].createNew(cat1)
inject[ItemNameRepo].createNew(item1, Map(Ja -> "杉"))
inject[ItemNameRepo].createNew(item2, Map(Ja -> "梅"))
inject[SiteItemRepo].createNew(site1, item1)
inject[SiteItemRepo].createNew(site2, item2)
inject[ItemDescriptionRepo].createNew(item1, site1, "杉説明")
inject[ItemDescriptionRepo].createNew(item2, site1, "梅説明")
val price1 = inject[ItemPriceRepo].createNew(item1, site1)
val price2 = inject[ItemPriceRepo].createNew(item2, site2)
inject[ItemPriceHistoryRepo].createNew(
price1, tax1, currencyInfo.Jpy, BigDecimal(119), None, BigDecimal(100), date("9999-12-31")
)
inject[ItemPriceHistoryRepo].createNew(
price2, tax1, currencyInfo.Jpy, BigDecimal(59), None, BigDecimal(50), date("9999-12-31")
)
inject[ShoppingCartItemRepo].addItem(user1.id.get, site1.id.get, item1.id.get.id, 1)
inject[ShoppingCartItemRepo].addItem(user1.id.get, site2.id.get, item2.id.get.id, 1)
inject[ShoppingCartItemRepo].addItem(user2.id.get, site1.id.get, item1.id.get.id, 2)
val itemClass1 = 1L
val box1 = inject[ShippingBoxRepo].createNew(site1.id.get, itemClass1, 10, "小箱")
val box2 = inject[ShippingBoxRepo].createNew(site2.id.get, itemClass1, 3, "小箱")
val shipping1 = inject[ShippingFeeRepo].createNew(box1.id.get, CountryCode.JPN, JapanPrefecture.東京都.code)
val shipping2 = inject[ShippingFeeRepo].createNew(box2.id.get, CountryCode.JPN, JapanPrefecture.東京都.code)
inject[ShippingFeeHistoryRepo].createNew(
shipping1.id.get, tax2.id.get, BigDecimal(1234), Some(BigDecimal(1000)), date("9999-12-31")
)
inject[ShippingFeeHistoryRepo].createNew(
shipping2.id.get, tax2.id.get, BigDecimal(2345), None, date("9999-12-31")
)
inject[ShippingFeeHistoryRepo].feeBySiteAndItemClass(
CountryCode.JPN, JapanPrefecture.東京都.code,
ShippingFeeEntries()
.add(site1, itemClass1, 3)
.add(site2, itemClass1, 5)
)
implicit val storeUserRepo = inject[StoreUserRepo]
val cart1 = inject[ShoppingCartItemRepo].listItemsForUser(
Ja,
LoginSession(user1, None, 0)
)._1
val addr1 = Address.createNew(
countryCode = CountryCode.JPN,
firstName = "FirstName",
lastName = "LastName",
zip1 = "123",
prefecture = JapanPrefecture.東京都,
address1 = "Address1",
address2 = "Address2",
tel1 = "12345678"
)
val cart2 = inject[ShoppingCartItemRepo].listItemsForUser(
Ja,
LoginSession(user2, None, 0)
)._1
val addr2 = Address.createNew(
countryCode = CountryCode.JPN,
firstName = "FirstName2",
lastName = "LastName2",
zip1 = "123",
prefecture = JapanPrefecture.東京都,
address1 = "Address21",
address2 = "Address22",
tel1 = "1234567890"
)
val shippingDate1 = ShippingDate(
Map(
site1.id.get -> ShippingDateEntry(site1.id.get, date("2013-02-03")),
site2.id.get -> ShippingDateEntry(site2.id.get, date("2013-05-03"))
)
)
val shippingDate2 = ShippingDate(
Map(
site1.id.get -> ShippingDateEntry(site1.id.get, date("2013-02-04"))
)
)
val persister = inject[TransactionPersister]
implicit val taxRepo = inject[TaxRepo]
val t = persister.persist(
Transaction(user1.id.get, currencyInfo.Jpy, cart1, Some(addr1),
inject[controllers.Shipping].shippingFee(addr1, cart1), shippingDate1)
)
val tranNo1: Long = t._1
val taxesBySite1: immutable.Map[Site, immutable.Seq[TransactionLogTax]] = t._2
val ptran1 = persister.load(tranNo1, Ja)
val siteUser1 = inject[SiteUserRepo].createNew(user1.id.get, site1.id.get)
val summary1 = inject[TransactionSummary].list(Some(siteUser1.siteId)).records
summary1.size === 1
val entry1 = summary1.head
entry1.transactionId === tranNo1
entry1.transactionTime === ptran1.header.transactionTime
entry1.totalAmount === BigDecimal(119 + 1234)
entry1.address === Some(addr1.copy(email = user1.email))
entry1.siteName === "商店1"
entry1.shippingFee === BigDecimal(1234)
entry1.status === TransactionStatus.ORDERED
val sum1 = inject[TransactionSummary].get(Some(siteUser1.siteId), entry1.transactionSiteId)
sum1.isDefined === true
val t2 = persister.persist(
Transaction(user2.id.get, currencyInfo.Jpy, cart2, Some(addr2),
inject[controllers.Shipping].shippingFee(addr2, cart2), shippingDate2)
)
val tranNo2: Long = t2._1
val taxesBySite2: immutable.Map[Site, immutable.Seq[TransactionLogTax]] = t2._2
val ptran2 = persister.load(tranNo2, Ja)
val siteUser2 = inject[SiteUserRepo].createNew(user1.id.get, site2.id.get)
doWith(inject[TransactionSummary].list(Some(siteUser1.siteId)).records) { s =>
s.size === 2
doWith(s(0)) { e =>
e.transactionId === tranNo2
e.transactionTime === ptran2.header.transactionTime
e.totalAmount === BigDecimal(119 * 2 + 1234)
e.address === Some(addr2.copy(email = user2.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
doWith(s(1)) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(119 + 1234)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
}
doWith(inject[TransactionSummary].list(Some(siteUser2.siteId)).records) { s =>
s.size === 1
doWith(s(0)) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(59 + 2345)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店2"
e.shippingFee === BigDecimal(2345)
e.status === TransactionStatus.ORDERED
}
}
doWith(inject[TransactionSummary].list(storeUserId = Some(user1.id.get)).records) { s =>
s.size === 2
doWith(s.map { ele => (ele.siteName, ele) }.toMap) { map =>
doWith(map("商店1")) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(119 + 1234)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
doWith(map("商店2")) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(59 + 2345)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店2"
e.shippingFee === BigDecimal(2345)
e.status === TransactionStatus.ORDERED
}
}
}
doWith(inject[TransactionSummary].list(storeUserId = Some(user2.id.get)).records) { s =>
s.size === 1
doWith(s(0)) { e =>
e.transactionId === tranNo2
e.transactionTime === ptran2.header.transactionTime
e.totalAmount === BigDecimal(119 * 2 + 1234)
e.address === Some(addr2.copy(email = user2.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
}
doWith(inject[TransactionSummary].list(tranId = Some(tranNo1)).records) { s =>
s.size === 2
doWith(s.map { ele => (ele.siteName, ele) }.toMap) { map =>
doWith(map("商店1")) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(119 + 1234)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
doWith(map("商店2")) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(59 + 2345)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店2"
e.shippingFee === BigDecimal(2345)
e.status === TransactionStatus.ORDERED
}
}
}
doWith(inject[TransactionSummary].list(tranId = Some(tranNo2)).records) { s =>
s.size === 1
doWith(s(0)) { e =>
e.transactionId === tranNo2
e.transactionTime === ptran2.header.transactionTime
e.totalAmount === BigDecimal(119 * 2 + 1234)
e.address === Some(addr2.copy(email = user2.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
}
}
}
"Can listByPeriod summary" in {
implicit val app: Application = GuiceApplicationBuilder().configure(inMemoryDatabase()).build()
val localeInfo = inject[LocaleInfoRepo]
val currencyInfo = inject[CurrencyRegistry]
inject[Database].withConnection { implicit conn =>
val tax1 = inject[TaxRepo].createNew
val tax2 = inject[TaxRepo].createNew
inject[TaxHistoryRepo].createNew(tax1, TaxType.OUTER_TAX, BigDecimal("5"), date("9999-12-31"))
inject[TaxHistoryRepo].createNew(tax2, TaxType.INNER_TAX, BigDecimal("5"), date("9999-12-31"))
val user1 = inject[StoreUserRepo].create(
"name1", "first1", None, "last1", "email1", 123L, 234L, UserRole.NORMAL, Some("companyName")
)
val user2 = inject[StoreUserRepo].create(
"name2", "first2", None, "last2", "email2", 123L, 234L, UserRole.NORMAL, Some("companyName2")
)
import localeInfo.Ja
val site1 = inject[SiteRepo].createNew(Ja, "商店1")
val site2 = inject[SiteRepo].createNew(Ja, "商店2")
val cat1 = inject[CategoryRepo].createNew(Map(Ja -> "植木"))
val item1 = inject[ItemRepo].createNew(cat1)
val item2 = inject[ItemRepo].createNew(cat1)
inject[ItemNameRepo].createNew(item1, Map(Ja -> "杉"))
inject[ItemNameRepo].createNew(item2, Map(Ja -> "梅"))
inject[SiteItemRepo].createNew(site1, item1)
inject[SiteItemRepo].createNew(site2, item2)
inject[ItemDescriptionRepo].createNew(item1, site1, "杉説明")
inject[ItemDescriptionRepo].createNew(item2, site1, "梅説明")
val price1 = inject[ItemPriceRepo].createNew(item1, site1)
val price2 = inject[ItemPriceRepo].createNew(item2, site2)
inject[ItemPriceHistoryRepo].createNew(
price1, tax1, currencyInfo.Jpy, BigDecimal(119), None, BigDecimal(100), date("9999-12-31")
)
inject[ItemPriceHistoryRepo].createNew(
price2, tax1, currencyInfo.Jpy, BigDecimal(59), None, BigDecimal(50), date("9999-12-31")
)
inject[ShoppingCartItemRepo].addItem(user1.id.get, site1.id.get, item1.id.get.id, 1)
inject[ShoppingCartItemRepo].addItem(user1.id.get, site2.id.get, item2.id.get.id, 1)
inject[ShoppingCartItemRepo].addItem(user2.id.get, site1.id.get, item1.id.get.id, 2)
val itemClass1 = 1L
val box1 = inject[ShippingBoxRepo].createNew(site1.id.get, itemClass1, 10, "小箱")
val box2 = inject[ShippingBoxRepo].createNew(site2.id.get, itemClass1, 3, "小箱")
val shipping1 = inject[ShippingFeeRepo].createNew(box1.id.get, CountryCode.JPN, JapanPrefecture.東京都.code)
val shipping2 = inject[ShippingFeeRepo].createNew(box2.id.get, CountryCode.JPN, JapanPrefecture.東京都.code)
inject[ShippingFeeHistoryRepo].createNew(
shipping1.id.get, tax2.id.get, BigDecimal(1234), None, date("9999-12-31")
)
inject[ShippingFeeHistoryRepo].createNew(
shipping2.id.get, tax2.id.get, BigDecimal(2345), Some(BigDecimal(2000)), date("9999-12-31")
)
inject[ShippingFeeHistoryRepo].feeBySiteAndItemClass(
CountryCode.JPN, JapanPrefecture.東京都.code,
ShippingFeeEntries()
.add(site1, itemClass1, 3)
.add(site2, itemClass1, 5)
)
implicit val storeUserRepo = inject[StoreUserRepo]
val cart1 = inject[ShoppingCartItemRepo].listItemsForUser(
Ja,
LoginSession(user1, None, 0)
)._1
val addr1 = Address.createNew(
countryCode = CountryCode.JPN,
firstName = "FirstName",
lastName = "LastName",
zip1 = "123",
prefecture = JapanPrefecture.東京都,
address1 = "Address1",
address2 = "Address2",
tel1 = "12345678"
)
val cart2 = inject[ShoppingCartItemRepo].listItemsForUser(
Ja,
LoginSession(user2, None, 0)
)._1
val addr2 = Address.createNew(
countryCode = CountryCode.JPN,
firstName = "FirstName2",
lastName = "LastName2",
zip1 = "123",
prefecture = JapanPrefecture.東京都,
address1 = "Address21",
address2 = "Address22",
tel1 = "1234567890"
)
val shippingDate1 = ShippingDate(
Map(
site1.id.get -> ShippingDateEntry(site1.id.get, date("2013-02-03")),
site2.id.get -> ShippingDateEntry(site2.id.get, date("2013-05-03"))
)
)
val shippingDate2 = ShippingDate(
Map(
site1.id.get -> ShippingDateEntry(site1.id.get, date("2013-02-04"))
)
)
val persister = inject[TransactionPersister]
implicit val taxRepo = inject[TaxRepo]
val t = persister.persist(
Transaction(
user1.id.get, currencyInfo.Jpy, cart1, Some(addr1),
inject[controllers.Shipping].shippingFee(addr1, cart1), shippingDate1,
now = date("2013-01-31")
)
)
val tranNo1: Long = t._1
val taxesBySite1: immutable.Map[Site, immutable.Seq[TransactionLogTax]] = t._2
val t2 = persister.persist(
Transaction(
user2.id.get, currencyInfo.Jpy, cart2, Some(addr2),
inject[controllers.Shipping].shippingFee(addr2, cart2), shippingDate2,
now = date("2013-03-01")
)
)
val tranNo2: Long = t2._1
val taxesBySite2: immutable.Map[Site, immutable.Seq[TransactionLogTax]] = t2._2
val ptran1 = persister.load(tranNo1, Ja)
val ptran2 = persister.load(tranNo2, Ja)
val siteUser1 = inject[SiteUserRepo].createNew(user1.id.get, site1.id.get)
val siteUser2 = inject[SiteUserRepo].createNew(user1.id.get, site2.id.get)
doWith(inject[TransactionSummary].listByPeriod(siteId = Some(siteUser1.siteId), yearMonth = YearMonth(2013, 1, "show"))) { s =>
s.size === 1
doWith(s(0)) { e =>
e.transactionId === tranNo1
e.transactionTime === ptran1.header.transactionTime
e.totalAmount === BigDecimal(119 + 1234)
e.address === Some(addr1.copy(email = user1.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
}
doWith(inject[TransactionSummary].listByPeriod(siteId = Some(siteUser1.siteId), yearMonth = YearMonth(2013, 3, "show"))) { s =>
s.size === 1
doWith(s(0)) { e =>
e.transactionId === tranNo2
e.transactionTime === ptran2.header.transactionTime
e.totalAmount === BigDecimal(119 * 2 + 1234)
e.address === Some(addr2.copy(email = user2.email))
e.siteName === "商店1"
e.shippingFee === BigDecimal(1234)
e.status === TransactionStatus.ORDERED
}
}
}
}
}
}
|
ruimo/store2
|
test/models/TransactionSummarySpec.scala
|
Scala
|
apache-2.0
| 18,623
|
package com.github.mdr.mash.ns.core
import com.github.mdr.mash.Singletons
import com.github.mdr.mash.classes.{ Field, MashClass, NewStaticMethod }
import com.github.mdr.mash.compiler.CompilationUnit
import com.github.mdr.mash.evaluator._
import com.github.mdr.mash.functions.{ BoundParams, MashMethod, ParameterModel }
import com.github.mdr.mash.inference.Type
import com.github.mdr.mash.ns.os.PathClass
import com.github.mdr.mash.ns.time.DateTimeClass
import com.github.mdr.mash.runtime.{ MashBoolean, MashObject, MashString, MashValue }
object HistoryEntryClass extends MashClass("core.HistoryEntry") {
private lazy val scriptExecutor = Singletons.scriptExecutor
object Fields {
val Session = Field("session", Some("ID of shell session"), StringClass)
val CommandNumber = Field("commandNumber", Some("Number of the command within the session"), NumberClass)
val Timestamp = Field("timestamp", Some("Time command was executed"), DateTimeClass)
val Command = Field("command", Some("Command"), StringClass)
val Mish = Field("mish", Some("Whether the command was executed in mish mode"), BooleanClass)
val Result = Field("result", Some("Result of the command (if available, else null)"), Type.Any)
val WorkingDirectory = Field("workingDirectory", Some("Directory where the command was executed"), StringClass taggedWith PathClass)
}
import Fields._
override val fields = Seq(Session, CommandNumber, Timestamp, Command, Mish, Result, WorkingDirectory)
override val methods = Seq(
ReexecuteMethod)
override val staticMethods = Seq(NewStaticMethod(this))
object ReexecuteMethod extends MashMethod("reexecute") {
val params = ParameterModel.Empty
def call(target: MashValue, boundParams: BoundParams): MashValue = {
val entryObject = target.asInstanceOf[MashObject]
val command = entryObject.get(Command).getOrElse(
throw EvaluatorException("Invalid history entry")).asInstanceOf[MashString].s
val mish = entryObject.get(Mish).get.asInstanceOf[MashBoolean].value
scriptExecutor.runUnit(CompilationUnit(command, "eval", mish = mish))
}
override def summaryOpt = Some("Re-execute the command run in this history entry")
}
override def summaryOpt = Some("An entry in Mash command history")
}
|
mdr/mash
|
src/main/scala/com/github/mdr/mash/ns/core/HistoryEntryClass.scala
|
Scala
|
mit
| 2,297
|
/* Copyright (C) 2008-2010 Univ of Massachusetts Amherst, Computer Science Dept
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://code.google.com/p/factorie/
This software is provided under the terms of the Eclipse Public License 1.0
as published by http://www.opensource.org. For further information,
see the file `LICENSE.txt' included with this distribution. */
package cc.factorie
/** A Variable containing a single Boolean value, which might be mutable or immutable.
@see BooleanVariable
@see BooleanObservation
@author Andrew McCallum */
trait BooleanVar extends CategoricalVar[Boolean] {
type VariableType <: BooleanVar
override def value = (intValue == 1) // Efficiently avoid a lookup in the domain
def booleanValue = (intValue == 1) // Alias for the above method
//def booleanValue: Boolean = (intValue == 1)
def ^(other:BooleanVar):Boolean = value && other.value
def v(other:BooleanVar):Boolean = value || other.value
def ==>(other:BooleanVar):Boolean = !value || other.value
def unary_!(): Boolean = !value
override def toString = if (intValue == 0) printName+"(false)" else printName+"(true)"
type DomainType <: BooleanDomain[VariableType]
class DomainClass extends BooleanDomain
// Domain is not in subclasses: all BooleanValue variables share the same domain.
}
/** A class for mutable Boolean variables.
@author Andrew McCallum */
class BooleanVariable(initialValue:Boolean = false) extends CategoricalVariable(initialValue) with BooleanVar {
type VariableType <: BooleanVariable
// Avoid CategoricalVariable's HashMap lookup
override final def set(newValue:Boolean)(implicit d: DiffList): Unit = set(if (newValue) 1 else 0)
}
/** A class for variables with immutable Boolean values.
@author Andrew McCallum */
class BooleanObservation(theValue:Boolean) extends CategoricalObservation(theValue) with BooleanVar {
type VariableType <: BooleanObservation
}
// The next two are versions that take convenient constructor arguments.
// TODO Are we happy with their names? "Bool"? Might someone want/expect to simply extend BooleanVariable(myflag) ??
// /** A variable class for boolean values, defined specially for convenience.
// If you have several different "types" of booleans, you might want to subclass this to enable type safety checks.
// This class allowed variable-value coordination by overriding the 'setByIndex' method; by contrast the 'Bool' class does not. */
// class CoordinatedBoolVariable(initialValue: Boolean) extends BooleanVariable {
// def this() = this(false)
// type VariableType <: CoordinatedBoolVariable
// setByIndex(if (initialValue == true) 1 else 0)(null)
// }
// /** A variable class for boolean values, defined specially for convenience.
// If you have several different "types" of booleans, you might want to subclass this to enable type safety checks. */
// // TODO Should I rename this BoolVariable for consistency?
// class BoolVariable(b: Boolean) extends CoordinatedBoolVariable(b) with UncoordinatedCategoricalVariable {
// def this() = this(false)
// type VariableType <: BoolVariable
// }
// Provide an alias with a shorter name
// TODO Consider removing this for uniformity and simplicity. We could make up for it by introducing an implicit conversion from scala.Boolean to BooleanObservation.
class Bool(b:Boolean = false) extends BooleanVariable(b) {
type VariableType <: Bool
//def this() = this(false)
}
// class CoordinatedBool(b:Boolean) extends CoordinatedBoolVariable(b) {
// def this() = this(false)
// }
class BooleanDomain[V<:BooleanVar](implicit m:Manifest[V]) extends CategoricalDomain[V] {
this += false
this += true
freeze
override def apply(index:Int) = index == 1
override def get(index:Int) = index == 1
override def index(entry:Boolean) = if (entry) 1 else 0
override def getIndex(entry:Boolean) = if (entry) 1 else 0
}
// TODO Consider renaming this 'object BooleanObservation'
object Bool {
val t = new BooleanObservation(true) // TODO This should be BoolObservation! Because we wouldn't want t.set(false)!!!
val f = new BooleanObservation(false)
def apply(b: Boolean) = if (b) t else f
}
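// --- Hypothetical usage sketch (not part of the original file) ---
// Exercises the Bool factory and the operators defined on BooleanVar above. Note that,
// as defined in this file, `^` is conjunction (not xor), `v` is disjunction and `==>`
// is implication; domain initialization is assumed to work as elsewhere in FACTORIE.
object BooleanVarExample {
  def main(args: Array[String]): Unit = {
    val t = Bool(true)  // the shared true observation
    val f = Bool(false) // the shared false observation
    println(t.booleanValue) // true
    println(t ^ f)          // false: both values must be true
    println(t v f)          // true: at least one value is true
    println(t ==> f)        // false: true does not imply false
  }
}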
|
andrewmilkowski/factorie
|
src/main/scala/cc/factorie/VariableBoolean.scala
|
Scala
|
epl-1.0
| 4,265
|
package codechicken.multipart.handler
import cpw.mods.fml.common.Mod
import cpw.mods.fml.common.event.FMLPostInitializationEvent
import cpw.mods.fml.common.event.FMLPreInitializationEvent
import cpw.mods.fml.common.event.FMLInitializationEvent
import cpw.mods.fml.common.Mod.EventHandler
import cpw.mods.fml.common.event.FMLServerAboutToStartEvent
import codechicken.multipart.MultiPartRegistry
@Mod(modid = "ForgeMultipart", acceptedMinecraftVersions = "[1.7.10]", modLanguage = "scala")
object MultipartMod
{
@EventHandler
def preInit(event: FMLPreInitializationEvent) {
MultipartProxy.preInit(event.getModConfigurationDirectory, event.getModLog)
}
@EventHandler
def init(event: FMLInitializationEvent) {
MultipartProxy.init()
}
@EventHandler
def postInit(event: FMLPostInitializationEvent) {
if (MultiPartRegistry.required) {
MultiPartRegistry.postInit()
MultipartProxy.postInit()
}
}
@EventHandler
def beforeServerStart(event: FMLServerAboutToStartEvent) {
MultiPartRegistry.beforeServerStart()
}
}
|
Chicken-Bones/ForgeMultipart
|
src/codechicken/multipart/handler/MultipartMod.scala
|
Scala
|
lgpl-2.1
| 1,119
|
package text.vector.wordembedding.fastText
import java.io.{File, IOException, PrintWriter}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, Paths}
import ir.fulltext.indri.{IndriResult, TrecText}
import text.analyzer.JapaneseSentenceSplitter
import text.analyzer.mor.mecab.UnidicMecab
import text.{StringNone, StringOption, StringSome}
import util.Config
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.Source
import scala.sys.process.Process
import scala.xml.{NodeSeq, XML}
/**
* <pre>
* Created on 2016/12/24.
* </pre>
*
* @author K.Sakamoto
*/
object FastTextVectorGenerator {
@throws[Exception]
def main(args: Array[String]): Unit = {
println(">> fastText Word Vector Generating")
val indriResultMap: mutable.Map[String, IndriResult] = mutable.Map.empty[String, IndriResult]
Config.trecTextFormatData foreach {
path: String =>
println(path.toString)
Paths.get(path).toFile.listFiles foreach {
file: File =>
indriResultMap ++=
TrecText.toIndriResultMap(Source.fromFile(file).getLines, StringNone, Nil, indriResultMap)
}
}
val builder: StringBuilder = new StringBuilder()
indriResultMap foreach {
case (_, indriResult) =>
indriResult.text match {
case StringSome(t) =>
builder.append(t)
case StringNone =>
// Do nothing
}
}
//
val mEssayExamFiles = ListBuffer.empty[File]
Config.essayExamDirOpt match {
case Some(essayExamDir) =>
val essayExamDirFile: File = new File(essayExamDir)
if (essayExamDirFile.canRead && essayExamDirFile.isDirectory) {
essayExamDirFile.listFiles foreach {
case file: File if file.canRead && file.isFile && file.getName.endsWith(".xml") =>
mEssayExamFiles += file
case _ =>
// Do nothing
}
}
case None =>
// Do nothing
}
mEssayExamFiles.result foreach {
file: File =>
println(file.getName)
val xml: NodeSeq = XML.loadFile(file)
xml \\ "answer_section" foreach {
answerSection: NodeSeq =>
answerSection \\ "instruction" \\ "p" foreach {
p: NodeSeq =>
//println(p.text.trim)
builder.append(p.text.trim)
}
builder.append ({
for (keyword <- answerSection \\ "keyword_set" \\ "keyword") yield {
keyword.text.trim
}
}.mkString(" ", " ", " "))
val answerSet: NodeSeq = answerSection \\ "answer_set" \\ "answer"
answerSet foreach {
answer: NodeSeq =>
answer \\ "expression_set" \\ "expression" foreach {
expression: NodeSeq =>
//println(expression.text.trim)
builder.append(expression.text.trim)
}
}
}
}
val dataPath: Path = Paths.get(Config.fastTextResource)
val writer: PrintWriter = new PrintWriter(Files.newBufferedWriter(dataPath, StandardCharsets.UTF_8))
try {
for (sentenceTmp <- JapaneseSentenceSplitter.split(StringOption(builder.result))) {
UnidicMecab.extractWords(StringOption(sentenceTmp.text)) foreach {
word: String =>
if (word != " " && word != " ") {
writer.print(word.concat(" "))
}
}
}
} catch {
case e: IOException =>
e.printStackTrace()
} finally {
if (Option(writer).nonEmpty) {
try {
writer.close()
} catch {
case e: IOException =>
e.printStackTrace()
}
}
}
val modelPath: Path = Paths.get(Config.fastTextModel)
Process(Seq[String](
"fasttext",
"skipgram",
"-minCount", 1.toString,
"-input", dataPath.toAbsolutePath.toString,
"-output", modelPath.toAbsolutePath.toString
)).run
}
}
|
ktr-skmt/FelisCatusZero
|
src/main/scala/text/vector/wordembedding/fastText/FastTextVectorGenerator.scala
|
Scala
|
apache-2.0
| 4,061
|
package scutil.text.extension
import scutil.text._
object StringContextImplicits extends StringContextImplicits
trait StringContextImplicits {
implicit final class TextStringContextExt(peer:StringContext) {
/** applies Text.stripMarginOnly */
def strip(args:String*):String = Text stripMarginOnly peer.s(args:_*)
def tb(args:String*):String = Block.generate(peer.parts map StringContext.processEscapes, args)
def rtb(args:String*):String = Block.generate(peer.parts, args)
}
}
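// --- Hypothetical usage sketch (not part of the original file) ---
// Illustrates the `strip` interpolator defined above. It assumes Text.stripMarginOnly
// removes a leading '|' margin in the spirit of String#stripMargin; that margin behaviour
// is an assumption, not something documented in this file.
object StringContextUsageExample {
  import StringContextImplicits._
  def main(args: Array[String]): Unit = {
    val who: String = "world"
    val text: String =
      strip"""|Hello, $who!
              |Goodbye."""
    println(text)
  }
}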
|
ritschwumm/scutil
|
modules/core/src/main/scala/scutil/text/extension/StringContextImplicits.scala
|
Scala
|
bsd-2-clause
| 492
|
package ucesoft.cbm.formats
import ucesoft.cbm.peripheral.bus.BusDataIterator
import ucesoft.cbm.peripheral.drive.Floppy
import ucesoft.cbm.cpu.Memory
import java.io.RandomAccessFile
import scala.collection.mutable.ListBuffer
import java.util.StringTokenizer
import java.io.FileNotFoundException
import java.io.IOException
import java.nio.file.Files
import java.io.File
import java.nio.file.StandardCopyOption
object Diskette {
object FileType extends Enumeration {
val DEL, SEQ, PRG, USR, REL, CBM = Value
}
case class DirEntry(fileType: FileType.Value, fileName: String, t: Int, s: Int, sizeInSectors: Int,entryTrack:Int,entrySector:Int,entryPos:Int)
case class BamInfo(diskName: String, diskID: String, dosType: String,singleSide:Boolean,freeSectors:Int)
case class FileData(fileName: String, startAddress: Int, data: Array[Int]) {
def iterator = {
val buffer = if (startAddress != -1) Array.ofDim[Int](data.length + 2) else data
if (startAddress != -1) {
buffer(0) = startAddress % 256
buffer(1) = startAddress / 256
Array.copy(data, 0, buffer, 2, data.length)
}
new BusDataIterator {
private[this] var index = 0
override def hasNext = index < buffer.length
override def next = {
val value = buffer(index)
index += 1
value
}
def isLast = index == buffer.length - 1
def getPerc = (100 * index.toFloat / buffer.length).toInt
def goto(pos:Int) : Unit = {
index = pos
}
}
}
}
def fileNameMatch(fileNameToSearch:String,fileName:String) : Boolean = {
var i = 0
while (i < fileNameToSearch.length) {
val a = fileNameToSearch.charAt(i)
if (a == '*') return true
if (i >= fileName.length) return false
val b = fileName.charAt(i)
if (a == '?') i += 1
else
if (a == b) i += 1
else return false
}
fileNameToSearch.length == fileName.length
}
// factory method
def apply(fileName:String,load : Boolean = true) : Diskette = {
val upper = fileName.toUpperCase
if (upper.endsWith(".D64") || upper.endsWith(".D71")) new D64_D71(fileName,load)
else
if (upper.endsWith(".D81") ) new D81(fileName)
else
if (upper.endsWith(".G64") ) new G64(fileName)
else throw new IllegalArgumentException("Unsupported file format")
}
def makeEmptyDisk(file:String) : Unit = {
val p = file.lastIndexOf('.')
if (p == -1) throw new IllegalArgumentException("File name must have a valid extension")
val ext = file.substring(p + 1).toUpperCase
val diskRes = ext match {
case "G64" =>
"emptyDisk.g64"
case "D64" =>
"emptyDisk.d64"
case "D71" =>
"emptyDisk.d71"
case "D81" =>
"emptyDisk.d81"
case _ =>
throw new IllegalArgumentException(s"Unsupported disk format: $ext")
}
val emptyDisk = getClass.getResourceAsStream(s"/resources/$diskRes")
if (emptyDisk == null) throw new IllegalArgumentException(s"Cannot find '$diskRes'")
Files.copy(emptyDisk,new File(file).toPath,StandardCopyOption.REPLACE_EXISTING)
}
}
abstract class Diskette extends Floppy {
import Diskette._
protected val BYTES_PER_SECTOR = 256
protected val DIR_TRACK = 18
protected val DIR_SECTOR = 1
protected val BAM_SECTOR = 0
val canBeEmulated : Boolean
protected val disk : RandomAccessFile
protected def absoluteSector(t:Int,s:Int) : Int = 0
protected def makeDirEntryFromBuffer(buffer:Array[Byte],t:Int,s:Int,pos:Int) : DirEntry = {
val fileType = FileType(buffer(2) & 7)
val track = buffer(3)
val sector = buffer(4)
val fileName = new StringBuilder
var a0Found = false
var i = 5
val a0 = 0xA0.toByte
while (i < 0x15 && !a0Found) {
if (buffer(i) == a0) a0Found = true
else fileName.append((buffer(i) & 0xFF).toChar)
i += 1
}
val size = (buffer(0x1E).toInt & 0xFF) + (buffer(0x1F).toInt & 0xFF) * 256 // low byte + high byte * 256
DirEntry(fileType, fileName.toString, track, sector, size,t,s,pos)
}
def directories : List[DirEntry] = {
var t = DIR_TRACK
var s = DIR_SECTOR
var dirs = new ListBuffer[DirEntry]
var readNextSector = true
val buffer = Array.ofDim[Byte](0x20)
while (readNextSector) {
val currentT = t
val currentS = s
disk.seek(absoluteSector(t, s) * BYTES_PER_SECTOR)
var firstEntryOfSector = true
var entryIndex = 0
var readNextEntry = true
while (readNextEntry) {
disk.read(buffer)
if (firstEntryOfSector) {
firstEntryOfSector = false
val nextT = buffer(0)
val nextS = buffer(1)
if (nextT != 0) {
t = nextT
s = nextS
}
else readNextSector = false
}
entryIndex += 1
if (entryIndex == 9 || buffer.forall(_ == 0)) {
readNextEntry = false // last+1 entry of this sector
}
else dirs += makeDirEntryFromBuffer(buffer,currentT,currentS,entryIndex - 1)
}
}
dirs.toList
}
// optional
def bam : BamInfo
def readBlock(track:Int,sector:Int) = {
disk.seek(absoluteSector(track,sector) * BYTES_PER_SECTOR)
val buffer = Array.ofDim[Byte](BYTES_PER_SECTOR)
disk.read(buffer)
buffer
}
// =======================================================================
def loadInMemory(mem: Memory, fileName: String, relocate: Boolean,c64Mode:Boolean,drive:Int) : Unit = {
load(fileName) match {
case FileData(fn, startAddress, data) =>
val (start,end) = ProgramLoader.loadPRG(mem,data,if (relocate) Some(startAddress) else None,c64Mode,drive,fileName)
println(s"Loaded $fn from $start to $end")
case _ =>
}
}
protected def loadPRG(entry: DirEntry) = {
val buffer = Array.ofDim[Byte](BYTES_PER_SECTOR)
val data = new ListBuffer[Int]
var lastChunk = false
var isFirstChunk = true
var startAddress = 0
var t = entry.t
var s = entry.s
while (!lastChunk) {
disk.seek(absoluteSector(t, s) * BYTES_PER_SECTOR)
disk.read(buffer)
var chunkIndex = 0
t = buffer(0)
s = buffer(1).toInt & 0xFF
lastChunk = t == 0
if (isFirstChunk) {
isFirstChunk = false
startAddress = (buffer(2).toInt & 0xFF) + (buffer(3).toInt & 0xFF) * 256
chunkIndex += 4
} else chunkIndex += 2
val lastByte = if (lastChunk) s else 255
while (chunkIndex <= lastByte) {
data += buffer(chunkIndex).toInt & 0xFF
chunkIndex += 1
}
}
FileData(entry.fileName, startAddress, data.toArray)
}
protected def loadSEQ(entry: DirEntry) = {
val buffer = Array.ofDim[Byte](BYTES_PER_SECTOR)
val data = new ListBuffer[Int]
var lastChunk = false
var t = entry.t
var s = entry.s
while (!lastChunk) {
disk.seek(absoluteSector(t, s) * BYTES_PER_SECTOR)
disk.read(buffer)
var chunkIndex = 0
t = buffer(0)
s = buffer(1).toInt & 0xFF
lastChunk = t == 0
chunkIndex += 2
val lastByte = if (lastChunk) s else 255
while (chunkIndex <= lastByte) {
data += buffer(chunkIndex).toInt & 0xFF
chunkIndex += 1
}
}
FileData(entry.fileName, -1, data.toArray)
}
def load(fileName: String,fileType:FileType.Value = FileType.PRG) = {
if (fileName.startsWith("$")) formatDirectoriesAsPRG(fileName)
else {
val dpos = fileName.indexOf(":")
val st = new StringTokenizer(if (dpos != -1) fileName.substring(dpos + 1) else fileName,",")
val fn = st.nextToken
val ft = if (st.hasMoreTokens && st.nextToken == "S") FileType.SEQ else fileType
directories find { e =>
ft == e.fileType && fileNameMatch(fn,e.fileName)
} match {
case None => throw new FileNotFoundException(fileName)
case Some(entry) =>
entry.fileType match {
case FileType.PRG => loadPRG(entry)
case FileType.SEQ => loadSEQ(entry)
case _ => throw new IOException("Bad file type: " + entry.fileType)
}
}
}
}
private def formatDirectoriesAsPRG(fileName:String) = {
val colonPos = fileName.indexOf(":")
val dirs = if (colonPos == -1) directories else {
val filter = fileName.substring(colonPos + 1)
val asteriskPos = filter.indexOf('*')
directories filter { fn =>
if (asteriskPos == -1) fn.fileName == filter else fn.fileName.startsWith(filter.substring(0,asteriskPos))
}
}
val out = new ListBuffer[Int]
val _bam = bam
// set start address to $0801
var ptr = 0x801
// write next line address
ptr += 30
out.append(ptr & 0xFF) // L
out.append(ptr >> 8) // H
// write label
out.append(0) // drive L
out.append(0) // drive H
out.append(0x12) // RVS ON
out.append(0x22) // "
for(i <- 0 until 16) {
if (i < _bam.diskName.length) out.append(_bam.diskName.charAt(i)) else out.append(0x20)
}
out.append(0x22) // "
out.append(0x20)
out.append(_bam.diskID(0))
out.append(_bam.diskID(1))
out.append(0x20)
out.append(_bam.dosType(0))
out.append(_bam.dosType(1))
out.append(0x00) // EOL
for(dir <- dirs) {
val blanks = if (dir.sizeInSectors < 10) 3
else
if (dir.sizeInSectors < 100) 2
else 1
// write next line address
ptr += blanks + 2 + 2 + 18 + 5
val endBlanks = 32 - (blanks + 2 + 2 + 18 + 5)
out.append(ptr & 0xFF) // L
out.append(ptr >> 8) // H
// write blocks
out.append(dir.sizeInSectors & 0xFF)
out.append(dir.sizeInSectors >> 8)
// blanks after blocks
for(i <- 1 to blanks) out.append(0x20)
out.append(0x22) // "
for(i <- 0 until dir.fileName.length) out.append(dir.fileName.charAt(i))
out.append(0x22) // "
for(i <- 1 to 16 - dir.fileName.length) out.append(0x20)
out.append(0x20) // space
val fileType = dir.fileType.toString
for(i <- 0 until fileType.length) out.append(fileType.charAt(i))
for(i <- 1 to endBlanks) out.append(0x20)
out.append(0x00) // EOL
}
val blocksFreeText = "BLOCKS FREE."
// write next line address
ptr += 2 + 2 + blocksFreeText.length + 1
out.append(ptr & 0xFF) // L
out.append(ptr >> 8) // H
val blocksFree = bam.freeSectors
// write block free
out.append(blocksFree & 0xFF) // L
out.append(blocksFree >> 8) // H
for(i <- 0 until blocksFreeText.length) out.append(blocksFreeText.charAt(i))
out.append(0x00) // EOL
out.append(0x00)
out.append(0x00)
FileData("$",0x801,out.toArray)
}
}
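// --- Hypothetical usage sketch (not part of the original file) ---
// Shows the wildcard rules implemented by Diskette.fileNameMatch above:
// '*' matches any remaining characters and '?' matches exactly one character.
object FileNameMatchExample {
  def main(args: Array[String]): Unit = {
    println(Diskette.fileNameMatch("GAME*", "GAME ONE")) // true: '*' matches the rest
    println(Diskette.fileNameMatch("G?ME", "GAME"))      // true: '?' matches the 'A'
    println(Diskette.fileNameMatch("GAME", "GAMES"))     // false: lengths differ
  }
}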
|
abbruzze/kernal64
|
Kernal64/src/ucesoft/cbm/formats/Diskette.scala
|
Scala
|
mit
| 10,842
|
package fpinscala.testing
import fpinscala.laziness.Stream
import fpinscala.state._
import fpinscala.parallelism._
import fpinscala.parallelism.Par.Par
import Gen._
import Prop._
import java.util.concurrent.{Executors,ExecutorService}
/*
The library developed in this chapter goes through several iterations. This file is just the
shell, which you can fill in and modify while working through the chapter.
*/
trait Status
case object Proven extends Status
case object Unfalsified extends Status
case object Exhausted extends Status
case class Prop(run: (MaxSize, TestCases, RNG) => Result) {
import fpinscala.state._
def &&(p: Prop): Prop = Prop {
(m,n,rng) =>
run(m,n,rng) match {
case Right(_) => p.run(m,n,rng)
case x => x
}
}
def ||(p: Prop): Prop = Prop {
(m,n,rng) =>
run(m,n,rng) match {
case Left(_) => p.run(m,n,rng)
case x => x
}
}
def run(p: Prop, maxSize: Int = 100, testCases: Int = 100, rng: RNG = RNG.Simple(System.currentTimeMillis)):Unit = {
p.run(maxSize, testCases, rng) match {
case Left(msg) => println("! test failed:\n" + msg)
case Right((Unfalsified, n)) =>
println("+ property unfalsified, ran " + n + " tests")
case Right((Proven, n)) =>
println("+ propterty proven, ran " + n + " tests")
case Right((Exhausted, n)) =>
println("+ property unfalsified up to max size, ran " + n + " tests")
}
}
}
object Prop {
import fpinscala.state._
import fpinscala.laziness._
type FailedCase = String
type SuccessCount = Int
type TestCases = Int
type MaxSize = Int
type Result = Either[FailedCase, (Status, SuccessCount)]
def forAll[A](gen: Gen[A])(f: A => Boolean): Prop = Prop {
(max: Int, n: Int, rng: RNG) => {
def go(i: Int, j: Int, s: Stream[A], onEnd: Int => Result): Result =
if (i == j) Right((Unfalsified, i))
else s match {
/*case Some(Some(h), t) =>
try {
if (f(h)) go(i+1, j, s, onEnd)
else Left(h.toString)
} catch {
case e: Throwable =>
}
case Some(None, _) => Right((Unfalsified, i))
case None => onEnd(i)*/
case _ => onEnd(i)
}
go(0, n/3, gen.exhausitive, i => Right((Proven, i))) match {
case Right((Unfalsified, _)) =>
val rands = randomStream(gen)(rng)
go(n/3, n, rands, i => Right((Unfalsified, i)))
case s => s
}
}
}
def randomStream[A](g: Gen[A])(rng: RNG): Stream[A] =
Stream.unfold(rng)(rng => Some(g.sample.run(rng)))
def forAll[A](g: SGen[A])(f: A => Boolean): Prop =
forAll(g.forSize)(f)
def forAll[A](g: Int => Gen[A])(f: A => Boolean): Prop = Prop {
(max,n,rng) =>
val casesPerSize = (n - 1) / max + 1
val props: Stream[Prop] =
Stream.from(0).take((n min max) + 1).map(i => forAll(g(i))(f))
val prop: Prop =
props.map(p => Prop { (max, n, rng) =>
p.run(max, casesPerSize, rng)
}).toList.reduce(_ && _)
prop.run(max,n,rng)
}
}
object Gen {
import fpinscala.state._
import fpinscala.laziness._
def unit[A](a: => A): Gen[A] =
Gen(State.unit(a), Stream(a))
def boolean: Gen[Boolean] =
Gen(State(RNG.boolean), Stream(true, false))
val uniform: Gen[Double] = Gen(State(RNG.double), Stream[Double]())
def choose(start: Int, stopExclusive: Int): Gen[Int] =
Gen(State(RNG.nonNegativeInt).map(n => start + n % (stopExclusive - start)), Stream(start to stopExclusive: _*))
def listOfN[A](n: Int, g: Gen[A]): Gen[List[A]] =
Gen(State.sequence(List.fill(n)(g.sample)), Stream.constant(g.exhausitive.toList))
def even(start: Int, stopExclusive: Int): Gen[Int] =
choose(start, stopExclusive).map(x => if (x % 2 == 0) x else x + 1)
def odd(start: Int, stopExclusive: Int): Gen[Int] =
choose(start, stopExclusive).map(x => if (x % 2 != 0) x else x + 1)
def sameParity(from: Int, to: Int): Gen[(Int, Int)] =
for {
x <- choose(from, to)
y <- if (x % 2 == 0) even(from, to) else odd(from, to)
} yield (x, y)
def listOfNViaMap2[A](n: Int, g: Gen[A]): Gen[List[A]] =
List.fill(n)(g).foldRight(Gen.unit(List[A]()))(_.map2(_)(_ :: _))
def union[A](g1: Gen[A], g2: Gen[A]): Gen[A] =
boolean.flatMap(x => if (x) g1 else g2)
def weighted[A](g1: (Gen[A],Double), g2: (Gen[A], Double)): Gen[A] = {
val ratio = g1._2 / (g1._2 + g2._2)
uniform.flatMap(x => if (x > ratio) g2._1 else g1._1)
}
}
case class Gen[+A](sample: State[RNG,A], exhausitive: Stream[A]) {
def map[B](f: A => B): Gen[B] =
Gen(sample.map(f), exhausitive.map(f))
def flatMap[B](f: A => Gen[B]): Gen[B] =
Gen(sample.flatMap(x => f(x).sample), exhausitive.flatMap(x => f(x).exhausitive))
def map2[B,C](b: Gen[B])(f: (A, B) => C): Gen[C] =
Gen(sample.map2(b.sample)(f), Stream())
def unsized: SGen[A] =
SGen(_ => this)
def listOfN(n: Int): Gen[List[A]] =
Gen(State.sequence(List.fill(n)(sample)), Stream.constant(exhausitive.toList))
}
case class SGen[+A](forSize: Int => Gen[A]) {
def map[B](f: A => B): SGen[B] =
SGen(forSize andThen (_ map f))
def flatMap[B](f: A => Gen[B]): SGen[B] =
SGen(forSize andThen (_ flatMap f))
def map2[B,C](b: SGen[B])(f: (A, B) => C):SGen[C] =
SGen((n: Int) => forSize(n).map2(b.forSize(n))(f))
}
object SGen {
def listOf[A](g: Gen[A]): SGen[List[A]] =
SGen(n => g.listOfN(n))
}
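// --- Hypothetical usage sketch (not part of the original exercise file) ---
// Checks a simple property with the combinators defined above. Note that `run` is declared
// as an instance method taking the Prop to execute, so it is invoked on the property itself.
object GenUsageExample {
  def main(args: Array[String]): Unit = {
    val smallInts: Gen[Int] = Gen.choose(0, 100)
    val inRange: Prop = Prop.forAll(smallInts)(n => n >= 0 && n < 100)
    inRange.run(inRange) // prints whether the property was unfalsified/proven/exhausted
  }
}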
|
724399396/function-programming-in-scala
|
exercises/src/main/scala/fpinscala/testing/Gen.scala
|
Scala
|
mit
| 5,497
|
package com.plasmaconduit.convergence.counters
import com.twitter.algebird._
import com.twitter.algebird.Operators._
final case class GACounter(actors: Map[String, GCounter]) {
def inc(actor: String): GACounter = {
val counter = actors.get(actor).map(_.inc).getOrElse(GCounter(1))
GACounter(actors + Map(actor -> counter))
}
def toLong = actors.values.map(_.toLong).sum
}
object GACounter {
object GACounterSemigroup extends Semigroup[GACounter] {
def plus(l: GACounter, r: GACounter): GACounter = {
GACounter(l.actors + r.actors)
}
}
object GACounterMonoid extends Monoid[GACounter] {
def zero: GACounter = GACounter(Map())
def plus(l: GACounter, r: GACounter): GACounter = {
GACounterSemigroup.plus(l, r)
}
}
implicit val gaCounterSemigroup: Semigroup[GACounter] = GACounterSemigroup
implicit val gaCounterMonoid: Monoid[GACounter] = GACounterMonoid
}
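// --- Hypothetical usage sketch (not part of the original file) ---
// Combines two counters through the Monoid instance above. It assumes GCounter(1).toLong == 1,
// which is not shown in this file.
object GACounterExample {
  def main(args: Array[String]): Unit = {
    import GACounter.gaCounterMonoid
    val alice = gaCounterMonoid.zero.inc("alice") // one increment attributed to "alice"
    val bob = gaCounterMonoid.zero.inc("bob")     // one increment attributed to "bob"
    val merged = gaCounterMonoid.plus(alice, bob) // disjoint actor maps, so a plain union
    println(merged.toLong)                        // 2 under the assumption above
  }
}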
|
plasmaconduit/convergence
|
src/main/scala/com/plasmaconduit/convergence/counters/GACounter.scala
|
Scala
|
mit
| 921
|
package io.citrine.lolo.trees.multitask
import io.citrine.lolo.PredictionResult
import io.citrine.lolo.linear.GuessTheMeanLearner
import io.citrine.lolo.trees.regression.RegressionTrainingLeaf
import io.citrine.lolo.trees.splits.{MultiTaskSplitter, NoSplit}
import io.citrine.lolo.trees.{InternalModelNode, ModelNode, TrainingLeaf}
import scala.collection.mutable
import scala.util.Random
/**
* Node in a multi-task training tree, which can produce nodes for its model trees.
* Splits are chosen using a MultiTaskSplitter, which considers the sum impurity decrease across all tasks.
*
* @param inputs data on which to select splits and form models
* @param numFeatures to randomly select from at each split (negative values indicate that all features should be considered)
* @param maxDepth to grow the tree to
* @param minInstances minimum number of training instances per leaf
* @param randomizePivotLocation whether to generate splits randomly between the data points
* @param rng random number generator, for reproducibility
*/
class MultiTaskTrainingNode(
inputs: Seq[(Vector[AnyVal], Array[AnyVal], Double)],
numFeatures: Int,
maxDepth: Int,
minInstances: Int,
randomizePivotLocation: Boolean = false,
rng: Random = Random
) {
// Compute a split
val (split, deltaImpurity) = if (maxDepth <= 0) {
(new NoSplit, 0.0)
} else {
MultiTaskSplitter(rng = rng, randomizePivotLocation = randomizePivotLocation).getBestSplit(
inputs,
numFeatures,
minInstances
)
}
// Try to construct left and right children
val (leftChild: Option[MultiTaskTrainingNode], rightChild: Option[MultiTaskTrainingNode]) = split match {
case _: NoSplit => (None, None)
case _: Any =>
val (leftData, rightData) = inputs.partition(row => split.turnLeft(row._1))
(
Some(
new MultiTaskTrainingNode(
leftData,
numFeatures,
maxDepth - 1,
minInstances,
randomizePivotLocation,
rng = rng
)
),
Some(
new MultiTaskTrainingNode(
rightData,
numFeatures,
maxDepth - 1,
minInstances,
randomizePivotLocation,
rng = rng
)
)
)
}
// get feature importance for the i'th label
def getFeatureImportance(index: Int): mutable.ArraySeq[Double] = {
// Filter out "missing" values, which are NaN for regression and 0 for encoded categoricals
val label = inputs.head._2(index)
val reducedData = if (label.isInstanceOf[Double]) {
inputs.map(x => (x._1, x._2(index).asInstanceOf[Double], x._3)).filterNot(_._2.isNaN)
} else {
inputs.map(x => (x._1, x._2(index).asInstanceOf[Char], x._3)).filter(_._2 > 0)
}
// Compute the valid data for each child
val (left, right) = reducedData.partition(r => split.turnLeft(r._1))
// get feature importance from the children if they exist, or from this node if it is a leaf
(leftChild, rightChild) match {
case (Some(theLeftChild), Some(theRightChild)) if left.nonEmpty && right.nonEmpty =>
val ans =
theLeftChild.getFeatureImportance(index).zip(theRightChild.getFeatureImportance(index)).map(p => p._1 + p._2)
ans(split.getIndex()) = ans(split.getIndex()) + deltaImpurity
ans
case (Some(theLeftChild), _) if left.nonEmpty =>
theLeftChild.getFeatureImportance(index)
case (_, Some(theRightChild)) if right.nonEmpty =>
theRightChild.getFeatureImportance(index)
case (_, _) =>
if (label.isInstanceOf[Double]) {
new RegressionTrainingLeaf(
reducedData.asInstanceOf[Seq[(Vector[AnyVal], Double, Double)]],
GuessTheMeanLearner(),
1
).getFeatureImportance()
} else {
new TrainingLeaf[Char](
reducedData.asInstanceOf[Seq[(Vector[AnyVal], Char, Double)]],
GuessTheMeanLearner(),
1
).getFeatureImportance()
}
}
}
// Construct the model node for the `index`th label
def getNode(index: Int): ModelNode[PredictionResult[Any]] = {
// Filter out "missing" values, which are NaN for regression and 0 for encoded categoricals
val label = inputs.head._2(index)
val reducedData = if (label.isInstanceOf[Double]) {
inputs.map(x => (x._1, x._2(index).asInstanceOf[Double], x._3)).filterNot(_._2.isNaN)
} else {
inputs.map(x => (x._1, x._2(index).asInstanceOf[Char], x._3)).filter(_._2 > 0)
}
// Compute the valid data for each child
val (left, right) = reducedData.partition(r => split.turnLeft(r._1))
// Construct an internal node if the children are defined and actually have valid data
(leftChild, rightChild) match {
case (Some(theLeftChild), Some(theRightChild)) if left.nonEmpty && right.nonEmpty =>
if (label.isInstanceOf[Double]) {
new InternalModelNode[PredictionResult[Double]](
split,
theLeftChild.getNode(index).asInstanceOf[ModelNode[PredictionResult[Double]]],
theRightChild.getNode(index).asInstanceOf[ModelNode[PredictionResult[Double]]],
outputDimension = 0, // Don't support multitask SHAP at this time.
trainingWeight = reducedData.length.toDouble
)
} else {
if (!label.isInstanceOf[Char]) throw new IllegalArgumentException("Training data wasn't double or char")
new InternalModelNode[PredictionResult[Char]](
split,
theLeftChild.getNode(index).asInstanceOf[ModelNode[PredictionResult[Char]]],
theRightChild.getNode(index).asInstanceOf[ModelNode[PredictionResult[Char]]],
outputDimension = 0, // Don't support multitask SHAP at this time.
trainingWeight = reducedData.length.toDouble
)
}
case (Some(theLeftChild), _) if left.nonEmpty =>
theLeftChild.getNode(index)
case (_, Some(theRightChild)) if right.nonEmpty =>
theRightChild.getNode(index)
case (_, _) =>
if (label.isInstanceOf[Double]) {
new RegressionTrainingLeaf(
reducedData.asInstanceOf[Seq[(Vector[AnyVal], Double, Double)]],
GuessTheMeanLearner(),
1
).getNode()
} else {
new TrainingLeaf[Char](
reducedData.asInstanceOf[Seq[(Vector[AnyVal], Char, Double)]],
GuessTheMeanLearner(),
1
).getNode()
}
}
}
}
|
CitrineInformatics/lolo
|
src/main/scala/io/citrine/lolo/trees/multitask/MultiTaskTrainingNode.scala
|
Scala
|
apache-2.0
| 6,580
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.h2o
import hex.deeplearning.{DeepLearning, DeepLearningModel, DeepLearningParameters}
import org.apache.spark.examples.h2o.DemoUtils._
import org.apache.spark.h2o._
import org.apache.spark.mllib.feature.{HashingTF, IDF, IDFModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext, SparkFiles, mllib}
import water.app.{ModelMetricsSupport, SparkContextSupport}
/**
* Demo for NYC meetup and MLConf 2015.
*
* It predicts spam text messages.
* Training dataset is available in the file smalldata/smsData.txt.
*/
object HamOrSpamDemo extends SparkContextSupport with ModelMetricsSupport {
val DATAFILE="smsData.txt"
val TEST_MSGS = Seq(
"Michal, beer tonight in MV?",
"We tried to contact you re your reply to our offer of a Video Handset? 750 anytime any networks mins? UNLIMITED TEXT?")
def main(args: Array[String]) {
val conf: SparkConf = configure("Sparkling Water Meetup: Ham or Spam (spam text messages detector)")
// Create SparkContext to execute application on Spark cluster
val sc = new SparkContext(conf)
// Register input file as Spark file
addFiles(sc, absPath("examples/smalldata/" + DATAFILE))
// Initialize H2O context
implicit val h2oContext = H2OContext.getOrCreate(sc)
import h2oContext._
// Initialize SQL context
implicit val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
// Data load
val data = load(sc, DATAFILE)
// Extract response spam or ham
val hamSpam = data.map( r => r(0))
val message = data.map( r => r(1))
// Tokenize message content
val tokens = tokenize(message)
// Build IDF model
var (hashingTF, idfModel, tfidf) = buildIDFModel(tokens)
// Merge response with extracted vectors
val resultRDD: DataFrame = hamSpam.zip(tfidf).map(v => SMS(v._1, v._2)).toDF
val table:H2OFrame = resultRDD
// Transform target column into
table.replace(table.find("target"), table.vec("target").toCategoricalVec).remove()
// Split table
val keys = Array[String]("train.hex", "valid.hex")
val ratios = Array[Double](0.8)
val frs = split(table, keys, ratios)
val (train, valid) = (frs(0), frs(1))
table.delete()
// Build a model
val dlModel = buildDLModel(train, valid)
// Collect model metrics
val trainMetrics = binomialMM(dlModel, train)
val validMetrics = binomialMM(dlModel, valid)
println(
s"""
|AUC on train data = ${trainMetrics.auc._auc}
|AUC on valid data = ${validMetrics.auc._auc}
""".stripMargin)
// Detect spam messages
TEST_MSGS.foreach(msg => {
println(
s"""
|"$msg" is ${if (isSpam(msg,sc, dlModel, hashingTF, idfModel)) "SPAM" else "HAM"}
""".stripMargin)
})
sc.stop()
}
/** Data loader */
def load(sc: SparkContext, dataFile: String): RDD[Array[String]] = {
sc.textFile(SparkFiles.get(dataFile)).map(l => l.split("\t")).filter(r => !r(0).isEmpty)
}
/** Text message tokenizer.
*
* Produces a bag of words representing the given message.
*
* @param data RDD of text messages
* @return RDD of bag of words
*/
def tokenize(data: RDD[String]): RDD[Seq[String]] = {
val ignoredWords = Seq("the", "a", "", "in", "on", "at", "as", "not", "for")
val ignoredChars = Seq(',', ':', ';', '/', '<', '>', '"', '.', '(', ')', '?', '-', '\'','!','0', '1')
val texts = data.map( r=> {
var smsText = r.toLowerCase
for( c <- ignoredChars) {
smsText = smsText.replace(c, ' ')
}
val words =smsText.split(" ").filter(w => !ignoredWords.contains(w) && w.length>2).distinct
words.toSeq
})
texts
}
/** Build a tf-idf model representing a text message. */
def buildIDFModel(tokens: RDD[Seq[String]],
minDocFreq:Int = 4,
hashSpaceSize:Int = 1 << 10): (HashingTF, IDFModel, RDD[mllib.linalg.Vector]) = {
// Hash strings into the given space
val hashingTF = new HashingTF(hashSpaceSize)
val tf = hashingTF.transform(tokens)
// Build term frequency-inverse document frequency
val idfModel = new IDF(minDocFreq = minDocFreq).fit(tf)
val expandedText = idfModel.transform(tf)
(hashingTF, idfModel, expandedText)
}
/** Builds DeepLearning model. */
def buildDLModel(train: Frame, valid: Frame,
epochs: Int = 10, l1: Double = 0.001, l2: Double = 0.0,
hidden: Array[Int] = Array[Int](200, 200))
(implicit h2oContext: H2OContext): DeepLearningModel = {
import h2oContext._
// Build a model
val dlParams = new DeepLearningParameters()
dlParams._model_id = water.Key.make("dlModel.hex")
dlParams._train = train
dlParams._valid = valid
dlParams._response_column = 'target
dlParams._epochs = epochs
dlParams._l1 = l1
dlParams._hidden = hidden
// Create a job
val dl = new DeepLearning(dlParams)
val dlModel = dl.trainModel.get
// Compute metrics on both datasets
dlModel.score(train).delete()
dlModel.score(valid).delete()
dlModel
}
/** Spam detector */
def isSpam(msg: String,
sc: SparkContext,
dlModel: DeepLearningModel,
hashingTF: HashingTF,
idfModel: IDFModel,
hamThreshold: Double = 0.5)
(implicit sqlContext: SQLContext, h2oContext: H2OContext):Boolean = {
import h2oContext._
import sqlContext.implicits._
val msgRdd = sc.parallelize(Seq(msg))
val msgVector: DataFrame = idfModel.transform(
hashingTF.transform (
tokenize (msgRdd))).map(v => SMS("?", v)).toDF
val msgTable: H2OFrame = msgVector
msgTable.remove(0) // remove first column
val prediction = dlModel.score(msgTable)
//println(prediction)
prediction.vecs()(1).at(0) < hamThreshold
}
}
/** Training message representation. */
case class SMS(target: String, fv: mllib.linalg.Vector)
|
nilbody/sparkling-water
|
examples/src/main/scala/org/apache/spark/examples/h2o/HamOrSpamDemo.scala
|
Scala
|
apache-2.0
| 6,861
|
package poly.algebra
import poly.algebra.factory._
import poly.algebra.specgroup._
/**
* Represents a multiplicative commutative monoid.
*
* An instance of this typeclass should satisfy the following axioms:
* - $lawMultiplicativeAssociativity
* - $lawMultiplicativeIdentity
* - $lawMultiplicativeCommutativity
* @author Tongfei Chen
*/
trait MultiplicativeCMonoid[@sp(fdi) X] extends MultiplicativeMonoid[X] with MultiplicativeCSemigroup[X] { self =>
override def asMonoidWithMul: CMonoid[X] = new CMonoid[X] {
def id = self.one
def op(x: X, y: X) = self.mul(x, y)
}
}
object MultiplicativeCMonoid extends ImplicitGetter[MultiplicativeCMonoid] {
def create[@sp(fdi) X](f: (X, X) => X, oneElem: X): MultiplicativeCMonoid[X] = new MultiplicativeCMonoid[X] {
def mul(x: X, y: X): X = f(x, y)
def one: X = oneElem
}
}
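// --- Hypothetical usage sketch (not part of the original file) ---
// Builds the multiplicative commutative monoid of Int (identity 1) with the `create`
// factory above and views it as a plain commutative monoid via asMonoidWithMul.
object MultiplicativeCMonoidExample {
  def main(args: Array[String]): Unit = {
    val intMul: MultiplicativeCMonoid[Int] = MultiplicativeCMonoid.create[Int](_ * _, 1)
    println(intMul.mul(6, 7))            // 42
    println(intMul.one)                  // 1
    val asCMonoid = intMul.asMonoidWithMul
    println(asCMonoid.op(3, intMul.one)) // 3: the identity law in the monoid view
  }
}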
|
ctongfei/poly-algebra
|
src/main/scala/poly/algebra/MultiplicativeCMonoid.scala
|
Scala
|
mit
| 852
|
import org.apache.spark.{rdd, SparkContext, SparkConf}
object simpleSpark {
def main(args: Array[String])
{
val conf = new SparkConf().setAppName("Simple Application")
val logFile = "FlumeData.1457910781124" // Should be some file on your system
val sc = new SparkContext(conf)
val lines = sc.textFile(logFile, 2)
val numAs = lines.filter(line => line.contains("a")).count()
val numBs = lines.filter(line => line.contains("b")).count()
println("Lines with a: %s, Lines with b: %s".format(numAs, numBs))
sc.stop()
}
}
|
alexciobanu/ExpertSummit
|
Streeming/src/main/scala/simpleSpark.scala
|
Scala
|
apache-2.0
| 545
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers
import monix.execution.Ack.{Continue, Stop}
import monix.execution.exceptions.DummyException
import monix.reactive.{BaseTestSuite, Observer}
object ContramapObserverSuite extends BaseTestSuite {
test("Observer.contramap equivalence with plain Observer") { implicit s =>
check1 { (xs: List[Int]) =>
var sum = 0
val plainObserver: Observer[Int] = new Observer[Int] {
def onError(ex: Throwable): Unit = ()
def onComplete(): Unit = sum += 100
def onNext(elem: Int) = {
sum += elem
Continue
}
}
val contramapObserver: Observer[Long] =
plainObserver.contramap(_.toInt)
val plainAck = plainObserver.onNextAll(xs)
val contraAck = contramapObserver.onNextAll(xs.map(_.toLong))
s.tick()
plainAck.syncTryFlatten(s) == Continue &&
contraAck.syncTryFlatten(s) == Continue &&
sum == xs.sum * 2
}
}
test("Observer.contramap protects against user code") { implicit s =>
val dummy = DummyException("dummy")
val out: Observer[Long] = (Observer.empty[Int]: Observer[Int])
.contramap(_ => throw dummy)
s.tick()
assertEquals(out.onNext(1), Stop)
}
test("Observer.contramap works") { implicit s =>
var isDone = 0
val intObserver: Observer[Int] = new Observer[Int] {
def onError(ex: Throwable): Unit = isDone += 1
def onComplete(): Unit = isDone += 1
def onNext(elem: Int) = Continue
}
val doubleObserver: Observer[Double] = intObserver.contramap(_.toInt)
assertEquals(doubleObserver.onNext(1.0), Continue)
doubleObserver.onComplete()
assertEquals(isDone, 1)
doubleObserver.onError(DummyException("dummy"))
assertEquals(isDone, 1)
assertEquals(doubleObserver.onNext(2.0), Stop)
}
}
|
monifu/monifu
|
monix-reactive/shared/src/test/scala/monix/reactive/observers/ContramapObserverSuite.scala
|
Scala
|
apache-2.0
| 2,497
|
package co.rc.authmanager.persistence.daos
import co.rc.authmanager.persistence.daos.base.DAO
import io.strongtyped.active.slick.Lens
import io.strongtyped.active.slick.Lens._
import slick.ast.BaseTypedType
/**
* Class that defines DAO implementation for RolesIps
*/
class RolesIpsDAO extends DAO {
import jdbcProfile.api._
override type Id = Int
override type Entity = RoleIp
override type EntityTable = RolesIps
override val baseTypedType: BaseTypedType[ Int ] = implicitly[ BaseTypedType[ Id ] ]
override val idLens: Lens[ RoleIp, Option[ Int ] ] = lens { element: RoleIp => element.id } { ( element, id ) => element.copy( id = id ) }
override val tableQuery: TableQuery[ RolesIps ] = RolesIps
override def $id( table: RolesIps ): Rep[ Int ] = table.id
}
|
rodricifuentes1/authentication-manager
|
src/main/scala/co/rc/authmanager/persistence/daos/RolesIpsDAO.scala
|
Scala
|
mit
| 786
|
package org.bitcoins.core.number
import org.bitcoins.testkitcore.util.BitcoinSUnitTest
import scodec.bits.ByteVector
class UInt8Test extends BitcoinSUnitTest {
"UInt8" must "convert a byte to a UInt8 correctly" in {
UInt8.toUInt8(0.toByte) must be(UInt8.zero)
UInt8.toUInt8(1.toByte) must be(UInt8.one)
UInt8.toUInt8(255.toByte) must be(UInt8(255.toShort))
}
it must "throw an exception if we try and create an UInt8 with more than 1 bytes" in {
intercept[IllegalArgumentException] {
UInt8(ByteVector(0.toByte, 0.toByte))
}
}
}
|
bitcoin-s/bitcoin-s
|
core-test/src/test/scala/org/bitcoins/core/number/UInt8Test.scala
|
Scala
|
mit
| 567
|
package com.twitter.finatra.http.marshalling
import com.twitter.finagle.http.Message
import com.twitter.util.jackson.ScalaObjectMapper
object mapper {
implicit class RichObjectMapper[M <: ScalaObjectMapper](val self: M) extends AnyVal {
def parseMessageBody[T: Manifest](message: Message): T = {
if (message.isRequest) {
val length = message.contentLength.getOrElse(0L)
if (length == 0) {
throw new UnsupportedOperationException(
"Injecting request attributes (e.g. QueryParam, Header, etc) not supported when explicitly calling " +
"ScalaObjectMapper.parse. Instead use a 'case class' input parameter on a Controller callback " +
"(e.g. get('/') { r: ClassWithRequestAttributes => ... } ).")
}
}
MessageBodyReader.parseMessageBody(message, self.reader[T])
}
}
}
|
twitter/finatra
|
http-core/src/main/scala/com/twitter/finatra/http/marshalling/mapper.scala
|
Scala
|
apache-2.0
| 870
|
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation._
import leon.lang._
object BitsTricks {
def bitAt(x: Int, n: Int): Boolean = {
require(n >= 0 && n < 32)
((x >> n) & 1) == 1
}
def isEven(x: Int): Boolean = {
(x & 1) == 0
} ensuring(res => res == (x % 2 == 0))
def isNegative(x: Int): Boolean = {
(x >>> 31) == 1
} ensuring(b => b == x < 0)
def isBitNSet(x: Int, n: Int): Int = {
require(n >= 0 && n < 32)
(x & (1 << n))
}
def testBitSet1(): Int = {
isBitNSet(122, 3)
} ensuring(_ != 0)
def testBitSet2(): Int = {
isBitNSet(-33, 5)
} ensuring(_ == 0)
def setBitN(x: Int, n: Int): Int = {
require(n >= 0 && n < 32)
x | (1 << n)
} ensuring(res => isBitNSet(res, n) != 0)
def toggleBitN(x: Int, n: Int): Int = {
require(n >= 0 && n < 32)
x ^ (1 << n)
} ensuring(res =>
if(isBitNSet(x, n) != 0) isBitNSet(res, n) == 0
else isBitNSet(res, n) != 0)
def checkDoubleXor(x: Int, y: Int): Int = {
x ^ y ^ x
} ensuring(res => res == y)
def turnOffRightmostOneRec(x: Int, index: Int): Int = {
require(index >= 0 && index < 32)
if(bitAt(x, index)) toggleBitN(x, index)//(x ^ (1 << index))
else if(index == 31) x
else turnOffRightmostOneRec(x, index + 1)
}
/*
* loops forever on the proof
*/
def turnOffRightmostOne(x: Int): Int = {
x & (x - 1)
} //ensuring(_ == turnOffRightmostOneRec(x, 0))
// 010100 -> 010111
def rightPropagateRightmostOne(x: Int): Int = {
x | (x - 1)
}
def property1(x: Int): Boolean = {
val y = rightPropagateRightmostOne(x)
y == rightPropagateRightmostOne(y)
} ensuring(b => b)
def isRotationLeft(x: Int, y: Int, n: Int, i: Int): Boolean = {
require(i >= 0 && i <= 32 && n >= 0 && n < 32)
if(i == 32)
true
else bitAt(x, i) == bitAt(y, (i + n) % 32) && isRotationLeft(x, y, n, i+1)
}
//rotateLeft proves in 1 minute (on a very powerful computer)
def rotateLeft(x: Int, n: Int): Int = {
require(n >= 0 && n < 32)
val front = x >>> (32 - n)
(x << n) | front
} //ensuring(res => isRotationLeft(x, res, n, 0))
//careful with overflows, case definition, truncated
def safeMean(x: Int, y: Int): Int = {
if(x >= 0 && y <= 0 || x <= 0 && y >= 0) (x + y)/2
else if(x >= 0 && x <= y) x + (y - x)/2
else if(x >= 0 && y <= x) y + (x - y)/2
else if(x <= 0 && x <= y) y + (x - y)/2
else x + (y - x)/2
}
//proves in 45 seconds
def magicMean(x: Int, y: Int): Int = {
val t = (x&y)+((x^y) >> 1)
t + ((t >>> 31) & (x ^ y))
} //ensuring(res => res == safeMean(x, y))
}
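// Illustrative sketch (not part of the original Leon regression file): the lines
// below exercise a few of the bit tricks as ordinary Scala, outside the Leon
// verifier. The concrete inputs and the demo object name are assumptions.
object BitsTricksExample extends App {
  println(BitsTricks.turnOffRightmostOne(20))        // 16: 0b010100 -> 0b010000
  println(BitsTricks.rightPropagateRightmostOne(20)) // 23: 0b010100 -> 0b010111
  println(BitsTricks.checkDoubleXor(42, 7))          // 7:  x ^ y ^ x == y
  println(BitsTricks.magicMean(6, 10))               // 8:  overflow-safe mean of 6 and 10
}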
|
epfl-lara/leon
|
src/test/resources/regression/verification/newsolvers/valid/BitsTricks.scala
|
Scala
|
gpl-3.0
| 2,635
|
package mlbigbook.math
import breeze.math.Semiring
import breeze.linalg.operators._
import breeze.storage.Zero
import scala.language.{higherKinds, implicitConversions}
import scala.reflect.ClassTag
/**
* An abstraction specifying operations one may perform using vectors and
* scalar values. These operations include element-wise and scalar
* multiplication, division, addition, and subtraction, as well as the dot
* product of two vectors and methods to construct new vector instances.
*/
trait MathVectorOps[V[_]] extends VectorOps[V] {
type N
implicit val n: Fractional[N]
implicit val z: Zero[N]
implicit val s: Semiring[N]
/**
* Creates a new vector of the input size where each element has value 0.
*/
def zeros(size: Int): V[N]
/**
* Creates a new vector of the input size where each element has value 1.
*/
def ones(size: Int): V[N]
protected lazy val zero = implicitly[Fractional[N]].zero
protected lazy val one = implicitly[Fractional[N]].one
/**
* Change every element of a vector V using the function f.
* No side effects.
*/
def map[B: ClassTag: Fractional: Zero](v: V[N])(f: N => B): V[B]
/**
* Apply a binary combination operator, r, to pairs of elements from the
* input vector, v. Note that r is applied both to vector elements and to
* previous outputs of r. The order of execution is not guaranteed;
* therefore, it is important that r is associative and commutative.
*/
def reduce[A1 >: N: ClassTag](v: V[N])(r: (A1, A1) => A1): A1
/**
* From the starting value, zero, applies the function combine to elements
* of the input vector v. This method evaluates to the final accumulated
* value of this operation across all elements of the vector. Execution
* order is not guaranteed, so combine must be side-effect free,
* associative, and commutative.
*/
def fold[B: ClassTag](v: V[N])(zero: B)(combine: (B, N) => B): B
/**
* Create a new vector of the input size where each element has the value v.
*/
def fill[A: ClassTag: Zero](size: Int)(v: => A): V[A]
/**
* Performs a shallow copy of the vector's contents. Each element is copied
* to a newly allocated vector of type V[N]. If N is a primitive or other
* value type, this is effectively a deep copy; otherwise only the
* references are copied.
*/
def copy(v: V[N]): V[N]
/**
* Performs element-wise addition of two vectors.
*/
val addV: OpAdd.Impl2[V[N], V[N], V[N]]
/**
* Adds a scalar to each element of a vector.
*/
val addS: OpAdd.Impl2[V[N], N, V[N]]
/**
* Performs element-wise subtraction of two vectors.
*/
val subV: OpSub.Impl2[V[N], V[N], V[N]]
/**
* Subtracts a scalar from each element of a vector.
*/
val subS: OpSub.Impl2[V[N], N, V[N]]
/**
* Performs a dot product operation between two vectors,
* which results in a scalar.
*/
val dot: OpMulInner.Impl2[V[N], V[N], N]
/**
* Performs element-wise multiplication between two vectors.
*/
val mulV: OpMulScalar.Impl2[V[N], V[N], V[N]]
/**
* Multiplies each vector element by a scalar.
*/
val mulS: OpMulScalar.Impl2[V[N], N, V[N]]
/**
* Performs element-wise division between two vectors.
*/
val divV: OpDiv.Impl2[V[N], V[N], V[N]]
/**
* Divides each vector element by a scalar.
*/
val divS: OpDiv.Impl2[V[N], N, V[N]]
}
object MathVectorOps {
type Type[Num, Vec[_]] = MathVectorOps[Vec] {
type N = Num
}
object Implicits {
// dense operations
implicit val DoubleDenseVot = DoubleDenseMathVector
implicit val FloatDenseVot = FloatDenseMathVector
implicit val LongDenseVot = LongDenseMathVector
implicit val IntDenseVot = IntDenseMathVector
// sparse operations
implicit val DoubleSparseVot = DoubleSparseMathVector
implicit val FloatSparseVot = FloatSparseMathVector
implicit val LongSparseVot = LongSparseMathVector
implicit val IntSparseVot = IntSparseMathVector
}
}
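// Illustrative sketch (not part of the original file): a minimal use of the
// operations declared above through the dense Double instance exported from
// `MathVectorOps.Implicits`. That this instance is backed by breeze dense vectors
// is an assumption based on the imports in this file; the demo names are invented.
object MathVectorOpsExample {
  def demo(): Unit = {
    val ops = MathVectorOps.Implicits.DoubleDenseVot
    val v1 = ops.ones(3)                // every element is 1
    val v2 = ops.addV(v1, ops.zeros(3)) // element-wise addition, still all ones
    println(ops.dot(v1, v2))            // dot product of two length-3 all-ones vectors: 3.0
  }
}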
|
malcolmgreaves/bigmlbook
|
fp4ml-main/src/main/scala/mlbigbook/math/MathVectorOps.scala
|
Scala
|
lgpl-3.0
| 4,111
|
package util
import Functors._
import scala.annotation.tailrec
/**
* Just the usual fold grammar
*/
abstract class Foldable[+T, F[_]: Functor] extends java.io.Serializable { self =>
def fold[R](z: R, combine: (R, T) => R): F[R]
/**
* map. Pretty nice, because we can forward the map
* function over to the underlying parser; it's exactly
* the same!
*/
def map[U](f: T => U) = new Foldable[U, F] {
def fold[R](z: R, combine: (R, U) => R): F[R] = self.fold(
z,
(acc: R, elem: T) => combine(acc, f(elem))
)
}
/**
* filter
*/
def filter(p: T => Boolean) = new Foldable[T, F] {
def fold[R](z: R, comb: (R, T) => R) = self.fold(
z,
(acc: R, elem: T) => if (p(elem)) comb(acc, elem) else acc
)
}
/**
* flatMap. It is unclear what semantics this should have; for now
* let's implement it later.
*/
/*def flatMap[U](f: T => CPSList[U, R]) = new Foldable[U, R] {
def fold(z: R, comb: Combine[U, R]) = self.fold(
z,
(acc: R, elem: T) => {
val nestedList = f(elem)
nestedList.fold(acc, comb)
}
)
}*/
/**
* partition
* This will create code that runs through the original fold twice:
* once for the positive predicate, once for the negative.
*
* see the following related post: http://manojo.github.io/2015/03/03/staged-foldleft-partition/
*/
def partition(p: T => Boolean): (Foldable[T, F], Foldable[T, F]) = {
val trues = this filter p
val falses = this filter (a => !p(a))
(trues, falses)
}
/**
* partition, which produces a CPSList over `Either` instead of
* two `CPSList`s. The important thing is to keep the one
* CPSList abstraction.
* This can be rewritten using `map`.
* see the following related post: http://manojo.github.io/2015/03/12/staged-foldleft-groupby/
*/
def partitionBis(p: T => Boolean) =
this map (elem => if (p(elem)) Left(elem) else Right(elem))
/**
* groupWith
* takes a function which computes some grouping property
* does not create groups just yet, just propagates key-value pairs
*
* can be rewritten using `map`.
* see the following related post: http://manojo.github.io/2015/03/12/staged-foldleft-groupby/
*/
def groupWith[K](f: T => K): Foldable[(K, T), F] =
this map (elem => (f(elem), elem))
/**
* utility functions that make it easier to write fold-like functions
*/
def toListF[U >: T]: F[List[U]] = toListBufferF map (_.toList)
def toMyListF[U >: T]: F[MyList[U]] = toListBufferF map MyList.bufferToMyList
import scala.collection.mutable.ListBuffer
def toListBufferF[U >: T]: F[ListBuffer[U]] = {
val folded: F[ListBuffer[U]] = self.fold[ListBuffer[U]](
ListBuffer.empty[U],
(acc: ListBuffer[U], t: U) => { acc += t; acc }
)
folded
}
import scala.collection.mutable.ArrayBuffer
def toArrayBufferF[U >: T]: F[ArrayBuffer[U]] = {
val folded: F[ArrayBuffer[U]] = self.fold[ArrayBuffer[U]](
ArrayBuffer.empty[U],
(acc: ArrayBuffer[U], t: U) => { acc += t; acc }
)
folded
}
def toSkipper: F[Unit] = self.fold((), (acc: Unit, _) => acc)
def toLength: F[Int] = self.fold(0, (acc: Int, _) => acc + 1)
}
/**
* A simple self-contained list-like collection. This is easier to
* handle in the `optimise` macro than the full-blown list, and it also
* carries a more restrictive interface, which is helpful.
*/
abstract class MyList[+T] extends Foldable[T, Functors.Id] {
def fold[R](z: R, combine: (R, T) => R): R = {
@tailrec
def loop(acc: R, tmpLs: MyList[T]): R = tmpLs match {
case Nil => acc
case Cons(x, xs) => loop(combine(acc, x), xs)
}
loop(z, this)
}
def append[U >: T](elem: U): MyList[U] = this match {
case Nil => Cons(elem, Nil)
case Cons(x, xs) => Cons(x, xs.append(elem))
}
}
object MyList {
import scala.collection.mutable.ListBuffer
def bufferToMyList[T](lb: ListBuffer[T]): MyList[T] =
lb.toList.foldRight(apply[T]())((x, xs) => Cons(x, xs))
def apply[T](): MyList[T] = Nil
}
case object Nil extends MyList[Nothing]
case class Cons[+T](x: T, xs: MyList[T]) extends MyList[T]
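// Illustrative sketch (not part of the original file): a small MyList pushed
// through the fold grammar defined above. It assumes Functors.Id is the identity
// type constructor, so the F[_]-wrapped results below print as plain values.
object FoldableExample extends App {
  val xs: MyList[Int] = Cons(1, Cons(2, Cons(3, Nil)))
  val doubledEvens = xs.filter(_ % 2 == 0).map(_ * 2)  // still a Foldable; nothing runs yet
  println(doubledEvens.toListF)                        // List(4)
  println(xs.toLength)                                 // 3
  println(xs.fold(0, (acc: Int, x: Int) => acc + x))   // 6
}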
|
manojo/parsequery
|
macros/src/main/scala/util/Foldable.scala
|
Scala
|
mit
| 4,179
|
package builder
import play.api.libs.json.{Json, JsUndefined}
import org.scalatest.{FunSpec, Matchers}
class JsonUtilSpec extends FunSpec with Matchers {
it("asOptString") {
JsonUtil.asOptString(Json.toJson("")) should be(None)
JsonUtil.asOptString(Json.toJson(" ")) should be(None)
JsonUtil.asOptString(JsUndefined("null")) should be(None)
JsonUtil.asOptString(Json.toJson("foo")) should be(Some("foo"))
JsonUtil.asOptString(Json.toJson(" foo ")) should be(Some("foo"))
}
it("asOptBoolean") {
JsonUtil.asOptBoolean(Json.toJson("")) should be(None)
JsonUtil.asOptBoolean(JsUndefined("null")) should be(None)
JsonUtil.asOptBoolean(Json.toJson("true")) should be(Some(true))
JsonUtil.asOptBoolean(Json.toJson("false")) should be(Some(false))
JsonUtil.asOptBoolean(Json.toJson(" false ")) should be(Some(false))
JsonUtil.asOptBoolean(Json.toJson("foo")) should be(None)
}
it("asOptLong") {
JsonUtil.asOptLong(Json.toJson("")) should be(None)
JsonUtil.asOptLong(JsUndefined("null")) should be(None)
JsonUtil.asOptLong(Json.toJson("1")) should be(Some(1l))
JsonUtil.asOptLong(Json.toJson(" 1 ")) should be(Some(1l))
JsonUtil.asOptLong(Json.toJson("foo")) should be(None)
}
it("hasKey") {
JsonUtil.hasKey(Json.obj("a" -> 1), "a") should be(true)
JsonUtil.hasKey(Json.obj("a" -> 1), "b") should be(false)
}
it("parseBoolean") {
JsonUtil.parseBoolean("true") should be(Some(true))
JsonUtil.parseBoolean("false") should be(Some(false))
Seq("", "foo", "[]", "{}").foreach { value =>
JsonUtil.parseBoolean(value) should be(None)
}
}
it("isNumeric") {
Seq("1", "-1", "5").foreach { value =>
JsonUtil.isNumeric(value) should be(true)
}
Seq("", "foo", "[]", "{}").foreach { value =>
JsonUtil.isNumeric(value) should be(false)
}
}
}
|
Seanstoppable/apidoc
|
core/src/test/scala/core/builder/JsonUtilSpec.scala
|
Scala
|
mit
| 1,891
|
package css
import java.net.{InetAddress, InetSocketAddress}
import java.nio.channels.AsynchronousChannelGroup
import java.nio.charset.Charset
import java.nio.file.Paths
import java.util.concurrent.Executors
import cats.effect.IO
import css.CssStreamHandler._
import fs2._
import fs2.io.tcp
import fs2.io.tcp.serverWithLocalAddress
import iolib.util.Resources.mkThreadFactory
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
object CssProcessor extends App {
private val utf8Charset = Charset.forName("UTF-8")
implicit val EC: ExecutionContext = ExecutionContext.fromExecutor(
Executors.newFixedThreadPool(
8, mkThreadFactory(
"fs2-http-spec-ec", daemon = true
)
)
)
implicit val Sch: Scheduler = Scheduler.fromScheduledExecutorService(
Executors.newScheduledThreadPool(
4, mkThreadFactory("fs2-http-spec-scheduler", daemon = true)
)
)
implicit val AG: AsynchronousChannelGroup = AsynchronousChannelGroup.withThreadPool(
Executors.newCachedThreadPool(
mkThreadFactory("fs2-http-spec-AG", daemon = true)
)
)
val src: Stream[IO, Byte] = io.file.readAll[IO](Paths.get("simple.css"), 16)
def delay[A](sleepTime: FiniteDuration): Pipe[IO, A, A] = _.flatMap { a =>
Sch.delay(Stream.eval(IO(a)), sleepTime)
}
val css =
src.through(text.utf8Decode)
.through(cssBlocks)
.through(text.utf8Encode)
val localBindAddress = async.ref[IO, InetSocketAddress].unsafeRunSync()
def postProcessCss(s: Stream[IO, Byte]): Stream[IO, Byte] =
tcp.client[IO](new InetSocketAddress("127.0.0.1", 5000)).flatMap { socket =>
s.to(socket.writes()).drain.onFinalize(socket.endOfOutput) ++ socket.reads(1024, None)
}
val queue = Stream.eval(async.unboundedQueue[IO, String])
def pushCss(s: String): Stream[IO, Unit] = for {
q <- queue
_ <- Stream(s).covary[IO].to(q.enqueue)
_ <- q.dequeue.through(log("test it: ")).drain
} yield ()
val monitor: Stream[IO, Nothing] = queue.flatMap { q =>
println("inside monitor ...")
q.dequeue.through(log("monitor of queue: >")).drain
}
val pushHelper: Pipe[IO, String, String] = _.flatMap { s =>
println("helping with push ...")
IO { pushCss(s)} ; Stream.eval(IO(s))
}
val echoServer: Stream[IO, Byte] =
serverWithLocalAddress[IO](new InetSocketAddress(InetAddress.getByName(null), 5001)).flatMap {
case Left(local) => Stream.eval_(localBindAddress.setAsyncPure(local))
case Right(s) =>
Stream.emit(s.flatMap { socket =>
for {
css <- socket.reads(1024).through(text.utf8Decode andThen cssBlocks)
_ <- Stream(css).covary[IO].through(log("info: "))
cssProcessed <- postProcessCss(Stream(css).through(text.utf8Encode))
_ = println(cssProcessed)
_ <- Stream(cssProcessed).covary[IO].to(socket.writes())
} yield cssProcessed
})
}.joinUnbounded
val cssClient: Stream[IO, Byte] =
tcp.client[IO]( new InetSocketAddress("127.0.0.1", 5000) ).flatMap { socket =>
css.covary[IO].to(socket.writes()).drain.onFinalize(socket.endOfOutput) ++
socket.reads(1024, None)
}
val program = echoServer
// val shutdown: Sink[IO, Unit] = _.evalMap { s =>
// IO { AG.shutdownNow() }
// }
program.run.unsafeRunSync()
// program.run.unsafeRunAsync(println)
def log[A](prefix: String): Pipe[IO, A, A] =
_.evalMap { s => IO { println(prefix + s.toString); s } }
}
|
scalavision/scunicorn
|
ideaWorkbench/src/main/scala/css/CssProcessor.scala
|
Scala
|
apache-2.0
| 3,518
|
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.kernels
import breeze.linalg.{DenseMatrix, DenseVector}
/**
* Implementation of the Multi-Layer Perceptron (MLP) kernel
*
* @author tailhq date 13/09/16.
* */
class MLPKernel(w: Double, b: Double) extends SVMKernel[DenseMatrix[Double]]
with LocalSVMKernel[DenseVector[Double]]
with Serializable{
override val hyper_parameters = List("w", "b")
state = Map("w" -> w, "b" -> b)
def setw(d: Double): Unit = {
state += ("w" -> d.toDouble)
}
def setoffset(o: Double): Unit = {
state += ("b" -> o)
}
override def evaluateAt(config: Map[String, Double])(
x: DenseVector[Double],
y: DenseVector[Double]): Double =
math.asin(
(config("w")*(x.t*y) + config("b"))/
(math.sqrt(config("w")*(x.t*x) + config("b") + 1) * math.sqrt(config("w")*(y.t*y) + config("b") + 1))
)
override def gradientAt(
config: Map[String, Double])(
x: DenseVector[Double],
y: DenseVector[Double]): Map[String, Double] = {
val (wxy, wxx, wyy) = (
config("w")*(x.t*y) + config("b"),
math.sqrt(config("w")*(x.t*x) + config("b") + 1),
math.sqrt(config("w")*(y.t*y) + config("b") + 1))
val (numerator, denominator) = (wxy, wxx*wyy)
val z = numerator/denominator
val alpha = 1.0/(1.0 - z*z)
Map(
"w" ->
alpha*((denominator*(x.t*y) - numerator*0.5*(wyy*(x.t*x)/wxx + wxx*(y.t*y)/wyy))/(denominator*denominator)),
"b" ->
alpha*((denominator - numerator*0.5*(wyy/wxx + wxx/wyy))/(denominator*denominator))
)
}
}
class MLP1dKernel(w: Double, b: Double) extends LocalSVMKernel[Double]
with Serializable {
override val hyper_parameters = List("w", "b")
state = Map("w" -> w, "b" -> b)
def setw(d: Double): Unit = {
state += ("w" -> d.toDouble)
}
def setoffset(o: Double): Unit = {
state += ("b" -> o)
}
override def evaluateAt(config: Map[String, Double])(
x: Double,
y: Double): Double =
math.asin(
(config("w")*(x*y) + config("b"))/
(math.sqrt(config("w")*(x*x) + config("b") + 1) * math.sqrt(config("w")*(y*y) + config("b") + 1))
)
override def gradientAt(config: Map[String, Double])(x: Double, y: Double): Map[String, Double] = {
val (wxy, wxx, wyy) = (
config("w")*(x*y) + config("b"),
math.sqrt(config("w")*(x*x) + config("b") + 1),
math.sqrt(config("w")*(y*y) + config("b") + 1))
val (numerator, denominator) = (wxy, wxx*wyy)
val z = numerator/denominator
val alpha = 1.0/(1.0 - z*z)
Map(
"w" ->
alpha*((denominator*(x*y) - numerator*0.5*(wyy*(x*x)/wxx + wxx*(y*y)/wyy))/(denominator*denominator)),
"b" ->
alpha*((denominator - numerator*0.5*(wyy/wxx + wxx/wyy))/(denominator*denominator))
)
}
}
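// Illustrative sketch (not part of the original DynaML source): evaluating the
// vector MLP kernel defined above. The object name, the sample vectors, and the
// hyper-parameter values are assumptions added for clarity; the config map passed
// here mirrors the state map the kernel builds in its class body.
object MLPKernelExample extends App {
  val kernel = new MLPKernel(w = 1.5, b = 0.5)
  val (x, y) = (DenseVector(1.0, 2.0), DenseVector(0.5, -1.0))
  // evaluateAt computes asin of the normalised inner product shown above
  println(kernel.evaluateAt(Map("w" -> 1.5, "b" -> 0.5))(x, y))
}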
|
mandar2812/DynaML
|
dynaml-core/src/main/scala/io/github/tailhq/dynaml/kernels/MLPKernel.scala
|
Scala
|
apache-2.0
| 3,558
|
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.cassandra.source
import com.datamountaineer.streamreactor.common.schemas.ConverterUtil
import com.datamountaineer.streamreactor.connect.cassandra.config.{CassandraConfigConstants, CassandraConfigSource, CassandraSettings, CassandraSourceSetting}
import org.mockito.MockitoSugar
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.collection.JavaConverters._
/**
*
*/
class TestCqlGenerator extends AnyWordSpec
with Matchers
with MockitoSugar
with ConverterUtil {
"CqlGenerator should generate timeuuid statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureMe("INCREMENTALMODE=timeuuid"))
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE the_pk_field > maxTimeuuid(?) AND the_pk_field <= minTimeuuid(?) ALLOW FILTERING"
}
"CqlGenerator should generate timestamp statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureMe("INCREMENTALMODE=timestamp"))
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE the_pk_field > ? AND the_pk_field <= ? ALLOW FILTERING"
}
"CqlGenerator should generate dse search timestamp statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureMe("INCREMENTALMODE=dsesearchtimestamp"))
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE solr_query=?"
}
"CqlGenerator should generate bucket timeseries statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureMeBucketTimeSeries("INCREMENTALMODE=buckettimeseries"))
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE the_pk_field > ? AND the_pk_field <= ? AND bucket IN ?"
}
"CqlGenerator should generate token based CQL statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureMe("INCREMENTALMODE=token"))
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE token(the_pk_field) > token(?) LIMIT 200"
}
"CqlGenerator should generate CQL statement with no offset based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureMe("INCREMENTALMODE=token"))
val cqlStatement = cqlGenerator.getCqlStatementNoOffset
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table LIMIT 200"
}
"CqlGenerator should generate format type json CQL statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureJSON())
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE the_pk_field > ? AND the_pk_field <= ? ALLOW FILTERING"
}
"CqlGenerator should generate format type json CQL with keys statement based on KCQL" in {
val cqlGenerator = new CqlGenerator(configureJSONWithKey("string_field"))
val cqlStatement = cqlGenerator.getCqlStatement
cqlStatement shouldBe "SELECT string_field,the_pk_field FROM test.cassandra-table WHERE the_pk_field > ? AND the_pk_field <= ? ALLOW FILTERING"
}
def configureJSON(): CassandraSourceSetting = {
val myKcql = s"INSERT INTO kafka-topic SELECT string_field, the_pk_field FROM cassandra-table PK the_pk_field WITHFORMAT JSON WITHUNWRAP INCREMENTALMODE=TIMESTAMP"
val configMap = {
Map(
CassandraConfigConstants.KEY_SPACE -> "test",
CassandraConfigConstants.KCQL -> myKcql,
CassandraConfigConstants.ASSIGNED_TABLES -> "cassandra-table",
CassandraConfigConstants.POLL_INTERVAL -> "1000",
CassandraConfigConstants.FETCH_SIZE -> "500",
CassandraConfigConstants.BATCH_SIZE -> "800").asJava
}
val configSource = new CassandraConfigSource(configMap)
CassandraSettings.configureSource(configSource).head
}
def configureJSONWithKey(keyField: String): CassandraSourceSetting = {
val myKcql = s"INSERT INTO kafka-topic SELECT string_field, the_pk_field FROM cassandra-table PK the_pk_field WITHFORMAT JSON WITHUNWRAP INCREMENTALMODE=TIMESTAMP WITHKEY($keyField)"
val configMap = {
Map(
CassandraConfigConstants.KEY_SPACE -> "test",
CassandraConfigConstants.KCQL -> myKcql,
CassandraConfigConstants.ASSIGNED_TABLES -> "cassandra-table",
CassandraConfigConstants.POLL_INTERVAL -> "1000",
CassandraConfigConstants.FETCH_SIZE -> "500",
CassandraConfigConstants.BATCH_SIZE -> "800").asJava
}
val configSource = new CassandraConfigSource(configMap)
CassandraSettings.configureSource(configSource).head
}
def configureMe(kcqlIncrementMode: String): CassandraSourceSetting = {
val myKcql = s"INSERT INTO kafka-topic SELECT string_field, the_pk_field FROM cassandra-table PK the_pk_field BATCH=200 $kcqlIncrementMode"
val configMap = {
Map(
CassandraConfigConstants.KEY_SPACE -> "test",
CassandraConfigConstants.KCQL -> myKcql,
CassandraConfigConstants.ASSIGNED_TABLES -> "cassandra-table",
CassandraConfigConstants.POLL_INTERVAL -> "1000",
CassandraConfigConstants.FETCH_SIZE -> "500",
CassandraConfigConstants.BATCH_SIZE -> "800").asJava
}
val configSource = new CassandraConfigSource(configMap)
CassandraSettings.configureSource(configSource).head
}
def configureMeBucketTimeSeries(kcqlIncrementMode: String): CassandraSourceSetting = {
val myKcql = s"INSERT INTO kafka-topic SELECT string_field, the_pk_field FROM cassandra-table PK the_pk_field BATCH=200 $kcqlIncrementMode"
val configMap = {
Map(
CassandraConfigConstants.KEY_SPACE -> "test",
CassandraConfigConstants.KCQL -> myKcql,
CassandraConfigConstants.ASSIGNED_TABLES -> "cassandra-table",
CassandraConfigConstants.POLL_INTERVAL -> "1000",
CassandraConfigConstants.FETCH_SIZE -> "500",
CassandraConfigConstants.BATCH_SIZE -> "800",
CassandraConfigConstants.BUCKET_TIME_SERIES_MODE -> "MINUTE",
CassandraConfigConstants.BUCKET_TIME_SERIES_FORMAT -> "yyMMddHHmm",
).asJava
}
val configSource = new CassandraConfigSource(configMap)
CassandraSettings.configureSource(configSource).head
}
}
|
datamountaineer/stream-reactor
|
kafka-connect-cassandra/src/test/scala/com/datamountaineer/streamreactor/connect/cassandra/source/TestCqlGenerator.scala
|
Scala
|
apache-2.0
| 7,183
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.container
import java.util.concurrent.TimeUnit
import org.apache.samza.Partition
import org.apache.samza.metrics.MetricsRegistryMap
import org.apache.samza.metrics.SlidingTimeWindowReservoir
import org.apache.samza.metrics.Timer
import org.apache.samza.system.IncomingMessageEnvelope
import org.apache.samza.system.SystemConsumers
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.task.TaskCoordinator.RequestScope
import org.apache.samza.task.ReadableCoordinator
import org.apache.samza.util.Clock
import org.junit.Assert._
import org.junit.Test
import org.mockito.Matchers
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.junit.AssertionsForJUnit
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers => ScalaTestMatchers}
class TestRunLoop extends AssertionsForJUnit with MockitoSugar with ScalaTestMatchers {
class StopRunLoop extends RuntimeException
val p0 = new Partition(0)
val p1 = new Partition(1)
val taskName0 = new TaskName(p0.toString)
val taskName1 = new TaskName(p1.toString)
val ssp0 = new SystemStreamPartition("testSystem", "testStream", p0)
val ssp1 = new SystemStreamPartition("testSystem", "testStream", p1)
val envelope0 = new IncomingMessageEnvelope(ssp0, "0", "key0", "value0")
val envelope1 = new IncomingMessageEnvelope(ssp1, "1", "key1", "value1")
def getMockTaskInstances: Map[TaskName, TaskInstance] = {
val ti0 = mock[TaskInstance]
when(ti0.systemStreamPartitions).thenReturn(Set(ssp0))
when(ti0.taskName).thenReturn(taskName0)
val ti1 = mock[TaskInstance]
when(ti1.systemStreamPartitions).thenReturn(Set(ssp1))
when(ti1.taskName).thenReturn(taskName1)
Map(taskName0 -> ti0, taskName1 -> ti1)
}
@Test
def testProcessMessageFromChooser {
val taskInstances = getMockTaskInstances
val consumers = mock[SystemConsumers]
val runLoop = new RunLoop(taskInstances, consumers, new SamzaContainerMetrics, TimeUnit.SECONDS.toMillis(1))
when(consumers.choose()).thenReturn(envelope0).thenReturn(envelope1).thenThrow(new StopRunLoop)
intercept[StopRunLoop] { runLoop.run }
verify(taskInstances(taskName0)).process(Matchers.eq(envelope0), anyObject, anyObject)
verify(taskInstances(taskName1)).process(Matchers.eq(envelope1), anyObject, anyObject)
runLoop.metrics.envelopes.getCount should equal(2L)
runLoop.metrics.nullEnvelopes.getCount should equal(0L)
}
@Test
def testNullMessageFromChooser {
val consumers = mock[SystemConsumers]
val map = getMockTaskInstances - taskName1 // This test only needs p0
val runLoop = new RunLoop(map, consumers, new SamzaContainerMetrics, TimeUnit.SECONDS.toMillis(1))
when(consumers.choose()).thenReturn(null).thenReturn(null).thenThrow(new StopRunLoop)
intercept[StopRunLoop] { runLoop.run }
runLoop.metrics.envelopes.getCount should equal(0L)
runLoop.metrics.nullEnvelopes.getCount should equal(2L)
}
@Test
def testWindowAndCommitAreCalledRegularly {
var now = 1400000000000L
val consumers = mock[SystemConsumers]
when(consumers.choose()).thenReturn(envelope0)
val runLoop = new RunLoop(
taskInstances = getMockTaskInstances,
consumerMultiplexer = consumers,
metrics = new SamzaContainerMetrics,
TimeUnit.SECONDS.toMillis(1),
windowMs = 60000, // call window once per minute
commitMs = 30000, // call commit twice per minute
clock = () => {
now += 100000000L // clock advances by 100 ms every time we look at it
if (now == 1690000000000L) throw new StopRunLoop // stop after 4 minutes 50 seconds
now
})
intercept[StopRunLoop] { runLoop.run }
verify(runLoop.taskInstances(taskName0), times(4)).window(anyObject)
verify(runLoop.taskInstances(taskName1), times(4)).window(anyObject)
verify(runLoop.taskInstances(taskName0), times(9)).commit
verify(runLoop.taskInstances(taskName1), times(9)).commit
}
@Test
def testCommitCurrentTaskManually {
val taskInstances = getMockTaskInstances
val consumers = mock[SystemConsumers]
val runLoop = new RunLoop(taskInstances, consumers, new SamzaContainerMetrics,
TimeUnit.SECONDS.toMillis(1), windowMs = -1, commitMs = -1)
when(consumers.choose()).thenReturn(envelope0).thenReturn(envelope1).thenThrow(new StopRunLoop)
stubProcess(taskInstances(taskName0), (envelope, coordinator) => coordinator.commit(RequestScope.CURRENT_TASK))
intercept[StopRunLoop] { runLoop.run }
verify(taskInstances(taskName0), times(1)).commit
verify(taskInstances(taskName1), times(0)).commit
}
@Test
def testCommitAllTasksManually {
val taskInstances = getMockTaskInstances
val consumers = mock[SystemConsumers]
val runLoop = new RunLoop(taskInstances, consumers, new SamzaContainerMetrics,
TimeUnit.SECONDS.toMillis(1), windowMs = -1, commitMs = -1)
when(consumers.choose()).thenReturn(envelope0).thenThrow(new StopRunLoop)
stubProcess(taskInstances(taskName0), (envelope, coordinator) => coordinator.commit(RequestScope.ALL_TASKS_IN_CONTAINER))
intercept[StopRunLoop] { runLoop.run }
verify(taskInstances(taskName0), times(1)).commit
verify(taskInstances(taskName1), times(1)).commit
}
@Test
def testShutdownOnConsensus {
val taskInstances = getMockTaskInstances
val consumers = mock[SystemConsumers]
val runLoop = new RunLoop(taskInstances, consumers, new SamzaContainerMetrics,
TimeUnit.SECONDS.toMillis(1), windowMs = -1, commitMs = -1)
when(consumers.choose()).thenReturn(envelope0).thenReturn(envelope0).thenReturn(envelope1)
stubProcess(taskInstances(taskName0), (envelope, coordinator) => coordinator.shutdown(RequestScope.CURRENT_TASK))
stubProcess(taskInstances(taskName1), (envelope, coordinator) => coordinator.shutdown(RequestScope.CURRENT_TASK))
runLoop.run
verify(taskInstances(taskName0), times(2)).process(Matchers.eq(envelope0), anyObject, anyObject)
verify(taskInstances(taskName1), times(1)).process(Matchers.eq(envelope1), anyObject, anyObject)
}
@Test
def testShutdownNow {
val taskInstances = getMockTaskInstances
val consumers = mock[SystemConsumers]
val runLoop = new RunLoop(taskInstances, consumers, new SamzaContainerMetrics
, TimeUnit.SECONDS.toMillis(1), windowMs = -1, commitMs = -1)
when(consumers.choose()).thenReturn(envelope0).thenReturn(envelope1)
stubProcess(taskInstances(taskName0), (envelope, coordinator) => coordinator.shutdown(RequestScope.ALL_TASKS_IN_CONTAINER))
runLoop.run
verify(taskInstances(taskName0), times(1)).process(anyObject, anyObject, anyObject)
verify(taskInstances(taskName1), times(0)).process(anyObject, anyObject, anyObject)
}
def anyObject[T] = Matchers.anyObject.asInstanceOf[T]
// Stub out TaskInstance.process. Mockito really doesn't make this easy. :(
def stubProcess(taskInstance: TaskInstance, process: (IncomingMessageEnvelope, ReadableCoordinator) => Unit) {
when(taskInstance.process(anyObject, anyObject, anyObject)).thenAnswer(new Answer[Unit]() {
override def answer(invocation: InvocationOnMock) {
val envelope = invocation.getArguments()(0).asInstanceOf[IncomingMessageEnvelope]
val coordinator = invocation.getArguments()(1).asInstanceOf[ReadableCoordinator]
process(envelope, coordinator)
}
})
}
@Test
def testUpdateTimerCorrectly {
var now = 0L
val consumers = mock[SystemConsumers]
when(consumers.choose()).thenReturn(envelope0)
val clock = new Clock {
var c = 0L
def currentTimeMillis: Long = {
c += 1L
c
}
}
val testMetrics = new SamzaContainerMetrics("test", new MetricsRegistryMap() {
override def newTimer(group: String, name: String) = {
newTimer(group, new Timer(name, new SlidingTimeWindowReservoir(300000, clock)))
}
})
val runLoop = new RunLoop(
taskInstances = getMockTaskInstances,
consumerMultiplexer = consumers,
metrics = testMetrics,
TimeUnit.SECONDS.toMillis(1),
windowMs = 1L,
commitMs = 1L,
clock = () => {
now += 1000000L
// clock() is called 15 times totally in RunLoop
// stop the runLoop after one run
if (now == 15000000L) throw new StopRunLoop
now
})
intercept[StopRunLoop] { runLoop.run }
testMetrics.chooseNs.getSnapshot.getAverage should equal(1000000L)
testMetrics.windowNs.getSnapshot.getAverage should equal(1000000L)
testMetrics.processNs.getSnapshot.getAverage should equal(1000000L)
testMetrics.commitNs.getSnapshot.getAverage should equal(1000000L)
now = 0L
intercept[StopRunLoop] { runLoop.run }
// after two loops
testMetrics.chooseNs.getSnapshot.getSize should equal(3)
testMetrics.windowNs.getSnapshot.getSize should equal(2)
testMetrics.processNs.getSnapshot.getSize should equal(2)
testMetrics.commitNs.getSnapshot.getSize should equal(2)
}
@Test
def testCommitAndWindowNotCalledImmediatelyOnStartUp {
var now = 0L
val consumers = mock[SystemConsumers]
val testMetrics = new SamzaContainerMetrics
val runLoop = new RunLoop(
taskInstances = getMockTaskInstances,
consumerMultiplexer = consumers,
metrics = testMetrics,
TimeUnit.SECONDS.toMillis(1),
commitMs = 1L,
windowMs = 1L,
clock = () => {
now += 1000000L
if (now == 13000000L) throw new StopRunLoop
now
}
)
intercept[StopRunLoop] {
runLoop.run
}
now = 0L
intercept[StopRunLoop] {
runLoop.run
}
// after 2 run loops number of commits and windows should be 1,
// as commit and window should not be called immediately on startup
testMetrics.commits.getCount should equal(1L)
testMetrics.windows.getCount should equal(1L)
}
@Test
def testGetSystemStreamPartitionToTaskInstancesMapping {
val ti0 = mock[TaskInstance]
val ti1 = mock[TaskInstance]
val ti2 = mock[TaskInstance]
when(ti0.systemStreamPartitions).thenReturn(Set(ssp0))
when(ti1.systemStreamPartitions).thenReturn(Set(ssp1))
when(ti2.systemStreamPartitions).thenReturn(Set(ssp1))
val mockTaskInstances = Map(taskName0 -> ti0, taskName1 -> ti1, new TaskName("2") -> ti2)
val runLoop = new RunLoop(mockTaskInstances, null, new SamzaContainerMetrics, TimeUnit.SECONDS.toMillis(1))
val expected = Map(ssp0 -> List(ti0), ssp1 -> List(ti1, ti2))
assertEquals(expected, runLoop.getSystemStreamPartitionToTaskInstancesMapping)
}
}
|
bharathkk/samza
|
samza-core/src/test/scala/org/apache/samza/container/TestRunLoop.scala
|
Scala
|
apache-2.0
| 11,518
|
package ch.epfl.lamp.grading
import java.io.{ File, PrintWriter }
import scala.Predef.{ any2stringadd => _, _ }
import scala.util.Try
import scala.collection.mutable
import scala.pickling.directSubclasses
import scala.pickling.Defaults._
import scala.pickling.json._
import Entry._
final case class GradingSummary(score: Int, maxScore: Int, feedback: String)
object GradingSummary {
class StringSplitExtractor(splitter: String) {
def unapplySeq(str: String) = Array.unapplySeq(str.split(splitter))
}
object ToInt {
def unapply(s: String) = Try(s.toInt).toOption
}
/** suiteName::suiteWeight */
object SuiteId extends StringSplitExtractor("::")
/** suiteName::testName::testWeight */
object TestId extends StringSplitExtractor("::")
final case class Suite(val name: String, val weight: Int,
var complete: Boolean = false,
val tests: mutable.Map[String, Test] = mutable.Map.empty)
final case class Test(val name: String, val weight: Int,
var failure: Option[String])
/** Given test completion state, compute the grade and provide feedback. */
def apply(suites: List[Suite]): GradingSummary = {
val score = suites.map { suite =>
suite.tests.values.map { test =>
test.failure.fold(test.weight)(_ => 0)
}.sum
}.sum
val maxScore = suites.map(_.weight).sum
val feedback = {
val sb = new StringBuilder
sb.append {
"Your solution achieved a testing score of %d out of %d.\\n\\n".format(score, maxScore)
}
if (score == maxScore)
sb.append("Great job!!!\\n\\n")
else {
sb.append("""Below you can see a short feedback for every test that failed,
|indicating the reason for the test failure and how many points
|you lost for each individual test.
|
|Tests that were aborted took too long to complete or crashed the
|JVM. Such crashes can arise due to infinite non-terminating
|loops or recursion (StackOverflowError) or excessive memory
|consumption (OutOfMemoryError).
|
|""".stripMargin)
for {
s <- suites
t <- s.tests.values
msg <- t.failure
} {
sb.append(s"[Test Description] ${t.name}\\n")
sb.append(s"[Observed Error] $msg\\n")
sb.append(s"[Lost Points] ${t.weight}\\n\\n")
}
}
sb.toString
}
GradingSummary(score, maxScore, feedback)
}
/** Replay the event records from the file and construct test completion state. */
def apply(file: File): GradingSummary = {
val jsons = io.Source.fromFile(file).getLines.mkString("\\n").split("\\n\\n")
val entries = jsons.map(_.unpickle[Entry])
val suites = mutable.Map.empty[String, Suite]
entries.foreach {
case SuiteStart(SuiteId(name, ToInt(weight))) =>
suites += name -> Suite(name, weight)
case SuiteEnd(SuiteId(name, _)) =>
suites(name).complete = true
case TestStart(TestId(suiteName, name, ToInt(weight))) =>
suites(suiteName).tests +=
name -> Test(name, weight, failure = Some("test has been aborted"))
case TestSuccess(TestId(suiteName, name, _)) =>
suites(suiteName).tests(name).failure = None
case TestFailure(TestId(suiteName, name, _), msg: String) =>
suites(suiteName).tests(name).failure = Some(msg)
}
GradingSummary(suites.values.toList)
}
}
object GradingSummaryRunner {
def main(args: Array[String]): Unit = {
val inPath = args(0)
val outPath = args(1)
val summary = GradingSummary(new File(inPath))
val writer = new PrintWriter(new File(outPath))
try writer.print(summary.pickle.value)
finally writer.close()
}
}
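// Illustrative sketch (not part of the original file): building the completion
// state by hand and summarising it, rather than replaying an event-record file.
// The suite/test names and weights below are invented for the example.
object GradingSummaryExample extends App {
  import GradingSummary.{ Suite, Test }
  val suite = Suite("week1", weight = 10, complete = true,
    tests = mutable.Map(
      "parses" -> Test("parses", weight = 6, failure = None),
      "rejects" -> Test("rejects", weight = 4, failure = Some("expected an error"))))
  val summary = GradingSummary(List(suite))
  println(s"${summary.score}/${summary.maxScore}") // 6/10
}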
|
sbt-coursera/scala-grading
|
runtime/src/main/scala/ch/epfl/lamp/grading/GradingSummary.scala
|
Scala
|
bsd-3-clause
| 3,830
|
package gapt.proofs.lk.reductions
import gapt.expr.formula.Bottom
import gapt.expr.formula.Top
import gapt.expr.subst.Substitution
import gapt.proofs.lk.LKProof
import gapt.proofs.lk.rules.AndLeftRule
import gapt.proofs.lk.rules.AndRightRule
import gapt.proofs.lk.rules.BottomAxiom
import gapt.proofs.lk.rules.CutRule
import gapt.proofs.lk.rules.ConversionLeftRule
import gapt.proofs.lk.rules.ConversionRightRule
import gapt.proofs.lk.rules.EqualityLeftRule
import gapt.proofs.lk.rules.EqualityRightRule
import gapt.proofs.lk.rules.ExistsLeftRule
import gapt.proofs.lk.rules.ExistsRightRule
import gapt.proofs.lk.rules.ForallLeftRule
import gapt.proofs.lk.rules.ForallRightRule
import gapt.proofs.lk.rules.ImpLeftRule
import gapt.proofs.lk.rules.ImpRightRule
import gapt.proofs.lk.rules.LogicalAxiom
import gapt.proofs.lk.rules.NegLeftRule
import gapt.proofs.lk.rules.NegRightRule
import gapt.proofs.lk.rules.OrLeftRule
import gapt.proofs.lk.rules.OrRightRule
import gapt.proofs.lk.rules.TopAxiom
import gapt.proofs.lk.rules.WeakeningLeftRule
import gapt.proofs.lk.rules.WeakeningRightRule
import gapt.proofs.SequentConnector
import gapt.proofs.guessPermutation
import gapt.proofs.lk.rules.macros.WeakeningMacroRule
object GradeReductionAxiomLeft extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( LogicalAxiom( _ ), _ ) => Some( cut.rightSubProof )
case _ => None
}
}
object GradeReductionAxiomRight extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( _, LogicalAxiom( _ ) ) => Some( cut.leftSubProof )
case _ => None
}
}
object GradeReductionAxiomTop extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( TopAxiom, WeakeningLeftRule( subProof, Top() ) ) if cut.rightSubProof.mainIndices.head == cut.aux2 =>
Some( subProof )
case _ => None
}
}
object GradeReductionAxiomBottom extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( WeakeningRightRule( subProof, Bottom() ), BottomAxiom ) if cut.leftSubProof.mainIndices.head == cut.aux1 =>
Some( subProof )
case _ => None
}
}
object GradeReductionWeakeningRight extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( l @ WeakeningRightRule( subProof, main ), r ) if l.mainIndices.head == cut.aux1 =>
// The left cut formula is introduced by weakening
Some( WeakeningMacroRule( subProof, cut.endSequent ) )
case _ => None
}
}
object GradeReductionWeakeningLeft extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( l, r @ WeakeningLeftRule( subProof, main ) ) if cut.aux2 == cut.rightSubProof.mainIndices.head =>
// The right cut formula is introduced by weakening
Some( WeakeningMacroRule( subProof, cut.endSequent ) )
case _ => None
}
}
object GradeReductionAnd extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( AndRightRule( llSubProof, a1, lrSubProof, a2 ), AndLeftRule( rSubProof, a3, a4 )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
val tmp = CutRule( lrSubProof, a2, rSubProof, a4 )
val o = tmp.getRightSequentConnector
Some( CutRule( llSubProof, a1, tmp, o.child( a3 ) ) )
case _ => None
}
}
object GradeReductionOr extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( OrRightRule( lSubProof, a1, a2 ), OrLeftRule( rlSubProof, a3, rrSubProof, a4 )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
val tmp = CutRule( lSubProof, a1, rlSubProof, a3 )
val o = tmp.getLeftSequentConnector
Some( CutRule( tmp, o.child( a2 ), rrSubProof, a4 ) )
case _ => None
}
}
object GradeReductionImp extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( ImpRightRule( lSubProof, a1, a2 ), ImpLeftRule( rlSubProof, a3, rrSubProof, a4 )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
val tmp = CutRule( rlSubProof, a3, lSubProof, a1 )
Some( CutRule( tmp, tmp.getRightSequentConnector.child( a2 ), rrSubProof, a4 ) )
case _ => None
}
}
object GradeReductionNeg extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( NegRightRule( lSubProof, a1 ), NegLeftRule( rSubProof, a2 )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
Some( CutRule( rSubProof, a2, lSubProof, a1 ) )
case _ => None
}
}
object GradeReductionForall extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( ForallRightRule( lSubProof, a1, eigen, _ ), ForallLeftRule( rSubProof, a2, f, term, _ )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
val lSubProofNew = Substitution( eigen, term )( lSubProof )
Some( CutRule( lSubProofNew, rSubProof, cut.rightSubProof.auxFormulas.head.head ) )
case _ => None
}
}
object GradeReductionExists extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( ExistsRightRule( lSubProof, a2, f, term, _ ), ExistsLeftRule( rSubProof, a1, eigen, _ )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
val rSubProofNew = Substitution( eigen, term )( rSubProof )
Some( CutRule( lSubProof, rSubProofNew, cut.leftSubProof.auxFormulas.head.head ) )
case _ => None
}
}
object GradeReductionDefinition extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( ConversionRightRule( lSubProof, a1, definition1 ), ConversionLeftRule( rSubProof, a2, definition2 )
) if cut.leftSubProof.mainIndices.head == cut.aux1 && cut.rightSubProof.mainIndices.head == cut.aux2 =>
Some( CutRule( lSubProof, a1, rSubProof, a2 ) )
case _ => None
}
}
object GradeReductionEquality extends CutReduction {
override def reduce( cut: CutRule ): Option[LKProof] =
( cut.leftSubProof, cut.rightSubProof ) match {
case ( eqL @ EqualityRightRule( _, _, _, _ ), eqR @ EqualityLeftRule( _, _, _, _ )
) if eqL.mainIndices.head == cut.aux1 && eqR.mainIndices.head == cut.aux2 && eqL.auxFormula == eqR.auxFormula =>
Some( CutRule( eqL.subProof, eqL.aux, eqR.subProof, eqR.aux ) )
case _ => None
}
}
object gradeReduction extends CutReduction {
def applyWithSequentConnector( cut: CutRule ): Option[( LKProof, SequentConnector )] =
this( cut ) map { guessPermutation( cut, _ ) }
/**
* Reduces the logical complexity of the cut formula or removes the cut.
*
* @param cut The proof to which the reduction is applied.
* @return A reduced proof or None if the reduction could not be applied to the given proof.
*/
def apply( cut: CutRule ): Option[LKProof] =
GradeReductionAxiomLeft.reduce( cut ) orElse
GradeReductionAxiomRight.reduce( cut ) orElse
GradeReductionAxiomTop.reduce( cut ) orElse
GradeReductionAxiomBottom.reduce( cut ) orElse
GradeReductionWeakeningLeft.reduce( cut ) orElse
GradeReductionWeakeningRight.reduce( cut ) orElse
GradeReductionAnd.reduce( cut ) orElse
GradeReductionOr.reduce( cut ) orElse
GradeReductionImp.reduce( cut ) orElse
GradeReductionNeg.reduce( cut ) orElse
GradeReductionForall.reduce( cut ) orElse
GradeReductionExists.reduce( cut ) orElse
GradeReductionDefinition.reduce( cut ) orElse
GradeReductionEquality.reduce( cut )
override def reduce( proof: CutRule ): Option[LKProof] = apply( proof )
}
|
gapt/gapt
|
core/src/main/scala/gapt/proofs/lk/reductions/gradeReduction.scala
|
Scala
|
gpl-3.0
| 8,702
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.models
import play.api.libs.json.Json
case class VerificationResult(error: Option[String])
object VerificationResult {
implicit val formats = Json.format[VerificationResult]
}
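// Illustrative sketch (not part of the original file): the implicit Format above
// lets VerificationResult round-trip through play-json. The example values and
// the demo object name are invented.
object VerificationResultExample extends App {
  val json = Json.toJson(VerificationResult(Some("verification failed")))
  println(json)                              // {"error":"verification failed"}
  println(json.validate[VerificationResult]) // JsSuccess(VerificationResult(Some(verification failed)),...)
}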
|
VlachJosef/bforms-frontend
|
app/uk/gov/hmrc/bforms/models/VerificationResult.scala
|
Scala
|
apache-2.0
| 813
|
package com.nekogata.backlogger.js_exports.commands.setting
import com.nekogata.backlogger.application.command.setting.SaveSettingCommand
import scala.scalajs.js.annotation.{JSExport, JSExportTopLevel}
@JSExportTopLevel("SaveSettingCommand")
class SaveSettingCommandImpl extends SaveSettingCommand {
@JSExport
override var apiKey: String = ""
@JSExport
override var spaceName: String = ""
@JSExport
override def execute(): Unit = super.execute()
}
|
Shinpeim/BackLogger
|
scala/src/main/scala/com/nekogata/backlogger/js_exports/commands/setting/SaveSettingCommandImpl.scala
|
Scala
|
mit
| 465
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.File
import java.util.{Locale, TimeZone}
import scala.util.control.NonFatal
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.catalyst.util.{fileToString, stringToFile}
import org.apache.spark.sql.execution.command.DescribeTableCommand
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.StructType
/**
* End-to-end test cases for SQL queries.
*
* Each case is loaded from a file in "spark/sql/core/src/test/resources/sql-tests/inputs".
* Each case has a golden result file in "spark/sql/core/src/test/resources/sql-tests/results".
*
* To run the entire test suite:
* {{{
* build/sbt "sql/test-only *SQLQueryTestSuite"
* }}}
*
* To run a single test file upon change:
* {{{
* build/sbt "~sql/test-only *SQLQueryTestSuite -- -z inline-table.sql"
* }}}
*
* To re-generate golden files, run:
* {{{
* SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite"
* }}}
*
* The format for input files is simple:
* 1. A list of SQL queries separated by semicolon.
* 2. Lines starting with -- are treated as comments and ignored.
*
* For example:
* {{{
* -- this is a comment
* select 1, -1;
* select current_date;
* }}}
*
* The format for golden result files looks roughly like:
* {{{
* -- some header information
*
* -- !query 0
* select 1, -1
* -- !query 0 schema
* struct<...schema...>
* -- !query 0 output
* ... data row 1 ...
* ... data row 2 ...
* ...
*
* -- !query 1
* ...
* }}}
*/
class SQLQueryTestSuite extends QueryTest with SharedSQLContext {
private val regenerateGoldenFiles: Boolean = System.getenv("SPARK_GENERATE_GOLDEN_FILES") == "1"
private val baseResourcePath = {
// If regenerateGoldenFiles is true, we must be running this in SBT and we use hard-coded
// relative path. Otherwise, we use classloader's getResource to find the location.
if (regenerateGoldenFiles) {
java.nio.file.Paths.get("src", "test", "resources", "sql-tests").toFile
} else {
val res = getClass.getClassLoader.getResource("sql-tests")
new File(res.getFile)
}
}
private val inputFilePath = new File(baseResourcePath, "inputs").getAbsolutePath
private val goldenFilePath = new File(baseResourcePath, "results").getAbsolutePath
/** List of test cases to ignore, in lower cases. */
private val blackList = Set(
"blacklist.sql", // Do NOT remove this one. It is here to test the blacklist functionality.
".DS_Store" // A meta-file that may be created on Mac by Finder App.
// We should ignore this file from processing.
)
// Create all the test cases.
listTestCases().foreach(createScalaTestCase)
/** A test case. */
private case class TestCase(name: String, inputFile: String, resultFile: String)
/** A single SQL query's output. */
private case class QueryOutput(sql: String, schema: String, output: String) {
def toString(queryIndex: Int): String = {
// We are explicitly not using multi-line string due to stripMargin removing "|" in output.
s"-- !query $queryIndex\\n" +
sql + "\\n" +
s"-- !query $queryIndex schema\\n" +
schema + "\\n" +
s"-- !query $queryIndex output\\n" +
output
}
}
private def createScalaTestCase(testCase: TestCase): Unit = {
if (blackList.exists(t =>
testCase.name.toLowerCase(Locale.ROOT).contains(t.toLowerCase(Locale.ROOT)))) {
// Create a test case to ignore this case.
ignore(testCase.name) { /* Do nothing */ }
} else {
// Create a test case to run this case.
test(testCase.name) { runTest(testCase) }
}
}
/** Run a test case. */
private def runTest(testCase: TestCase): Unit = {
val input = fileToString(new File(testCase.inputFile))
// List of SQL queries to run
val queries: Seq[String] = {
      val cleaned = input.split("\n").filterNot(_.startsWith("--")).mkString("\n")
      // note: this is not a robust way to split queries on semicolons, but it works for now.
cleaned.split("(?<=[^\\\\\\\\]);").map(_.trim).filter(_ != "").toSeq
}
// Create a local SparkSession to have stronger isolation between different test cases.
// This does not isolate catalog changes.
val localSparkSession = spark.newSession()
loadTestData(localSparkSession)
// Run the SQL queries preparing them for comparison.
val outputs: Seq[QueryOutput] = queries.map { sql =>
val (schema, output) = getNormalizedResult(localSparkSession, sql)
// We might need to do some query canonicalization in the future.
QueryOutput(
sql = sql,
schema = schema.catalogString,
        output = output.mkString("\n").trim)
}
if (regenerateGoldenFiles) {
      // Again, we are explicitly not using a multi-line string due to stripMargin removing "|".
val goldenOutput = {
s"-- Automatically generated by ${getClass.getSimpleName}\\n" +
s"-- Number of queries: ${outputs.size}\\n\\n\\n" +
outputs.zipWithIndex.map{case (qr, i) => qr.toString(i)}.mkString("\\n\\n\\n") + "\\n"
}
val resultFile = new File(testCase.resultFile)
val parent = resultFile.getParentFile
if (!parent.exists()) {
assert(parent.mkdirs(), "Could not create directory: " + parent)
}
stringToFile(resultFile, goldenOutput)
}
// Read back the golden file.
val expectedOutputs: Seq[QueryOutput] = {
val goldenOutput = fileToString(new File(testCase.resultFile))
      val segments = goldenOutput.split("-- !query.+\n")
// each query has 3 segments, plus the header
assert(segments.size == outputs.size * 3 + 1,
s"Expected ${outputs.size * 3 + 1} blocks in result file but got ${segments.size}. " +
s"Try regenerate the result files.")
Seq.tabulate(outputs.size) { i =>
QueryOutput(
sql = segments(i * 3 + 1).trim,
schema = segments(i * 3 + 2).trim,
output = segments(i * 3 + 3).trim
)
}
}
// Compare results.
assertResult(expectedOutputs.size, s"Number of queries should be ${expectedOutputs.size}") {
outputs.size
}
outputs.zip(expectedOutputs).zipWithIndex.foreach { case ((output, expected), i) =>
      assertResult(expected.sql, s"SQL query did not match for query #$i\n${expected.sql}") {
output.sql
}
      assertResult(expected.schema, s"Schema did not match for query #$i\n${expected.sql}") {
output.schema
}
      assertResult(expected.output, s"Result did not match for query #$i\n${expected.sql}") {
output.output
}
}
}
/** Executes a query and returns the result as (schema of the output, normalized output). */
private def getNormalizedResult(session: SparkSession, sql: String): (StructType, Seq[String]) = {
// Returns true if the plan is supposed to be sorted.
    def isSorted(plan: LogicalPlan): Boolean = plan match {
case _: Join | _: Aggregate | _: Generate | _: Sample | _: Distinct => false
case _: DescribeTableCommand => true
case PhysicalOperation(_, _, Sort(_, true, _)) => true
      case _ => plan.children.iterator.exists(isSorted)
}
try {
val df = session.sql(sql)
val schema = df.schema
val notIncludedMsg = "[not included in comparison]"
// Get answer, but also get rid of the #1234 expression ids that show up in explain plans
      val answer = df.queryExecution.hiveResultString().map(_.replaceAll("#\\d+", "#x")
.replaceAll("Location.*/sql/core/", s"Location ${notIncludedMsg}sql/core/")
.replaceAll("Created By.*", s"Created By $notIncludedMsg")
.replaceAll("Created Time.*", s"Created Time $notIncludedMsg")
.replaceAll("Last Access.*", s"Last Access $notIncludedMsg"))
// If the output is not pre-sorted, sort it.
      if (isSorted(df.queryExecution.analyzed)) (schema, answer) else (schema, answer.sorted)
} catch {
case a: AnalysisException =>
// Do not output the logical plan tree which contains expression IDs.
// Also implement a crude way of masking expression IDs in the error message
        // with a generic pattern "#x".
val msg = if (a.plan.nonEmpty) a.getSimpleMessage else a.getMessage
        (StructType(Seq.empty), Seq(a.getClass.getName, msg.replaceAll("#\\d+", "#x")))
case NonFatal(e) =>
// If there is an exception, put the exception class followed by the message.
(StructType(Seq.empty), Seq(e.getClass.getName, e.getMessage))
}
}
private def listTestCases(): Seq[TestCase] = {
listFilesRecursively(new File(inputFilePath)).map { file =>
val resultFile = file.getAbsolutePath.replace(inputFilePath, goldenFilePath) + ".out"
val absPath = file.getAbsolutePath
val testCaseName = absPath.stripPrefix(inputFilePath).stripPrefix(File.separator)
TestCase(testCaseName, absPath, resultFile)
}
}
/** Returns all the files (not directories) in a directory, recursively. */
private def listFilesRecursively(path: File): Seq[File] = {
val (dirs, files) = path.listFiles().partition(_.isDirectory)
files ++ dirs.flatMap(listFilesRecursively)
}
/** Load built-in test tables into the SparkSession. */
private def loadTestData(session: SparkSession): Unit = {
import session.implicits._
(1 to 100).map(i => (i, i.toString)).toDF("key", "value").createOrReplaceTempView("testdata")
((Seq(1, 2, 3), Seq(Seq(1, 2, 3))) :: (Seq(2, 3, 4), Seq(Seq(2, 3, 4))) :: Nil)
.toDF("arraycol", "nestedarraycol")
.createOrReplaceTempView("arraydata")
(Tuple1(Map(1 -> "a1", 2 -> "b1", 3 -> "c1", 4 -> "d1", 5 -> "e1")) ::
Tuple1(Map(1 -> "a2", 2 -> "b2", 3 -> "c2", 4 -> "d2")) ::
Tuple1(Map(1 -> "a3", 2 -> "b3", 3 -> "c3")) ::
Tuple1(Map(1 -> "a4", 2 -> "b4")) ::
Tuple1(Map(1 -> "a5")) :: Nil)
.toDF("mapcol")
.createOrReplaceTempView("mapdata")
}
private val originalTimeZone = TimeZone.getDefault
private val originalLocale = Locale.getDefault
override def beforeAll(): Unit = {
super.beforeAll()
// Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
// Add Locale setting
Locale.setDefault(Locale.US)
RuleExecutor.resetTime()
}
override def afterAll(): Unit = {
try {
TimeZone.setDefault(originalTimeZone)
Locale.setDefault(originalLocale)
// For debugging dump some statistics about how much time was spent in various optimizer rules
logWarning(RuleExecutor.dumpTimeSpent())
} finally {
super.afterAll()
}
}
}
|
SHASHANKB/spark
|
sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala
|
Scala
|
apache-2.0
| 11,760
|
package akka.persistence.jdbc.migrator.integration
import akka.persistence.jdbc.migrator.MigratorSpec._
import akka.persistence.jdbc.migrator.JournalMigratorTest
class PostgresJournalMigratorTest extends JournalMigratorTest("postgres-application.conf") with PostgresCleaner
class MySQLJournalMigratorTest extends JournalMigratorTest("mysql-application.conf") with MysqlCleaner
class OracleJournalMigratorTest extends JournalMigratorTest("oracle-application.conf") with OracleCleaner
class SqlServerJournalMigratorTest extends JournalMigratorTest("sqlserver-application.conf") with SqlServerCleaner
|
dnvriend/akka-persistence-jdbc
|
migrator/src/it/scala/akka/persistence/jdbc/migrator/integration/JournalMigratorTest.scala
|
Scala
|
apache-2.0
| 603
|
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.xgboost4j.scala.spark
object XGBoostHelper {
def load(path: String, numClass: Int): XGBoostClassificationModel = {
import ml.dmlc.xgboost4j.scala.XGBoost
val _booster = XGBoost.loadModel(path)
new XGBoostClassificationModel("XGBClassifierModel", numClass, _booster)
}
def load(path: String): XGBoostRegressionModel = {
import ml.dmlc.xgboost4j.scala.XGBoost
val _booster = XGBoost.loadModel(path)
new XGBoostRegressionModel("XGBRegressorModel", _booster)
}
}
|
intel-analytics/analytics-zoo
|
zoo/src/main/scala/com/intel/analytics/zoo/pipeline/nnframes/XGBoostHelper.scala
|
Scala
|
apache-2.0
| 1,116
|
package com.sksamuel.elastic4s.searches.queries.funcscorer
import org.elasticsearch.index.query.functionscore.{ExponentialDecayFunctionBuilder, ScoreFunctionBuilders}
import com.sksamuel.exts.OptionImplicits._
import org.elasticsearch.search.MultiValueMode
case class ExponentialDecayScoreDefinition(field: String,
origin: String,
scale: String,
offset: Option[Any] = None,
decay: Option[Double] = None,
weight: Option[Double] = None,
multiValueMode: Option[MultiValueMode] = None)
extends ScoreFunctionDefinition {
override type B = ExponentialDecayFunctionBuilder
def builder = {
val builder = (offset, decay) match {
case (Some(o), Some(d)) => ScoreFunctionBuilders.exponentialDecayFunction(field, origin, scale, o, d)
case (Some(o), None) => ScoreFunctionBuilders.exponentialDecayFunction(field, origin, scale, o)
case _ => ScoreFunctionBuilders.exponentialDecayFunction(field, origin, scale)
}
weight.map(_.toFloat).foreach(builder.setWeight)
multiValueMode.foreach(builder.setMultiValueMode)
builder
}
def decay(decay: Double): ExponentialDecayScoreDefinition = copy(decay = decay.some)
def offset(offset: Any): ExponentialDecayScoreDefinition = copy(offset = offset.some)
}
|
ulric260/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/funcscorer/ExponentialDecayScoreDefinition.scala
|
Scala
|
apache-2.0
| 1,493
|
package kuaixue.scala.book.chapter_12
import java.util.Arrays
object ParamInfer extends App {
import scala.math._
val num = 3.14
val fun = ceil _
println("fun=" + fun) //<function1>
fun(num)
val ar = Array(3.14, 1.42, 2.0).map(fun)
println("array=" + (Arrays toString ar))
val triple = (x: Double) => 3 * x
triple(3)
val ar1 = Array(3.14, 1.42, 2.0).map((x: Double) => 3 * x)
val ar2 = Array(3.14, 1.42, 2.0).map { (x: Double) => 3 * x }
val ar3 = Array(3.14, 1.42, 2.0) map { (x: Double) => 3 * x }
def valueAtOneQuarter(f: (Double) => Double)= f(0.25)
valueAtOneQuarter(ceil _)
valueAtOneQuarter(sqrt _)
valueAtOneQuarter(x => x * 3)
valueAtOneQuarter(3 * _)
//val fun = 3 * (_ : Double)
(1 to 9).map("*" * _).foreach { println _}
(1 to 9).map("*" * _).foreach {x => println(x)}
(1 to 9).filter(_ % 2 == 0) //2 4 6 8
  val val10 = (1 to 9).reduceLeft(_ * _) // (((1*2)*3)*4) ...; the first _ and the second _ stand for the two operands of each step
println(val10)
"Mary has a little lamb".split(" ").sortWith(_.length < _.length)
"Mary has a little lamb".split(" ").startsWith("a")
def mulOneAtATime(x: Int) = (y : Int) => x * y
mulOneAtATime(3)
mulOneAtATime(3)(4)
def mulOneAtATime1(x: Int)(y : Int) = x * y
val a = Array("hello", "world")
val b = Array("Hello", "World")
a.corresponds(b)(_.equalsIgnoreCase(_))
a.corresponds(b)(_ equalsIgnoreCase _)
}
|
slieer/scala-tutorials
|
src/main/scala/kuaixue/scala/book/chapter_12/ParamInfer.scala
|
Scala
|
apache-2.0
| 1,503
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling
import java.io.File
import java.net.URI
import java.text.MessageFormat
import ai.h2o.sparkling.backend.utils.RestApiUtils._
import ai.h2o.sparkling.backend.utils.{RestApiUtils, RestCommunication, RestEncodingUtils}
import ai.h2o.sparkling.backend.{H2OChunk, H2OJob, NodeDesc}
import ai.h2o.sparkling.extensions.rest.api.Paths
import ai.h2o.sparkling.extensions.rest.api.schema.{FinalizeFrameV3, InitializeFrameV3}
import ai.h2o.sparkling.utils.Base64Encoding
import ai.h2o.sparkling.utils.ScalaUtils.withResource
import com.google.gson.{Gson, JsonElement}
import org.apache.commons.io.IOUtils
import water.api.schemas3.FrameChunksV3.FrameChunkV3
import water.api.schemas3.FrameV3.ColV3
import water.api.schemas3._
import scala.util.Random
/**
* H2OFrame representation via Rest API
*/
class H2OFrame private (
val frameId: String,
val columns: Array[H2OColumn],
private[sparkling] val chunks: Array[H2OChunk])
extends Serializable
with RestEncodingUtils {
private val conf = H2OContext.ensure("H2OContext needs to be running in order to create H2OFrame").getConf
val columnNames: Array[String] = columns.map(_.name)
lazy val numberOfRows: Long = chunks.foldLeft(0L)((acc, chunk) => acc + chunk.numberOfRows)
def numberOfColumns: Int = columns.length
def convertColumnsToStrings(columns: Array[String]): H2OFrame = {
val indices = columnNames.zipWithIndex.toMap
val selectedIndices = columns.map { name =>
indices.getOrElse(name, throw new IllegalArgumentException(s"Column $name does not exist in the frame $frameId"))
}
convertColumnsToStrings(selectedIndices)
}
def convertColumnsToStrings(columnIndices: Array[Int]): H2OFrame = {
val nonExisting = columnIndices.filterNot(columns.indices.contains)
if (nonExisting.nonEmpty) {
throw new IllegalArgumentException(
s"Columns with indices ${nonExisting.mkString("[", ",", "]")} are not in the frame $frameId." +
s" The frame has ${columnNames.length} columns.")
}
val endpoint = getClusterEndpoint(conf)
if (columnIndices.isEmpty) {
this
} else {
val params = Map("ast" -> MessageFormat
.format(s"( assign {0} (:= {0} (as.character (cols {0} {1})) {1} []))", frameId, stringifyArray(columnIndices)))
val rapidsFrameV3 = update[RapidsFrameV3](endpoint, "99/Rapids", conf, params)
H2OFrame(rapidsFrameV3.key.name)
}
}
def convertColumnsToCategorical(columns: Array[String]): H2OFrame = {
val indices = columnNames.zipWithIndex.toMap
val selectedIndices = columns.map { name =>
indices.getOrElse(name, throw new IllegalArgumentException(s"Column $name does not exist in the frame $frameId"))
}
convertColumnsToCategorical(selectedIndices)
}
def convertColumnsToCategorical(columnIndices: Array[Int]): H2OFrame = {
val nonExisting = columnIndices.filterNot(columns.indices.contains(_))
if (nonExisting.nonEmpty) {
throw new IllegalArgumentException(
s"Columns with indices ${nonExisting.mkString("[", ",", "]")} are not in the frame $frameId." +
s" The frame has ${columnNames.length} columns.")
}
val endpoint = getClusterEndpoint(conf)
if (columnIndices.isEmpty) {
this
} else {
val params = Map(
"ast" -> MessageFormat
.format(s"( assign {0} (:= {0} (as.factor (cols {0} {1})) {1} []))", frameId, stringifyArray(columnIndices)))
val rapidsFrameV3 = update[RapidsFrameV3](endpoint, "99/Rapids", conf, params)
H2OFrame(rapidsFrameV3.key.name)
}
}
def split(splitRatios: Double*): Array[H2OFrame] = {
if (splitRatios.sum >= 1.0) {
throw new IllegalArgumentException("Split ratios must be lower than 1.0")
}
val endpoint = getClusterEndpoint(conf)
val params = Map("ratios" -> splitRatios.toArray, "dataset" -> frameId)
val splitFrameV3 = update[SplitFrameV3](endpoint, "3/SplitFrame", conf, params)
H2OJob(splitFrameV3.key.name).waitForFinishAndPrintProgress()
splitFrameV3.destination_frames.map(frameKey => H2OFrame(frameKey.name))
}
def add(another: H2OFrame): H2OFrame = {
val endpoint = getClusterEndpoint(conf)
val newFrameId = s"${frameId}_added_${another.frameId}"
val params = Map(
"ast" -> MessageFormat.format(s"( assign {0} (cbind {1} {2}))", newFrameId, frameId, another.frameId))
val rapidsFrameV3 = update[RapidsFrameV3](endpoint, "99/Rapids", conf, params)
H2OFrame(rapidsFrameV3.key.name)
}
def renameCol(originalColName: String, newColName: String): H2OFrame = {
if (!columnNames.contains(originalColName)) {
throw new IllegalArgumentException(s"Column '$originalColName' does not exist in the frame $frameId")
}
val endpoint = getClusterEndpoint(conf)
val originalColIdx = columnNames.indexOf(originalColName)
val params = Map(
"ast" -> MessageFormat
.format(s"( assign {0} (colnames= {0} {1} {2}))", frameId, originalColIdx.toString, s""""$newColName""""))
update[RapidsFrameV3](endpoint, "99/Rapids", conf, params)
columnNames.update(originalColIdx, newColName)
this
}
def subframe(columns: Array[String]): H2OFrame = {
val nonExistentColumns = columns.diff(columnNames)
if (nonExistentColumns.nonEmpty) {
throw new IllegalArgumentException(
s"The following columns are not available on the H2OFrame ${this.frameId}: ${nonExistentColumns.mkString(", ")}")
}
if (columns.sorted.sameElements(columnNames.sorted)) {
this
} else {
val endpoint = getClusterEndpoint(conf)
val colIndices = columns.map(columnNames.indexOf)
val newFrameId = s"${frameId}_subframe_${colIndices.mkString("_")}"
val params = Map(
"ast" -> MessageFormat.format(s"( assign {0} (cols {1} {2}))", newFrameId, frameId, stringifyArray(colIndices)))
val rapidsFrameV3 = update[RapidsFrameV3](endpoint, "99/Rapids", conf, params)
H2OFrame(rapidsFrameV3.key.name)
}
}
/**
* Delete this H2O Frame from the cluster
*/
def delete(): Unit = H2OFrame.deleteFrame(conf, frameId)
/**
* Left join this frame with another frame
*
* @param another right frame
* @return new frame
*/
def leftJoin(another: H2OFrame): H2OFrame =
join(another, allFromCurrent = true, allFromAnother = false, "radix")
/**
* Right join this frame with another frame
*
* @param another right frame
* @return new frame
*/
def rightJoin(another: H2OFrame): H2OFrame = {
join(another, allFromCurrent = false, allFromAnother = true, "radix")
}
/**
* Inner join this frame with another frame
*
* @param another right frame
* @return new frame
*/
def innerJoin(another: H2OFrame): H2OFrame =
join(another, allFromCurrent = false, allFromAnother = false, "radix")
/**
* Outer join this frame with another frame
*
* @param another right frame
* @return new frame
*/
def outerJoin(another: H2OFrame): H2OFrame = {
// Outer join is broken in H2O, simulate H2O's join via Spark for now
joinUsingSpark(another, "outer")
}
private def joinUsingSpark(another: H2OFrame, method: String): H2OFrame = {
val hc = H2OContext.ensure()
val currentFrame = hc.asSparkFrame(this.frameId)
val anotherFrame = hc.asSparkFrame(another.frameId)
val sameCols = anotherFrame.columns.intersect(currentFrame.columns)
val joined = currentFrame.join(anotherFrame, sameCols, method)
hc.asH2OFrame(joined)
}
/**
* Join this frame with another frame
*
* @param another right frame
* @param allFromCurrent all values from current frame
* @param allFromAnother all values from another frame
* @param method joining method
* @return
*/
private def join(
another: H2OFrame,
allFromCurrent: Boolean = false,
allFromAnother: Boolean = false,
method: String = "AUTO"): H2OFrame = {
val endpoint = getClusterEndpoint(conf)
val params = Map(
"ast" -> MessageFormat.format(
"( assign {0} (merge {1} {2} {3} {4} [] [] \\"{5}\\"))",
s"${this.frameId}_join_${Random.alphanumeric.take(5).mkString("")}",
this.frameId,
another.frameId,
if (allFromCurrent) "1" else "0",
if (allFromAnother) "1" else "0",
method.toLowerCase()))
val rapidsFrameV3 = update[RapidsFrameV3](endpoint, "99/Rapids", conf, params)
H2OFrame(rapidsFrameV3.key.name)
}
private[sparkling] def collectDoubles(colIdx: Int): Array[Double] = {
val endpoint = getClusterEndpoint(conf)
val frames = query[FramesV3](
endpoint,
s"/3/Frames/$frameId/columns/${columnNames(colIdx)}/summary",
conf,
Map("row_count" -> -1),
Seq((classOf[FrameV3], "chunk_summary"), (classOf[FrameV3], "distribution_summary")))
frames.frames.head.columns.head.data
}
private[sparkling] def collectDoubles(colName: String): Array[Double] = {
collectDoubles(columnNames.indexOf(colName))
}
private[sparkling] def collectLongs(colIdx: Int): Array[Long] = {
collectDoubles(colIdx).map(_.toLong)
}
private[sparkling] def collectLongs(colName: String): Array[Long] = {
collectLongs(columnNames.indexOf(colName))
}
private[sparkling] def collectInts(colIdx: Int): Array[Int] = {
collectDoubles(colIdx).map(_.toInt)
}
private[sparkling] def collectInts(colName: String): Array[Int] = {
collectInts(columnNames.indexOf(colName))
}
private[sparkling] def collectStrings(colIdx: Int): Array[String] = {
val endpoint = getClusterEndpoint(conf)
val frames = query[FramesV3](
endpoint,
s"/3/Frames/$frameId/columns/${columnNames(colIdx)}/summary",
conf,
Map("row_count" -> -1),
Seq((classOf[FrameV3], "chunk_summary"), (classOf[FrameV3], "distribution_summary")))
frames.frames.head.columns.head.string_data
}
}
object H2OFrame extends RestCommunication {
def apply(frameId: String): H2OFrame = {
val conf = H2OContext.ensure().getConf
getFrame(conf, frameId)
}
def apply(file: File): H2OFrame = apply(file: File, null)
def apply(file: File, columnNames: Array[String]): H2OFrame = {
val conf = H2OContext.ensure().getConf
val endpoint = getClusterEndpoint(conf)
val content = withResource(
readURLContent(
endpoint,
"POST",
"/3/PostFile",
conf,
Map.empty,
encodeParamsAsJson = false,
Some(file.getAbsolutePath))) { response =>
IOUtils.toString(response)
}
val gson = new Gson()
val unparsedFrameId =
gson.fromJson(content, classOf[JsonElement]).getAsJsonObject.getAsJsonPrimitive("destination_frame").getAsString
parse(endpoint, conf, Array(unparsedFrameId), columnNames)
}
def apply(uri: URI): H2OFrame = apply(uri: URI, null)
def apply(uri: URI, columnNames: Array[String]): H2OFrame = {
val scheme = uri.getScheme
if ((scheme == null || scheme == "file") && new File(uri).isFile) {
apply(new File(uri), columnNames)
} else {
val conf = H2OContext.ensure().getConf
val endpoint = RestApiUtils.getClusterEndpoint(conf)
val params = Map("paths" -> Array(uri.toString))
val importFilesV3 = RestApiUtils.update[ImportFilesMultiV3](endpoint, "/3/ImportFilesMulti", conf, params)
parse(endpoint, conf, importFilesV3.destination_frames, columnNames)
}
}
def listFrames(): Array[H2OFrame] = {
val conf = H2OContext.ensure().getConf
val endpoint = getClusterEndpoint(conf)
val frames = query[FramesListV3](endpoint, "/3/Frames", conf)
frames.frames.map(fr => H2OFrame(fr.frame_id.name))
}
def exists(frameId: String): Boolean = listFrames().map(_.frameId).contains(frameId)
private def getFrame(conf: H2OConf, frameId: String): H2OFrame = {
val endpoint = getClusterEndpoint(conf)
val frames = query[FramesV3](
endpoint,
s"/3/Frames/$frameId/summary",
conf,
Map("row_count" -> 0),
Seq((classOf[FrameV3], "chunk_summary"), (classOf[FrameV3], "distribution_summary")))
val frame = frames.frames(0)
val chunks = if (frame.rows == 0) {
Array.empty[H2OChunk]
} else {
val clusterNodes = getNodes(getClusterInfo(conf))
val frameChunks = query[FrameChunksV3](endpoint, s"/3/FrameChunks/$frameId", conf)
frameChunks.chunks.map(convertChunk(_, clusterNodes))
}
new H2OFrame(frame.frame_id.name, frame.columns.map(convertColumn), chunks)
}
private def deleteFrame(conf: H2OConf, frameId: String): Unit = {
val endpoint = getClusterEndpoint(conf)
delete[FramesV3](endpoint, s"3/Frames/$frameId", conf)
}
private def convertColumn(sourceColumn: ColV3): H2OColumn = {
H2OColumn(
name = sourceColumn.label,
dataType = H2OColumnType.fromString(sourceColumn.`type`),
min = sourceColumn.mins(0),
max = sourceColumn.maxs(0),
mean = sourceColumn.mean,
sigma = sourceColumn.sigma,
numberOfZeros = sourceColumn.zero_count,
numberOfMissingElements = sourceColumn.missing_count,
percentiles = sourceColumn.percentiles,
domain = sourceColumn.domain,
domainCardinality = sourceColumn.domain_cardinality)
}
private def convertChunk(sourceChunk: FrameChunkV3, clusterNodes: Array[NodeDesc]): H2OChunk = {
H2OChunk(
index = sourceChunk.chunk_id,
numberOfRows = sourceChunk.row_count,
location = clusterNodes(sourceChunk.node_idx))
}
private[sparkling] def initializeFrame(conf: H2OConf, frameId: String, columns: Array[String]): InitializeFrameV3 = {
val endpoint = getClusterEndpoint(conf)
val parameters = Map("key" -> frameId, "columns" -> columns)
update[InitializeFrameV3](endpoint, Paths.INITIALIZE_FRAME, conf, parameters)
}
private[sparkling] def finalizeFrame(
conf: H2OConf,
frameId: String,
rowsPerChunk: Array[Long],
columnTypes: Array[Byte]): FinalizeFrameV3 = {
val endpoint = getClusterEndpoint(conf)
val parameters = Map(
"key" -> frameId,
"rows_per_chunk" -> Base64Encoding.encode(rowsPerChunk),
"column_types" -> Base64Encoding.encode(columnTypes))
update[FinalizeFrameV3](endpoint, Paths.FINALIZE_FRAME, conf, parameters)
}
private def parse(
endpoint: URI,
conf: H2OConf,
unparsedFrameIds: Array[String],
columnNames: Array[String]): H2OFrame = {
val parseSetupParams = Map("source_frames" -> unparsedFrameIds, "column_names" -> columnNames)
val parseSetup = update[ParseSetupV3](endpoint, "/3/ParseSetup", conf, parseSetupParams)
val params = Map(
"source_frames" -> unparsedFrameIds,
"destination_frame" -> parseSetup.destination_frame,
"parse_type" -> parseSetup.parse_type,
"separator" -> parseSetup.separator,
"single_quotes" -> parseSetup.single_quotes,
"check_header" -> parseSetup.check_header,
"number_columns" -> parseSetup.number_columns,
"chunk_size" -> parseSetup.chunk_size,
"column_types" -> parseSetup.column_types,
"column_names" -> parseSetup.column_names,
"skipped_columns" -> parseSetup.skipped_columns,
"custom_non_data_line_markers" -> parseSetup.custom_non_data_line_markers,
"delete_on_done" -> true)
val parse = update[ParseV3](endpoint, "/3/Parse", conf, params)
val jobId = parse.job.key.name
H2OJob(jobId).waitForFinishAndPrintProgress()
H2OFrame(parse.destination_frame.name)
}
}
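// Editor's sketch (not part of the original file): a hedged usage example of the REST-based
// H2OFrame API defined above. The CSV path is hypothetical, the categorical column is simply the
// first column of the frame, and a running H2OContext is assumed, as H2OFrame itself requires.
private[sparkling] object H2OFrameUsageSketch {
  def example(): Array[H2OFrame] = {
    val frame = H2OFrame(new File("/tmp/example.csv")) // uploads and parses the file via REST
    val withFactors = frame.convertColumnsToCategorical(frame.columnNames.take(1))
    withFactors.split(0.8) // two frames: roughly 80% of the rows and the remainder
  }
}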
|
h2oai/sparkling-water
|
core/src/main/scala/ai/h2o/sparkling/H2OFrame.scala
|
Scala
|
apache-2.0
| 16,369
|
package com.oct.sclaav
import java.io.File
import com.oct.sclaav.cli.Parser
import com.oct.sclaav.visual.assembly.mosaic.{DoMosaicOfMosaics, DoMosaic}
import com.oct.sclaav.visual.search.MatchesByArgbAverageThresh
import org.slf4j.LoggerFactory
object Main {
val log = LoggerFactory.getLogger(getClass)
def main(args: Array[String]) {
Parser.parse(args) match {
case Some(config) =>
val inPath = config.in
val outPath = config.out
val maxSamples = config.maxSamplePhotos
val rows = config.rows
val cols = config.cols
val doManipulate = config.manipulate
val mode = config.mode
log.info(s"inPath: $inPath outPath: $outPath")
val files = new File(inPath.get).listFiles().filter(_.isFile).take(maxSamples.get)
mode match {
case Mode.SIMILARITY =>
log.info("doing similarity")
val target = config.singleTarget
val targetImg = new File(target.get)
val matches = MatchesByArgbAverageThresh(targetImg, files)
log.info(s"Found ${matches.length} matches!")
log.info(s"For ${target.get.getPath} the matches are: ")
matches.foreach(m => log.info(s"Match: ${m._1.getAbsolutePath}"))
// case Mode.SIMILARITY_PERMUTE =>
case Mode.MOSAIC_OF_MOSAICS =>
log.info("doing mosaic of mosaics")
val target = config.singleTarget
DoMosaicOfMosaics(new File(target.get), files, cols.get, rows.get, Some(new File(outPath.get)), doManipulate = doManipulate)
case Mode.MOSAIC_SINGLE_FILE =>
log.info("using single file target")
val target = config.singleTarget
DoMosaic(new File(target.get), files, cols.get, rows.get, Some(new File(outPath.get)), doManipulate = doManipulate)
case Mode.MOSAIC_PERMUTE_ALL_FILES =>
log.info("permuting all files in input dir")
for(file <- files) {
val controlFile = file
val sampleFiles = files.filter(_ != controlFile)
log.info(s"running with control image: ${controlFile.getName}")
DoMosaic(controlFile, sampleFiles, cols.get, rows.get, Some(new File(outPath.get)), doManipulate = doManipulate)
}
}
case None =>
log.error(s"Failed to parse args ${args.toList.toString}")
}
}
}
|
ogeagla/sclaav
|
src/main/scala/com/oct/sclaav/Main.scala
|
Scala
|
apache-2.0
| 2,429
|
package com.twitter.inject.app.internal
import com.google.inject.TypeLiteral
import com.google.inject.spi.TypeConverter
import com.twitter.util.{Duration => TwitterDuration}
import org.joda.time.Duration
private[app] object JodatimeDurationTypeConvertor extends TypeConverter {
def convert(value: String, toType: TypeLiteral[_]): Duration = {
val twitterDuration = TwitterDuration.parse(value)
Duration.millis(twitterDuration.inMillis)
}
}
|
syamantm/finatra
|
inject/inject-app/src/main/scala/com/twitter/inject/app/internal/JodatimeDurationTypeConvertor.scala
|
Scala
|
apache-2.0
| 454
|
package examples.miscellanea
import java.awt.{Dimension, Graphics2D, Point}
import rescala._
import scala.collection.mutable.ListBuffer
import scala.swing.{MainFrame, Panel, SimpleSwingApplication, Swing}
object PulsingCircle extends SimpleSwingApplication {
lazy val application = new PulsingCircle
def top = application.frame
override def main(args: Array[String]): Unit = {
super.main(args)
while (true) {
Swing onEDTWait {
application.base.transform(_ + 1)
application.frame.repaint
}
Thread sleep 20
}
}
}
class PulsingCircle {
/*
class Point(val x: Double,val y: Double) {
def move(delta: Delta) = new Point(x + delta.x, y + delta.y)
override def toString = "Point("+ x + "," + y +")"
}
class Line(m: Double, q: Double) {
def translate(delta: Float) = new Line(m, q + delta)
def rotate(delta: Float) = new Line(m + delta, q)
override def toString = "Line("+ m + "," + q +")"
}
*/
val toDraw = ListBuffer[Function1[Graphics2D, Unit]]()
type Delta = Point
class Oval(center: Signal[Point], radius: Signal[Int]) {
toDraw += ((g: Graphics2D) => {g.fillOval(center.now.x, center.now.y, radius.now, radius.now)})
override def toString = "Circle(" + center + "," + radius + ")"
}
val base = Var(0)
val time = Signal {base() % 200} // time is cyclic :)
val point1 = Signal {new Point(20 + time(), 20 + time())}
new Oval(point1, time)
val point2 = Signal {new Point(40 + time(), 80 + time())}
new Oval(point2, time)
val point3 = Signal {new Point(80 + time(), 40 + time())}
new Oval(point3, time)
val point4 = Signal {new Point(160 + time(), 160 + time())}
new Oval(point4, time)
// drawing code
def top = frame
val frame = new MainFrame {
contents = new Panel() {
preferredSize = new Dimension(600, 600)
override def paintComponent(g: Graphics2D): Unit = {
toDraw.map(x => x(g))
}
}
}
}
|
volkc/REScala
|
Examples/examples/src/main/scala/examples/miscellanea/PulsingCircle.scala
|
Scala
|
apache-2.0
| 1,967
|
package org.http4s
import cats._
import cats.data._
import cats.functor.Contravariant
import cats.implicits._
final case class QueryParameterKey(value: String) extends AnyVal
final case class QueryParameterValue(value: String) extends AnyVal
/**
* type class defining the key of a query parameter
* Usually used in conjunction with [[QueryParamEncoder]] and [[QueryParamDecoder]]
*/
trait QueryParam[T] {
def key: QueryParameterKey
}
object QueryParam {
/** summon an implicit [[QueryParam]] */
def apply[T](implicit ev: QueryParam[T]): QueryParam[T] = ev
def fromKey[T](k: String): QueryParam[T] = new QueryParam[T] {
def key: QueryParameterKey = QueryParameterKey(k)
}
}
trait QueryParamKeyLike[T] {
def getKey(t: T): QueryParameterKey
}
object QueryParamKeyLike {
def apply[T](implicit ev: QueryParamKeyLike[T]): QueryParamKeyLike[T] = ev
implicit val stringKey: QueryParamKeyLike[String] = new QueryParamKeyLike[String] {
override def getKey(t: String): QueryParameterKey = QueryParameterKey(t)
}
}
/**
* Type class defining how to encode a `T` as a [[QueryParameterValue]]s
* @see QueryParamCodecLaws
*/
trait QueryParamEncoder[T] { outer =>
def encode(value: T): QueryParameterValue
/** QueryParamEncoder is a contravariant functor. */
def contramap[U](f: U => T): QueryParamEncoder[U] =
new QueryParamEncoder[U] {
override def encode(value: U) =
outer.encode(f(value))
}
}
object QueryParamEncoder {
/** summon an implicit [[QueryParamEncoder]] */
def apply[T](implicit ev: QueryParamEncoder[T]): QueryParamEncoder[T] = ev
/** QueryParamEncoder is a contravariant functor. */
implicit val ContravariantQueryParamEncoder: Contravariant[QueryParamEncoder] =
new Contravariant[QueryParamEncoder] {
override def contramap[A, B](fa: QueryParamEncoder[A])(f: B => A) =
fa.contramap(f)
}
@deprecated("Use QueryParamEncoder[U].contramap(f)", "0.16")
def encodeBy[T, U](f: T => U)(
implicit qpe: QueryParamEncoder[U]
): QueryParamEncoder[T] =
qpe.contramap(f)
@deprecated("Use QueryParamEncoder[String].contramap(f)", "0.16")
def encode[T](f: T => String): QueryParamEncoder[T] =
stringQueryParamEncoder.contramap(f)
def fromShow[T](
implicit sh: Show[T]
): QueryParamEncoder[T] =
stringQueryParamEncoder.contramap(sh.show)
implicit lazy val booleanQueryParamEncoder: QueryParamEncoder[Boolean] = fromShow[Boolean]
implicit lazy val doubleQueryParamEncoder : QueryParamEncoder[Double] = fromShow[Double]
implicit lazy val floatQueryParamEncoder : QueryParamEncoder[Float] = fromShow[Float]
implicit lazy val shortQueryParamEncoder : QueryParamEncoder[Short] = fromShow[Short]
implicit lazy val intQueryParamEncoder : QueryParamEncoder[Int] = fromShow[Int]
implicit lazy val longQueryParamEncoder : QueryParamEncoder[Long] = fromShow[Long]
implicit lazy val stringQueryParamEncoder : QueryParamEncoder[String] =
new QueryParamEncoder[String] {
override def encode(value: String) =
QueryParameterValue(value)
}
}
/**
* Type class defining how to decode a [[QueryParameterValue]] into a `T`
* @see QueryParamCodecLaws
*/
trait QueryParamDecoder[T] { outer =>
def decode(value: QueryParameterValue): ValidatedNel[ParseFailure, T]
/** QueryParamDecoder is a covariant functor. */
def map[U](f: T => U): QueryParamDecoder[U] =
new QueryParamDecoder[U] {
override def decode(value: QueryParameterValue) =
outer.decode(value).map(f)
}
/** Use another decoder if this one fails. */
def orElse[U >: T](qpd: QueryParamDecoder[U]): QueryParamDecoder[U] =
new QueryParamDecoder[U] {
override def decode(value: QueryParameterValue) =
outer.decode(value) orElse qpd.decode(value)
}
}
object QueryParamDecoder {
/** summon an implicit [[QueryParamDecoder]] */
def apply[T](implicit ev: QueryParamDecoder[T]): QueryParamDecoder[T] = ev
def fromUnsafeCast[T](cast: QueryParameterValue => T)(typeName: String): QueryParamDecoder[T] = new QueryParamDecoder[T]{
def decode(value: QueryParameterValue): ValidatedNel[ParseFailure, T] =
Validated.catchNonFatal(cast(value)).leftMap(t =>
ParseFailure(s"Query decoding $typeName failed", t.getMessage)
).toValidatedNel
}
/** QueryParamDecoder is a covariant functor. */
implicit val FunctorQueryParamDecoder: Functor[QueryParamDecoder] =
new Functor[QueryParamDecoder] {
override def map[A, B](fa: QueryParamDecoder[A])(f: A => B) =
fa.map(f)
}
/** QueryParamDecoder is a MonoidK. */
implicit val PlusEmptyQueryParamDecoder: MonoidK[QueryParamDecoder] =
new MonoidK[QueryParamDecoder] {
def empty[A] =
fail[A]("Decoding failed.", "Empty decoder (always fails).")
def combineK[A](a: QueryParamDecoder[A], b: QueryParamDecoder[A]) =
a.orElse(b)
}
@deprecated("Use QueryParamEncoder[T].map(f)", "0.16")
def decodeBy[U, T](f: T => U)(
implicit qpd: QueryParamDecoder[T]
): QueryParamDecoder[U] =
qpd.map(f)
/** A decoder that always succeeds. */
def success[A](a: A): QueryParamDecoder[A] =
fromUnsafeCast[A](_ => a)("Success")
/** A decoder that always fails. */
def fail[A](sanitized: String, detail: String): QueryParamDecoder[A] =
new QueryParamDecoder[A] {
override def decode(value: QueryParameterValue) =
ParseFailure(sanitized, detail).invalidNel
}
implicit lazy val unitQueryParamDecoder: QueryParamDecoder[Unit] =
success(())
implicit lazy val booleanQueryParamDecoder: QueryParamDecoder[Boolean] =
fromUnsafeCast[Boolean](_.value.toBoolean)("Boolean")
implicit lazy val doubleQueryParamDecoder: QueryParamDecoder[Double] =
fromUnsafeCast[Double](_.value.toDouble)("Double")
implicit lazy val floatQueryParamDecoder: QueryParamDecoder[Float] =
fromUnsafeCast[Float](_.value.toFloat)("Float")
implicit lazy val shortQueryParamDecoder: QueryParamDecoder[Short] =
fromUnsafeCast[Short](_.value.toShort)("Short")
implicit lazy val intQueryParamDecoder: QueryParamDecoder[Int] =
fromUnsafeCast[Int](_.value.toInt)("Int")
implicit lazy val longQueryParamDecoder: QueryParamDecoder[Long] =
fromUnsafeCast[Long](_.value.toLong)("Long")
implicit lazy val charQueryParamDecoder: QueryParamDecoder[Char] = new QueryParamDecoder[Char]{
def decode(value: QueryParameterValue): ValidatedNel[ParseFailure, Char] =
if(value.value.size == 1) value.value.head.validNel
else ParseFailure("Failed to parse Char query parameter",
s"Could not parse ${value.value} as a Char").invalidNel
}
implicit lazy val stringQueryParamDecoder: QueryParamDecoder[String] = new QueryParamDecoder[String]{
def decode(value: QueryParameterValue): ValidatedNel[ParseFailure, String] =
value.value.validNel
}
}
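// Editor's sketch (not part of the original file): a small, hypothetical usage example of the
// type classes defined above; `UserId` is an assumed wrapper type and is not part of http4s.
private[http4s] object QueryParamCodecUsageSketch {
  final case class UserId(value: Long)
  implicit val userIdQueryParamEncoder: QueryParamEncoder[UserId] =
    QueryParamEncoder[Long].contramap(_.value)
  // yields QueryParameterValue("42")
  val encoded: QueryParameterValue = QueryParamEncoder[UserId].encode(UserId(42L))
  // yields Valid(42); a non-numeric value would produce an Invalid ParseFailure instead
  val decoded: ValidatedNel[ParseFailure, Int] = QueryParamDecoder[Int].decode(QueryParameterValue("42"))
}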
|
ZizhengTai/http4s
|
core/src/main/scala/org/http4s/QueryParam.scala
|
Scala
|
apache-2.0
| 6,917
|
import scala.quoted._
import scala.quoted.staging._
object Test {
def main(args: Array[String]): Unit = {
given Toolbox = Toolbox.make(getClass.getClassLoader)
val f = run {
f1
}
println(f(42))
println(f(43))
}
def f1(using QuoteContext): Expr[Int => Int] = '{ n => ${Expr.betaReduce(f2)('n)} }
def f2(using QuoteContext): Expr[Int => Int] = '{ n => ${Expr.betaReduce(f3)('n)} }
def f3(using QuoteContext): Expr[Int => Int] = '{ n => ${Expr.betaReduce(f4)('n)} }
def f4(using QuoteContext): Expr[Int => Int] = '{ n => n }
}
|
som-snytt/dotty
|
tests/run-staging/quote-fun-app-1.scala
|
Scala
|
apache-2.0
| 565
|
package rros
////////////////////////////////////////////////////////////////////////////////
/**
 * In order for another socket implementation to use the RROS protocol, a developer needs to
 * implement a small adapter which implements the SocketInterface (a minimal, hypothetical
 * adapter sketch follows the trait below).
* Created by namnguyen on 3/1/15.
*/
trait SocketAdapter extends AutoCloseable{
//----------------------------------------------------------------------------
def send(message:String):Unit
//----------------------------------------------------------------------------
def close():Unit
//----------------------------------------------------------------------------
def += (socketListener: SocketListener):Unit =
this.synchronized {
_socketListeners = _socketListeners + socketListener
}
//----------------------------------------------------------------------------
def -= (socketListener: SocketListener):Unit =
this.synchronized {
_socketListeners = _socketListeners - socketListener
}
//----------------------------------------------------------------------------
def socketListeners = _socketListeners
//----------------------------------------------------------------------------
private var _socketListeners:Set[SocketListener] = Set()
//----------------------------------------------------------------------------
}
////////////////////////////////////////////////////////////////////////////////
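// Editor's sketch (not part of the original file): a minimal, hypothetical adapter showing how a
// concrete transport might implement the two abstract members of SocketAdapter above; the
// println-based "transport" is only a stand-in for a real socket write.
class ConsoleSocketAdapter extends SocketAdapter {
  override def send(message: String): Unit = println(s"OUT> $message")
  override def close(): Unit = () // a real adapter would release its underlying socket here
}
////////////////////////////////////////////////////////////////////////////////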
|
namhnguyen/RROS
|
src/main/scala/rros/SocketAdapter.scala
|
Scala
|
apache-2.0
| 1,397
|
package concrete
package constraint
package semantic
import java.util
import bitvectors.BitVector
import cspom.compiler.QueueSet
import scala.collection.mutable
/**
* Constraint x(i) = j <=> x(j) = i
*/
final class Channel(x: Array[Variable], offset: Int) extends Constraint(x)
with StatefulConstraint[Map[Int, Set[Int]]] {
def advise(problemState: ProblemState, event: Event, pos: Int): Int = {
val fx = problemState(this)
val card = fx.size
card * card
}
def check(tuple: Array[Int]): Boolean = {
x.indices.forall { i =>
val j = tuple(i)
tuple(j - offset) == i + offset
}
}
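  // Worked example (hypothetical values): with offset = 1 and tuple = Array(2, 1, 4, 3),
  // i = 0 gives j = 2 and tuple(2 - 1) = 1 = 0 + 1; i = 1 gives j = 1 and tuple(0) = 2 = 1 + 1;
  // i = 2 gives j = 4 and tuple(3) = 3 = 2 + 1; i = 3 gives j = 3 and tuple(2) = 4 = 3 + 1,
  // so this tuple satisfies the channeling constraint.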
def init(ps: ProblemState): Outcome =
ps.fold(x) { (ps, v) =>
ps.shaveDom(v, offset, offset + x.length - 1)
}
.andThen { ps =>
ps.updateState(this, computePredecessors(ps))
}
def computePredecessors(ps: ProblemState): Map[Int, Set[Int]] = {
val pred = new mutable.HashMap[Int, Set[Int]].withDefaultValue(Set())
for (p <- x.indices if !ps.dom(p).isAssigned; v <- ps.dom(p)) {
pred(v) += p + offset
}
pred.toMap
}
def revise(problemState: ProblemState, mod: BitVector): Outcome = {
//println(s"$mod of ${toString(problemState)}")
// fx contains "free variables"
val queue = new QueueSet(util.BitSet.valueOf(mod.words))
var pred = problemState(this)
var ps = problemState: Outcome
while (queue.nonEmpty && ps.isState) {
val m = queue.dequeue()
val dom = ps.dom(x(m))
for (p <- pred.getOrElse(m + offset, Iterator.empty)) {
if (!dom.contains(p)) {
val mod = ps.remove(x(p - offset), m + offset)
if (mod ne ps) {
queue.enqueue(p - offset)
ps = mod
}
pred = pred.updated(p, pred(p) - (m + offset))
.updated(m + offset, pred(m + offset) - p)
}
}
if (dom.isAssigned) {
val v = dom.singleValue
val mod = ps.tryAssign(x(v - offset), m + offset)
if (mod ne ps) {
queue.enqueue(v - offset)
ps = mod
}
pred -= m + offset
}
}
assert(ps.isState || pred == computePredecessors(ps.toState))
ps.updateState(this, pred)
//
// println(toString(problemState) + " " + mod)
//
// var fx = problemState(this)
//
// // Check whether variables have been assigned
// problemState.fold(mod) { (ps, p) =>
// val dom = ps.dom(x(p))
// if (dom.isAssigned) {
// fx -= p
//
// val v = dom.singleValue
//
// ps.tryAssign(x(v - offset), p + offset)
// } else {
// ps
// }
// }
// .fold(fx) { (ps, p) =>
//
// val r = ps.filterDom(x(p))(v => ps.dom(x(v - offset)).contains(p + offset))
// println(x(p).toString(ps) + " -> " + x(p).toString(r.toState))
// r
// }
// .updateState(this, fx)
}
override def toString(ps: ProblemState): String = {
s"channel $offset/${x.map(ps.dom(_).toString).mkString(", ")} fx = ${ps(this)}"
}
def simpleEvaluation: Int = 2
}
|
concrete-cp/concrete
|
src/main/scala/concrete/constraint/semantic/Channel.scala
|
Scala
|
lgpl-2.1
| 3,158
|
package eu.leadowl.rd.axon.quickstart.aggregate
import eu.leadowl.rd.axon.quickstart.commands.{MarkCompletedCommand, CreateToDoItemCommand}
import eu.leadowl.rd.axon.quickstart.events.{ToDoItemCompletedEvent, ToDoItemCreatedEvent}
import org.axonframework.scynapse.test.EventMatchers
import org.axonframework.test.Fixtures
import org.scalatest.FlatSpec
import org.specs2.matcher.ShouldMatchers
/**
* Created by leadowl on 13/06/2015.
*/
class ToDoItemTest extends FlatSpec with ShouldMatchers with EventMatchers {
"ToDoItem" should "be created" in new sut {
fixture
.given()
.when(CreateToDoItemCommand("todo1", "need to implement the aggregate"))
.expectEventsMatching( withPayloads(isEqualTo(ToDoItemCreatedEvent("todo1", "need to implement the aggregate"))))
}
it should "be marked completed" in new sut {
fixture
.given(ToDoItemCreatedEvent("todo1", "need to implement the aggregate"))
.when(MarkCompletedCommand("todo1"))
.expectEventsMatching( withPayloads(isEqualTo(ToDoItemCompletedEvent("todo1"))))
}
}
trait sut {
val fixture = {
val f = Fixtures.newGivenWhenThenFixture(classOf[ToDoItem])
f.setReportIllegalStateChange(false)
f
}
}
|
leadowl/axon-quickstart-scala
|
src/test/scala/eu/leadowl/rd/axon/quickstart/aggregate/ToDoItemTest.scala
|
Scala
|
mit
| 1,217
|
/**
* Copyright (C) 2014 TU Berlin (peel@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.peelframework.zookeeper.beans.system
import com.samskivert.mustache.Mustache
import org.peelframework.core.beans.system.Lifespan.Lifespan
import org.peelframework.core.beans.system.System
import org.peelframework.core.config.{Model, SystemConfig}
import org.peelframework.core.util.shell
import scala.collection.JavaConverters._
/** Wrapper class for ZooKeeper.
 *
 * @param version Version of the system (e.g. "7.1")
 * @param configKey The system configuration resides under `system.\${configKey}`
 * @param lifespan `Lifespan` of the system
 * @param dependencies Set of dependencies that this system needs
 * @param mc The Mustache compiler used to compile the templates that generate property files for the system
*/
class Zookeeper(
version : String,
configKey : String,
lifespan : Lifespan,
dependencies : Set[System] = Set(),
mc : Mustache.Compiler) extends System("zookeeper", version, configKey, lifespan, dependencies, mc) {
override def configuration() = SystemConfig(config, {
val conf = config.getString(s"system.$configKey.path.config")
List(
SystemConfig.Entry[Model.Site](s"system.$configKey.config", s"$conf/zoo.cfg", templatePath("conf/zoo.cfg"), mc)
)
})
override def start(): Unit = if (!isUp) {
this.servers.foreach(start)
isUp = true
}
override def stop(): Unit = this.servers.foreach(stop)
def isRunning = this.servers.forall(s => isRunning(s))
def cli = new Zookeeper.Cli(config.getString(s"system.$configKey.path.home"), servers.head.host, config.getInt(s"system.$configKey.config.clientPort"))
private def start(s: Zookeeper.Server) = {
logger.info(s"Starting zookeeper at ${s.host}:${s.leaderPort}:${s.quorumPort}")
val user = config.getString(s"system.$configKey.user")
shell !
s"""
|ssh -t -t "$user@${s.host}" << SSHEND
| ${config.getString(s"system.$configKey.path.home")}/bin/zkServer.sh start
| echo ${s.id} > ${config.getString(s"system.$configKey.config.dataDir")}/myid
| exit
|SSHEND
""".stripMargin.trim
}
private def stop(s: Zookeeper.Server) = {
logger.info(s"Stopping zookeeper at ${s.host}:${s.leaderPort}:${s.quorumPort}")
val user = config.getString(s"system.$configKey.user")
shell ! s""" ssh $user@${s.host} ${config.getString(s"system.$configKey.path.home")}/bin/zkServer.sh stop """
}
private def isRunning(s: Zookeeper.Server) = {
val user = config.getString(s"system.$configKey.user")
val pidFile = s"${config.getString(s"system.$configKey.config.dataDir")}/zookeeper_server.pid"
(shell !! s""" ssh $user@${s.host} "ps -p `cat $pidFile` >/dev/null 2>&1; echo $$?" """).stripLineEnd.toInt == 0
}
private def servers = {
// grab servers from config
val serverConfigs = config.getConfig(s"system.$configKey.config.server").entrySet().asScala.map(v => v.getKey.substring(1, v.getKey.length() - 1) + ":" + v.getValue.unwrapped().toString)
// match and return valid server configs
serverConfigs.collect({
case Zookeeper.ServerConf(id, host, quorumPort, leaderPort) => Zookeeper.Server(id.toInt, host, quorumPort.toInt, leaderPort.toInt)
})
}
}
object Zookeeper {
val ServerConf = "(\\\\d+):([\\\\w\\\\-\\\\_\\\\.]+):(\\\\d+):(\\\\d+)".r
case class Server(id: Int, host: String, quorumPort: Int, leaderPort: Int)
class Cli(home: String, serverHost: String, serverPort: Int) {
def !(cmd: String) = shell ! s"$home/bin/zkCli.sh -server $serverHost:$serverPort $cmd"
def !!(cmd: String) = shell !! s"$home/bin/zkCli.sh -server $serverHost:$serverPort $cmd"
}
}
|
lauritzthamsen/peel
|
peel-extensions/src/main/scala/org/peelframework/zookeeper/beans/system/Zookeeper.scala
|
Scala
|
apache-2.0
| 4,281
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.codegen
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.RandomDataGenerator
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.expressions.UnsafeProjection
import org.apache.spark.sql.types._
/**
* Test suite for [[GenerateUnsafeRowJoiner]].
*
* There is also a separate [[GenerateUnsafeRowJoinerBitsetSuite]] that tests specifically
* concatenation for the bitset portion, since that is the hardest one to get right.
*/
class GenerateUnsafeRowJoinerSuite extends SparkFunSuite {
private val fixed = Seq(IntegerType)
private val variable = Seq(IntegerType, StringType)
test("simple fixed width types") {
testConcat(0, 0, fixed)
testConcat(0, 1, fixed)
testConcat(1, 0, fixed)
testConcat(64, 0, fixed)
testConcat(0, 64, fixed)
testConcat(64, 64, fixed)
}
test("randomized fix width types") {
for (i <- 0 until 20) {
testConcatOnce(Random.nextInt(100), Random.nextInt(100), fixed)
}
}
test("simple variable width types") {
testConcat(0, 0, variable)
testConcat(0, 1, variable)
testConcat(1, 0, variable)
testConcat(64, 0, variable)
testConcat(0, 64, variable)
testConcat(64, 64, variable)
}
test("randomized variable width types") {
for (i <- 0 until 10) {
testConcatOnce(Random.nextInt(100), Random.nextInt(100), variable)
}
}
test("SPARK-22508: GenerateUnsafeRowJoiner.create should not generate codes beyond 64KB") {
val N = 3000
testConcatOnce(N, N, variable)
}
private def testConcat(numFields1: Int, numFields2: Int, candidateTypes: Seq[DataType]): Unit = {
for (i <- 0 until 10) {
testConcatOnce(numFields1, numFields2, candidateTypes)
}
}
private def testConcatOnce(numFields1: Int, numFields2: Int, candidateTypes: Seq[DataType]) {
info(s"schema size $numFields1, $numFields2")
val random = new Random()
val schema1 = RandomDataGenerator.randomSchema(random, numFields1, candidateTypes)
val schema2 = RandomDataGenerator.randomSchema(random, numFields2, candidateTypes)
// Create the converters needed to convert from external row to internal row and to UnsafeRows.
val internalConverter1 = CatalystTypeConverters.createToCatalystConverter(schema1)
val internalConverter2 = CatalystTypeConverters.createToCatalystConverter(schema2)
val converter1 = UnsafeProjection.create(schema1)
val converter2 = UnsafeProjection.create(schema2)
// Create the input rows, convert them into UnsafeRows.
val extRow1 = RandomDataGenerator.forType(schema1, nullable = false).get.apply()
val extRow2 = RandomDataGenerator.forType(schema2, nullable = false).get.apply()
val row1 = converter1.apply(internalConverter1.apply(extRow1).asInstanceOf[InternalRow])
val row2 = converter2.apply(internalConverter2.apply(extRow2).asInstanceOf[InternalRow])
// Run the joiner.
val mergedSchema = StructType(schema1 ++ schema2)
val concater = GenerateUnsafeRowJoiner.create(schema1, schema2)
val output = concater.join(row1, row2)
// Test everything equals ...
for (i <- mergedSchema.indices) {
if (i < schema1.size) {
assert(output.isNullAt(i) === row1.isNullAt(i))
if (!output.isNullAt(i)) {
assert(output.get(i, mergedSchema(i).dataType) === row1.get(i, mergedSchema(i).dataType))
}
} else {
assert(output.isNullAt(i) === row2.isNullAt(i - schema1.size))
if (!output.isNullAt(i)) {
assert(output.get(i, mergedSchema(i).dataType) ===
row2.get(i - schema1.size, mergedSchema(i).dataType))
}
}
}
}
}
|
ron8hu/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateUnsafeRowJoinerSuite.scala
|
Scala
|
apache-2.0
| 4,565
|
package com.ldaniels528.ricochet.entity
import java.util.Random
import com.ldaniels528.ricochet.Direction
import com.ldaniels528.ricochet.Direction._
/**
* Represents a moving entity within the virtual world
* @author "Lawrence Daniels" <lawrence.daniels@gmail.com>
*/
trait MovingEntity extends Entity {
private val random = new Random()
/**
* Returns the opposite of the given direction
* @param direction the given [[Direction]]
* @return the opposite direction
*/
protected def opposite(direction: Direction): Direction = {
direction match {
case NE => SW
case NW => SE
case SE => NW
case SW => NE
}
}
/**
* Returns a random direction (limited to the given sequence of directions)
* @return a random direction
*/
protected def randomDirection(directions: Direction*): Direction = {
directions(random.nextInt(directions.length))
}
}
|
ldaniels528/ricochet
|
src/main/scala/com/ldaniels528/ricochet/entity/MovingEntity.scala
|
Scala
|
apache-2.0
| 916
|
package com.actors
import akka.actor.{Actor, ActorRef}
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSubMediator._
import system.ontologies.Topic
import system.ontologies.messages.Location._
import system.ontologies.messages.MessageType.Init
import system.ontologies.messages.{AriadneMessage, Greetings, Location, MessageContent}
/**
 * This class gives a common template for an Akka Subscriber.
 *
 * The actual implementation of this template is partly provided by the BasicActor superclass and
 * partly by the abstract val `topics`, which must hold the topics to which the Actor needs to be
 * subscribed (a hypothetical subclass sketch follows the class below).
 *
 * The subscription is done automatically during the preStart phase.
*
* Created by Alessandro on 01/07/2017.
*/
abstract class TemplateSubscriber(mediator: ActorRef) extends TemplateActor {
val cluster: Cluster = akka.cluster.Cluster(context.system)
  protected val topics: Set[Topic[MessageContent]] // must be overridden by concrete subscribers
private var ackTopicReceived: Int = 0
override protected def init(args: List[String]): Unit = {
super.init(args)
mediator ! Put(self) // Point 2 Point Messaging with other Actors of the cluster
topics.foreach(topic => mediator ! Subscribe(topic, self))
}
override protected def receptive: Receive = {
case SubscribeAck(Subscribe(topic, _, me)) if me == self =>
log.info("{} Successfully Subscribed to {}", name, topic)
ackTopicReceived = ackTopicReceived + 1
if (ackTopicReceived == topics.size) {
this.context.become(subscribed, discardOld = true)
siblings ! AriadneMessage(Init, Init.Subtype.Greetings,
Location.Cell >> Location.Self, Greetings(List(ClusterMembersListener.greetings)))
log.info("I've become Subscribed!")
unstashAll
}
case _: AriadneMessage[_] => stash
case _ => desist _
}
protected def subscribed: Actor.Receive
}
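// Editor's sketch (comment only, since Topic construction is project specific): a concrete
// subscriber would override `topics` with the topics to join and `subscribed` with its
// post-subscription behaviour, roughly:
//   class RoomSubscriber(mediator: ActorRef) extends TemplateSubscriber(mediator) {
//     override protected val topics: Set[Topic[MessageContent]] = Set(/* project topics */)
//     override protected def subscribed: Actor.Receive = { case msg => /* handle published messages */ }
//   }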
/**
 * This class gives a common template for an Akka Publisher.
*
*/
abstract class TemplatePublisher(mediator: ActorRef) extends TemplateActor {
val cluster: Cluster = akka.cluster.Cluster(context.system)
// Point 2 Point Messaging with other Actors of the cluster
override protected def init(args: List[String]): Unit = {
super.init(args)
mediator ! Put(self) // Point 2 Point Messaging with other Actors of the cluster
}
}
|
albertogiunta/arianna
|
src/main/scala/com/actors/BasicPubSub.scala
|
Scala
|
gpl-3.0
| 2,541
|
package com.websudos.phantom.builder.ops
import org.scalatest.{Matchers, FlatSpec}
class OperatorsTest extends FlatSpec with Matchers {
}
|
nkijak/phantom
|
phantom-dsl/src/test/scala/com/websudos/phantom/builder/ops/OperatorsTest.scala
|
Scala
|
bsd-2-clause
| 141
|
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.result
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import org.apache.kudu.client.RowResult
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.filter.factory.FastFilterFactory
import org.locationtech.geomesa.filter.filterToString
import org.locationtech.geomesa.kudu.result.KuduResultAdapter.KuduResultAdapterSerialization
import org.locationtech.geomesa.kudu.schema.KuduIndexColumnAdapter.{FeatureIdAdapter, VisibilityAdapter}
import org.locationtech.geomesa.kudu.schema.{KuduSimpleFeatureSchema, RowResultSimpleFeature}
import org.locationtech.geomesa.security.{SecurityUtils, VisibilityEvaluator}
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.ByteBuffers.ExpandingByteBuffer
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
* Converts rows to simple features, first filtering by a predicate
*
* @param sft simple feature type
* @param auths authorizations
* @param ecql filter
*/
case class FilteringAdapter(sft: SimpleFeatureType, auths: Seq[Array[Byte]], ecql: Filter) extends KuduResultAdapter {
private val schema = KuduSimpleFeatureSchema(sft)
private val feature = new RowResultSimpleFeature(sft, FeatureIdAdapter, schema.adapters)
override val columns: Seq[String] =
Seq(FeatureIdAdapter.name, VisibilityAdapter.name) ++ schema.schema.map(_.getName)
override def result: SimpleFeatureType = sft
override def adapt(results: CloseableIterator[RowResult]): CloseableIterator[SimpleFeature] = {
results.flatMap { row =>
val vis = VisibilityAdapter.readFromRow(row)
if ((vis == null || VisibilityEvaluator.parse(vis).evaluate(auths)) &&
{ feature.setRowResult(row); ecql.evaluate(feature) }) {
SecurityUtils.setFeatureVisibility(feature, vis)
Iterator.single(feature)
} else {
CloseableIterator.empty
}
}
}
override def toString: String =
s"FilterAdapter(sft=${sft.getTypeName}{${SimpleFeatureTypes.encodeType(sft)}}, " +
s"filter=${filterToString(ecql)}, auths=${auths.map(new String(_, StandardCharsets.UTF_8)).mkString(",")})"
}
object FilteringAdapter extends KuduResultAdapterSerialization[FilteringAdapter] {
override def serialize(adapter: FilteringAdapter, bb: ExpandingByteBuffer): Unit = {
bb.putString(adapter.sft.getTypeName)
bb.putString(SimpleFeatureTypes.encodeType(adapter.sft, includeUserData = true))
bb.putInt(adapter.auths.length)
adapter.auths.foreach(bb.putBytes)
bb.putString(ECQL.toCQL(adapter.ecql))
}
override def deserialize(bb: ByteBuffer): FilteringAdapter = {
import org.locationtech.geomesa.utils.io.ByteBuffers.RichByteBuffer
val sft = SimpleFeatureTypes.createType(bb.getString, bb.getString)
val auths = Seq.fill(bb.getInt)(bb.getBytes)
val ecql = FastFilterFactory.toFilter(sft, bb.getString)
FilteringAdapter(sft, auths, ecql)
}
}
|
elahrvivaz/geomesa
|
geomesa-kudu/geomesa-kudu-datastore/src/main/scala/org/locationtech/geomesa/kudu/result/FilteringAdapter.scala
|
Scala
|
apache-2.0
| 3,574
|
package io.getquill.mysql
import io.getquill.MonixSpec
import monix.eval.Task
class MonixJdbcContextSpec extends MonixSpec {
val context = testContext
import testContext._
"provides transaction support" - {
"success" in {
(for {
_ <- testContext.run(qr1.delete)
_ <- testContext.transaction {
testContext.run(qr1.insert(_.i -> 33))
}
r <- testContext.run(qr1)
} yield r).runSyncUnsafe().map(_.i) mustEqual List(33)
}
"success - stream" in {
(for {
_ <- testContext.run(qr1.delete)
seq <- testContext.transaction {
for {
_ <- testContext.run(qr1.insert(_.i -> 33))
s <- accumulate(testContext.stream(qr1))
} yield s
}
r <- testContext.run(qr1)
} yield (seq.map(_.i), r.map(_.i))).runSyncUnsafe() mustEqual ((List(33), List(33)))
}
"failure" in {
(for {
_ <- testContext.run(qr1.delete)
e <- testContext.transaction {
Task.sequence(Seq(
testContext.run(qr1.insert(_.i -> 18)),
Task.eval {
throw new IllegalStateException
}
))
}.onErrorHandleWith {
case e: Exception => Task(e.getClass.getSimpleName)
}
r <- testContext.run(qr1)
} yield (e, r.isEmpty)).runSyncUnsafe() mustEqual (("IllegalStateException", true))
}
"nested" in {
(for {
_ <- testContext.run(qr1.delete)
_ <- testContext.transaction {
testContext.transaction {
testContext.run(qr1.insert(_.i -> 33))
}
}
r <- testContext.run(qr1)
} yield r).runSyncUnsafe().map(_.i) mustEqual List(33)
}
"prepare" in {
testContext.prepareParams(
"select * from Person where name=? and age > ?", ps => (List("Sarah", 127), ps)
).runSyncUnsafe() mustEqual List("127", "'Sarah'")
}
}
}
|
mentegy/quill
|
quill-jdbc-monix/src/test/scala/io/getquill/mysql/MonixJdbcContextSpec.scala
|
Scala
|
apache-2.0
| 1,950
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util
import scala.collection.JavaConverters._
import org.apache.spark.sql._
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table, TableCapability}
import org.apache.spark.sql.connector.write.{LogicalWriteInfo, SupportsTruncate, WriteBuilder}
import org.apache.spark.sql.connector.write.streaming.StreamingWrite
import org.apache.spark.sql.execution.streaming.sources.ConsoleWrite
import org.apache.spark.sql.internal.connector.{SimpleTableProvider, SupportsStreamingUpdateAsAppend}
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, DataSourceRegister}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
case class ConsoleRelation(override val sqlContext: SQLContext, data: DataFrame)
extends BaseRelation {
override def schema: StructType = data.schema
}
class ConsoleSinkProvider extends SimpleTableProvider
with DataSourceRegister
with CreatableRelationProvider {
override def getTable(options: CaseInsensitiveStringMap): Table = {
ConsoleTable
}
def createRelation(
sqlContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
// Number of rows to display, by default 20 rows
val numRowsToShow = parameters.get("numRows").map(_.toInt).getOrElse(20)
// Truncate the displayed data if it is too long, by default it is true
val isTruncated = parameters.get("truncate").map(_.toBoolean).getOrElse(true)
data.show(numRowsToShow, isTruncated)
ConsoleRelation(sqlContext, data)
}
def shortName(): String = "console"
}
object ConsoleTable extends Table with SupportsWrite {
override def name(): String = "console"
override def schema(): StructType = StructType(Nil)
override def capabilities(): util.Set[TableCapability] = {
Set(TableCapability.STREAMING_WRITE).asJava
}
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
new WriteBuilder with SupportsTruncate with SupportsStreamingUpdateAsAppend {
private val inputSchema: StructType = info.schema()
// Do nothing for truncate. Console sink is special and it just prints all the records.
override def truncate(): WriteBuilder = this
override def buildForStreaming(): StreamingWrite = {
assert(inputSchema != null)
new ConsoleWrite(inputSchema, info.options)
}
}
}
}
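// Illustrative usage sketch (editorial addition, not part of the original file): how the
// "numRows" and "truncate" options parsed in createRelation above are typically supplied.
// `df` is assumed to be an existing DataFrame.
//
//   df.write
//     .format("console")
//     .option("numRows", "50")      // show 50 rows instead of the default 20
//     .option("truncate", "false")  // do not truncate long values
//     .save()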
|
witgo/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/console.scala
|
Scala
|
apache-2.0
| 3,295
|
package breeze.integrate
import org.scalatest.FunSuite
import breeze.integrate
import breeze.linalg._
import breeze.numerics._
import scala.math.Pi
/**
*
* @author jaketimothy
**/
class NonstiffOdeTest extends FunSuite {
// allowable percent difference
val limit = 0.005
// Euler Equations vs. Matlab ode113
// http://www.mathworks.com/help/matlab/math/ordinary-differential-equations.html#f1-40077
val f = (y: DenseVector[Double], t: Double) => DenseVector(y(1) * y(2), -y(0) * y(2), -0.51 * y(0) * y(1))
val y0 = DenseVector(0.0, 1.0, 1.0)
val t = Array(0.0, 12.0)
val ans = DenseVector(-0.707186602982020, -0.709046793058523, 0.863898186330983)
test("hall54") {
val integrator = new HighamHall54Integrator(0.0, 1.0)
val steps = integrator.integrate(f, y0, t)
assert(abs((steps(1)(0) - ans(0)) / ans(0)) < limit)
assert(abs((steps(1)(1) - ans(1)) / ans(1)) < limit)
assert(abs((steps(1)(2) - ans(2)) / ans(2)) < limit)
}
test("dorpri54") {
val integrator = new DormandPrince54Integrator(0.0, 1.0)
val steps = integrator.integrate(f, y0, t)
assert(abs((steps(1)(0) - ans(0)) / ans(0)) < limit)
assert(abs((steps(1)(1) - ans(1)) / ans(1)) < limit)
assert(abs((steps(1)(2) - ans(2)) / ans(2)) < limit)
}
test("dorpri853") {
val integrator = new DormandPrince853Integrator(0.0, 1.0)
val steps = integrator.integrate(f, y0, t)
assert(abs((steps(1)(0) - ans(0)) / ans(0)) < limit)
assert(abs((steps(1)(1) - ans(1)) / ans(1)) < limit)
assert(abs((steps(1)(2) - ans(2)) / ans(2)) < limit)
}
test("stoer") {
val integrator = new GraggBulirschStoerIntegrator(0.0, 1.0)
val steps = integrator.integrate(f, y0, t)
assert(abs((steps(1)(0) - ans(0)) / ans(0)) < limit)
assert(abs((steps(1)(1) - ans(1)) / ans(1)) < limit)
assert(abs((steps(1)(2) - ans(2)) / ans(2)) < limit)
}
test("bashforth5") {
val integrator = new AdamsBashforthIntegrator(5, 0.0, 1.0)
val steps = integrator.integrate(f, y0, t)
assert(abs((steps(1)(0) - ans(0)) / ans(0)) < limit)
assert(abs((steps(1)(1) - ans(1)) / ans(1)) < limit)
assert(abs((steps(1)(2) - ans(2)) / ans(2)) < limit)
}
test("moulton5") {
val integrator = new AdamsMoultonIntegrator(5, 0.0, 1.0)
val steps = integrator.integrate(f, y0, t)
assert(abs((steps(1)(0) - ans(0)) / ans(0)) < limit)
assert(abs((steps(1)(1) - ans(1)) / ans(1)) < limit)
assert(abs((steps(1)(2) - ans(2)) / ans(2)) < limit)
}
// test("incompatible dimensions") {
// intercept[Exception] {
// integrate.RungeKuttaOdeSolver(DormandPrinceTableau, f, DenseVector(1.0, 0.0), Array(0.0, scala.math.Pi), relTol = DenseVector(1.0))
// }
// intercept[Exception] {
// integrate.RungeKuttaOdeSolver(DormandPrinceTableau, f, DenseVector(1.0, 0.0), Array(0.0, scala.math.Pi), absTol = DenseVector(1.0))
// }
// }
}
|
claydonkey/breeze
|
math/src/test/scala/breeze/integrate/NonstiffOdeTest.scala
|
Scala
|
apache-2.0
| 2,926
|
package gx.answers
/** Question:
* http://stackoverflow.com/questions/40149010/does-this-specific-exercise-lend-itself-well-to-a-functional-style-design-patt/40150083#40150083
*/
object answer001 extends App {
val input = List(Map("abc" -> "123", "xy" -> "yz", "s12" -> "13"), Map("abc" -> "1", "s" -> "33"))
val keys = input.flatMap(_.keys).toSet
// val keyvalues = input.map(kvs => keys.map(k => (k->kvs.getOrElse(k,""))).toMap)
// val values = keyvalues.map(_.values)
val values = input.map(kvs => keys.toList.map(kvs.getOrElse(_, "")))
val result = keys.mkString(",") + "\\n" + values.map(_.mkString(",")).mkString("\\n")
println(result)
}
|
josephguan/scala-stackoverflow
|
src/main/scala/gx/answers/answer001.scala
|
Scala
|
apache-2.0
| 663
|
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.runtime
package iterative.listener
import scala.util.{ Failure, Success, Try }
import org.slf4j.LoggerFactory
import com.asakusafw.spark.runtime.RoundContext
class Logger extends IterativeBatchExecutor.Listener {
private val Logger = LoggerFactory.getLogger(getClass)
override def onExecutorStart(): Unit = {
if (Logger.isInfoEnabled) {
Logger.info("IterativaBatchExecutor started.")
}
}
override def onRoundSubmitted(rc: RoundContext): Unit = {
if (Logger.isInfoEnabled) {
Logger.info(s"Round[${rc}] is submitted.")
}
}
override def onRoundStart(rc: RoundContext): Unit = {
if (Logger.isInfoEnabled) {
Logger.info(s"Round[${rc}] started.")
}
}
override def onRoundCompleted(rc: RoundContext, result: Try[Unit]): Unit = {
result match {
case Success(_) =>
if (Logger.isInfoEnabled) {
Logger.info(s"Round[${rc}] successfully completed.")
}
case Failure(t) =>
if (Logger.isErrorEnabled) {
Logger.error(s"Round[${rc}] failed.", t)
}
}
}
override def onExecutorStop(): Unit = {
if (Logger.isInfoEnabled) {
Logger.info("IterativaBatchExecutor stopped.")
}
}
}
|
ueshin/asakusafw-spark
|
extensions/iterativebatch/runtime/iterative/src/main/scala/com/asakusafw/spark/extensions/iterativebatch/runtime/iterative/listener/Logger.scala
|
Scala
|
apache-2.0
| 1,881
|
package models
// import scala.slick.driver.Postgres.simple._
import service.EncryptionService
// class User(val email: String, var name: String, var description: String) extends DomainEntity {
// var encryptedPassword: String = ""
// def encryptPassword(password: String) = {
// encryptedPassword = EncryptionService.encryptTest(password)
// }
// }
case class User(id: Long,
email: String,
tenant: Option[Tenant] = None,
isActive: Boolean = true
)
|
hsihealth/aim_prototype
|
src/main/scala/models/user.scala
|
Scala
|
apache-2.0
| 527
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.entilzha.spark.s3
import scala.language.implicitConversions
import org.apache.spark.SparkContext
object S3Context {
def apply(sc: SparkContext) = new S3Context(sc)
def apply(sc: SparkContext, defaultNumPartitions: Int) = new S3Context(sc, defaultNumPartitions)
object implicits {
implicit def sparkContextToS3ContextWrapper(sc: SparkContext): S3ContextWrapper = {
new S3ContextWrapper(sc)
}
}
class S3ContextWrapper(@transient sc: SparkContext) {
def s3 = S3Context(sc)
def s3(defaultNumPartitions: Int) = S3Context(sc, defaultNumPartitions)
}
}
class S3Context(@transient sc: SparkContext, defaultNumPartitions: Int) extends Serializable {
def this(@transient sc: SparkContext) {
this(sc, sc.defaultParallelism)
}
/**
* Basic entry point to Amazon S3 access. Requires an S3 bucket and at least one prefix to match on
*
* @param bucket Bucket to read from
* @param prefixes List of prefixes to match files with
* @return [[S3RDD]] with contents of files with bucket and prefixes
*/
@scala.annotation.varargs
def textFileByPrefix(bucket: String, prefixes: String*): S3RDD = {
new S3RDD(sc, bucket, prefixes, defaultNumPartitions)
}
}
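// Illustrative usage sketch (editorial addition, not part of the original file): the implicit
// conversion defined above lets a SparkContext expose the S3 entry point directly. `sc` is
// assumed to be an existing SparkContext; the bucket and prefixes are placeholders.
//
//   import io.entilzha.spark.s3.S3Context.implicits._
//   val lines = sc.s3.textFileByPrefix("my-bucket", "logs/2016/", "logs/2017/")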
|
EntilZha/spark-s3
|
src/main/scala/io/entilzha/spark/s3/S3Context.scala
|
Scala
|
apache-2.0
| 1,789
|
/**
* Copyright (C) 2017 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.util
import scala.util.Try
object NumericUtils {
// See also http://stackoverflow.com/questions/35098868/
def parseLong(l: String): Option[Long] =
Try(l.toLong).toOption
}
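// Illustrative usage (editorial addition, not part of the original file): parseLong returns
// Some on success and None on any parse failure instead of throwing.
//
//   NumericUtils.parseLong("42")  // Some(42L)
//   NumericUtils.parseLong("foo") // None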
|
orbeon/orbeon-forms
|
common/shared/src/main/scala/org/orbeon/oxf/util/NumericUtils.scala
|
Scala
|
lgpl-2.1
| 867
|
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.parsing.hive
import com.flaminem.flamy.conf.FlamyContext
import org.scalatest.FreeSpec
class CreateTableParser$Test extends FreeSpec {
implicit val context = new FlamyContext("flamy.model.dir.paths" -> "src/test/resources/test")
"a correct CREATE TABLE query should be correctly parsed" in {
val query = "CREATE TABLE toto.test_table (id INT) PARTITIONED BY (week STRING)"
val expectedResult = """Table(type=TABLE, name=test_table, schema=toto, columns[Column(name=id, type=int)], partitions[Column(name=week, type=string)])"""
val actualResult = CreateTableParser.parseQuery(query)
assert(actualResult.toString === expectedResult )
}
"a correct CREATE TABLE IF NOT EXISTS query should be correctly parsed" in {
val query = "CREATE TABLE IF NOT EXISTS toto.test_table (id INT) PARTITIONED BY (week STRING)"
val expectedResult = """Table(type=TABLE, name=test_table, schema=toto, columns[Column(name=id, type=int)], partitions[Column(name=week, type=string)])"""
val actualResult = CreateTableParser.parseQuery(query)
assert(actualResult.toString === expectedResult )
}
"a correct text of multiple queries should be correctly parsed" in {
val text =
""" -- DROP TABLE IF EXISTS DBM_reports.report ;
| CREATE TABLE IF NOT EXISTS DBM_reports.report
| (device_type INT,
| mobile_make_id INT,
| mobile_model_id INT
| )
| PARTITIONED BY (day STRING)
| STORED AS SEQUENCEFILE ;
| """.stripMargin
val expectedResult = """Table(type=TABLE, name=report, schema=dbm_reports, columns[Column(name=device_type, type=int), Column(name=mobile_make_id, type=int), Column(name=mobile_model_id, type=int)], partitions[Column(name=day, type=string)])"""
val actualResult = CreateTableParser.parseText(text)
assert(actualResult.toString === expectedResult )
}
}
|
flaminem/flamy
|
src/test/scala/com/flaminem/flamy/parsing/hive/CreateTableParser$Test.scala
|
Scala
|
apache-2.0
| 2,476
|
package io.estatico.generic.traits
import org.scalatest.{FlatSpec, Matchers}
import shapeless._
class TraitInstancesTest extends FlatSpec with Matchers {
import TraitInstancesTest._
"TraitInstances" should "create a valid Generic instance" in {
val g = Generic[Foo]
g.to(foo) shouldEqual fooHList
val from = g.from(fooHList)
(from.bar, from.baz) shouldEqual (foo.bar, foo.baz)
}
it should "create Generic for single inheritance" in {
val g = Generic[Quux]
g.to(quux) shouldEqual quuxHList
val from = g.from(quuxHList)
(from.bar, from.baz, from.spam) shouldEqual (quux.bar, quux.baz, quux.spam)
}
it should "create Generic for multiple inheritance" in {
val g = Generic[OneTwoThree]
g.to(ott) shouldEqual ottHList
val from = g.from(ottHList)
(from.one, from.two, from.three) shouldEqual (ott.one, ott.two, ott.three)
}
it should "create Generic for trait with type params" in {
val g = Generic[Params[String, Int]]
g.to(params) shouldEqual paramsHList
val from = g.from(paramsHList)
(from.a, from.b) shouldEqual (params.a, params.b)
}
it should "create Generic fields only for abstract members" in {
val g = Generic[Partial]
g.to(partial) shouldEqual partialHList
val from = g.from(partialHList)
from.foo shouldEqual partial.foo
}
it should "create a valid DefaultSymbolicLabelling instance" in {
DefaultSymbolicLabelling[Foo].apply() shouldEqual fooSymbols
}
it should "create DefaultSymbolicLabelling for single inheritance" in {
DefaultSymbolicLabelling[Quux].apply() shouldEqual quuxSymbols
}
it should "create DefaultSymbolicLabelling for multiple inheritance" in {
DefaultSymbolicLabelling[OneTwoThree].apply() shouldEqual ottSymbols
}
it should "create DefaultSymbolicLabelling for trait with type params" in {
DefaultSymbolicLabelling[Params[_, _]].apply() shouldEqual paramsSymbols
}
it should "create DefaultSymbolicLabelling fields only for abstract members" in {
DefaultSymbolicLabelling[Partial].apply() shouldEqual partialSymbols
}
it should "get LabelledGeneric" in {
val g = LabelledGeneric[Foo]
val repr = g.to(foo)
repr shouldEqual fooHList
val from = g.from(repr)
(from.bar, from.baz) shouldEqual(foo.bar, foo.baz)
}
}
object TraitInstancesTest {
trait Foo {
def bar: String
def baz: Int
}
val foo = new Foo {
val bar = "a"
val baz = 2
}
val fooHList = foo.bar :: foo.baz :: HNil
val fooSymbols = 'bar :: 'baz :: HNil
trait Quux extends Foo {
def spam: Float
}
val quux = new Quux {
val bar = "b"
val baz = 3
val spam = 1.2f
}
val quuxHList = quux.bar :: quux.baz :: quux.spam :: HNil
val quuxSymbols = 'bar :: 'baz :: 'spam :: HNil
trait One {
def one: Int
}
trait Two {
def two: Int
}
trait Three {
def three: Int
}
trait OneTwoThree extends One with Two with Three
val ott = new OneTwoThree {
val one = 1
val two = 2
val three = 3
}
val ottHList = ott.one :: ott.two :: ott.three :: HNil
val ottSymbols = 'one :: 'two :: 'three :: HNil
trait Params[A, B] {
def a: A
def b: B
}
val params = new Params[String, Int] {
val a = "c"
val b = 42
}
val paramsHList = params.a :: params.b :: HNil
val paramsSymbols = 'a :: 'b :: HNil
trait PartialBase {
def foo: Int
def bar: String
}
trait Partial extends PartialBase {
def foo: Int
def bar: String = foo.toString
}
val partial = new Partial { val foo = 24 }
val partialHList = partial.foo :: HNil
val partialSymbols = 'foo :: HNil
}
|
estatico/generic-traits
|
src/test/scala/io/estatico/generic/traits/TraitInstancesTest.scala
|
Scala
|
apache-2.0
| 3,660
|
package at.magiun.core.service
import at.magiun.core.TestData._
import at.magiun.core.model.{BlockInput, BlockType}
import at.magiun.core.repository.{BlockEntity, BlockRepository}
import at.magiun.core.{MainModule, UnitTest}
import scala.concurrent.{Await, Future}
class BlockServiceTest extends UnitTest {
private val mainModule = new MainModule {
override lazy val blockRepository: BlockRepository = stub[BlockRepository]
}
private val service = mainModule.blockService
private val mockedRepo = mainModule.blockRepository
it should "return a block" in {
mockedRepo.find _ when "id-2" returns Future.successful(testBlockEntity1)
val result = Await.result(service.find("id-2"), TIMEOUT)
result.id should be("id-2")
result.`type` should be(BlockType.FileReader)
result.inputs should be(Seq(BlockInput("1", 0)))
result.params should be(Map("x" -> "4"))
}
it should "upsert a block" in {
mockedRepo.upsert _ when * returns Future.successful(testBlockEntity1)
Await.result(service.upsert(testBlock2), TIMEOUT)
val matcher = where {
(b: BlockEntity) => b.id == "id-2" && b.config == """{"inputs":[{"blockId":"1","index":0}],"params":{"x":"4"}}"""
}
mockedRepo.upsert _ verify matcher
}
}
|
Mihai238/magiun
|
core/src/test/scala/at/magiun/core/service/BlockServiceTest.scala
|
Scala
|
apache-2.0
| 1,265
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hbase
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.{HBaseTestingUtility, MiniHBaseCluster}
import org.apache.spark.{SparkConf, SparkContext}
object TestHbase
extends HBaseSQLContext(
new SparkContext("local[2]", "TestSQLContext", new SparkConf(true)
.set("spark.hadoop.hbase.zookeeper.quorum", "localhost"))) {
@transient val testUtil: HBaseTestingUtility =
new HBaseTestingUtility(sparkContext.hadoopConfiguration)
val nRegionServers: Int = 1
val nDataNodes: Int = 1
val nMasters: Int = 1
logDebug(s"Spin up hbase minicluster w/ $nMasters master, $nRegionServers RS, $nDataNodes dataNodes")
@transient val cluster: MiniHBaseCluster = testUtil.startMiniCluster(nMasters, nRegionServers, nDataNodes)
logInfo(s"Started HBaseMiniCluster with regions = ${cluster.countServedRegions}")
logInfo(s"Configuration zkPort="
+ s"${sparkContext.hadoopConfiguration.get("hbase.zookeeper.property.clientPort")}")
@transient lazy val hbaseAdmin: HBaseAdmin = new HBaseAdmin(sparkContext.hadoopConfiguration)
// The following operation will initialize the HBaseCatalog.
// And it should be done after starting MiniHBaseCluster
hbaseCatalog.deploySuccessfully_internal = Some(true)
hbaseCatalog.pwdIsAccessible = true
}
|
HuaweiBigData/astro
|
src/test/scala/org/apache/spark/sql/hbase/TestHbase.scala
|
Scala
|
apache-2.0
| 2,125
|
package me.eax.akka_examples
import akka.actor._
import akka.pattern.ask
import akka.event.LoggingReceive
import akka.routing.FromConfig
import scala.concurrent.duration._
import me.eax.akka_examples.commons._
package object profile {
case class ProfileInfo(uid: Long, username: String, email: String)
object ProfileManager {
val name = "profileManager"
def props() = Props[ProfileManager]
}
class ProfileManager extends Actor with ActorLogging {
val managerRouter = context.actorOf(Props.empty.withRouter(FromConfig), "router")
override def receive = LoggingReceive {
// message from this manager itself or from another manager
case r@RoutedMsg(uid: Long, msg: Any) =>
val actorName = s"profile-$uid"
context.child(actorName) getOrElse {
context.actorOf(ProfileActor.props(uid), actorName)
} forward msg
// message originating on the current node
case msg: RoutedMsgWithId =>
managerRouter forward RoutedMsg(msg.id, msg)
}
}
object ProfileActor {
def props(uid: Long) = Props(new ProfileActor(uid))
case class GetProfile(id: Long) extends RoutedMsgWithId
case class AskExt(mngRef: ActorRef) {
def getProfile(uid: Long) = (mngRef ? GetProfile(uid)).mapTo[ProfileInfo]
}
}
class ProfileActor(uid: Long) extends Actor with ActorLogging {
import me.eax.akka_examples.profile.ProfileActor._
private val actorLifetime = 1.minute // TODO: read from config
override def preStart() {
println(s"ProfileActor($uid): started!")
context.system.scheduler.scheduleOnce(actorLifetime, self, PoisonPill)
}
override def postStop() {
println(s"ProfileActor($uid): stopped!")
}
override def receive = LoggingReceive { case req =>
println(s"ProfileActor($uid): Request received: $req from ${sender().path.address }")
req match {
case r: GetProfile =>
sender ! ProfileInfo(uid, s"user$uid", s"user$uid@gmail.com")
}
}
}
}
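// Illustrative usage sketch (editorial addition, not part of the original file): a hypothetical
// caller using the AskExt helper defined above. Assumes an ActorSystem `system` whose
// configuration provides the FromConfig router deployment for ProfileManager, plus an implicit
// ask Timeout and ExecutionContext in scope.
//
//   import me.eax.akka_examples.profile._
//
//   val manager = system.actorOf(ProfileManager.props(), ProfileManager.name)
//   ProfileActor.AskExt(manager).getProfile(42L).foreach(p => println(p.username))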
|
afiskon/akka-cluster-router-example
|
src/main/scala/me/eax/akka_examples/profile/package.scala
|
Scala
|
mit
| 2,068
|
import com.thesamet.pb.EnumOptions.{EnumBase, EnumCompanionBase, ValueMixin}
import com.thesamet.proto.e2e.enum_options._
import org.scalatest._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
class EnumOptionsSpec extends AnyFlatSpec with Matchers with OptionValues {
"companion object" should "extend EnumCompanionBase" in {
MyEnum mustBe a[EnumCompanionBase]
MyEnum must not be a[EnumBase]
MyEnum must not be a[ValueMixin]
}
"enum values" should "extend EnumBase" in {
MyEnum.MyUnknown mustBe a[EnumBase]
MyEnum.V1 mustBe a[EnumBase]
MyEnum.V2 mustBe a[EnumBase]
MyEnum.Unrecognized(-1) mustBe a[EnumBase]
MyEnum.MyUnknown must not be a[ValueMixin]
MyEnum.V1 mustBe a[ValueMixin]
MyEnum.V2 must not be a[ValueMixin]
MyEnum.Unrecognized(-1) must not be a[ValueMixin]
}
"enum values" should "use naming scheme correctly" in {
MyEnum.MyThing.isMyThing must be(true)
MyEnum.FuzzBUZZ.isFuzzBuzz must be(true)
}
"enum values" should "have the scala name provided in the descriptor" in {
MyEnum.FuzzBUZZ.scalaValueDescriptor.scalaName must be("FuzzBUZZ")
MyEnum.FuzzBUZZ.name must be("ANOTHER_ONE")
MyEnum.MyThing.scalaValueDescriptor.scalaName must be("MyThing")
MyEnum.MyThing.scalaValueDescriptor.name must be("MY_THING")
}
}
|
scalapb/ScalaPB
|
e2e/src/test/scala/EnumOptionsSpec.scala
|
Scala
|
apache-2.0
| 1,352
|
package io.github.rollenholt.scala.email.sender
import akka.actor.SupervisorStrategy.{Escalate, Resume}
import akka.actor._
import akka.event.{Logging, LoggingAdapter}
import akka.routing.{DefaultResizer, RoundRobinPool}
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* @author rollenholt
*/
class EmailSenderActor extends Actor{
private val logger: LoggingAdapter = Logging(context.system, context.self.getClass)
val resizer = DefaultResizer(lowerBound = 4, upperBound = 50)
val worker: ActorRef = context.actorOf(RoundRobinPool(30, Some(resizer))
.withSupervisorStrategy(EmailSenderActor.buildSupervisorStrategy())
.props(Props[EmailSendWorker]))
override def receive: Receive = {
case emailMessage:EmailMessage => {
logger.debug("receive a email message : {}", emailMessage)
worker ! emailMessage
}
}
override val supervisorStrategy = EmailSenderActor.buildSupervisorStrategy()
}
object EmailSenderActor{
def buildSupervisorStrategy() = {
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: Exception => Escalate
}
}
}
|
rollenholt/scala-email-sender
|
src/main/scala/io/github/rollenholt/scala/email/sender/EmailSenderActor.scala
|
Scala
|
mit
| 1,141
|
/*
* Copyright 2014 Kate von Roeder (katevonroder at gmail dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.itsdamiya.fateclasher.platform.encoding
import com.itsdamiya.fateclasher.{MutableList, UnitSpec}
import scala.collection.mutable
import com.gvaneyck.rtmp.encoding.AMF3Encoder
class EncodingSpec extends UnitSpec with MutableList {
describe("An encoder") {
describe("when dealing with integers") {
val encodeInt = PrivateMethod[Unit]('writeInt)
it("should encode very low values") {
AMFEncoder invokePrivate encodeInt(buffer, 1)
assert(buffer(0) === 1)
}
it("should encode low values") {
AMFEncoder invokePrivate encodeInt(buffer, 160)
assert(buffer(0) === -127)
assert(buffer(1) === 32)
}
it("should encode high values") {
AMFEncoder invokePrivate encodeInt(buffer, 18000)
assert(buffer(0) === -127)
assert(buffer(1) === -116)
assert(buffer(2) === 80)
}
it("should encode very high values") {
AMFEncoder invokePrivate encodeInt(buffer, 2500000)
assert(buffer(0) === -128)
assert(buffer(1) === -52)
assert(buffer(2) === -91)
assert(buffer(3) === -96)
}
it("should encode negative values") {
AMFEncoder invokePrivate encodeInt(buffer, -100)
assert(buffer(0) === -1)
assert(buffer(1) === -1)
assert(buffer(2) === -1)
assert(buffer(3) === -100)
}
}
describe("when dealing with strings") {
new AMF3Encoder().testString()
val encodeString = PrivateMethod[Unit]('writeString)
it("should encode empty values") {
AMFEncoder invokePrivate encodeString(buffer, "")
assert(buffer(0) === 1)
}
it("should encode values") {
AMFEncoder invokePrivate encodeString(buffer, "Hello World!")
assert(buffer.toArray === Array(25, 72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100, 33))
}
}
}
}
|
Damiya/legendary
|
LibFateClasher/src/test/scala/com/itsdamiya/fateclasher/platform/encoding/EncodingSpec.scala
|
Scala
|
apache-2.0
| 2,532
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io.{InputStream, IOException}
import java.nio.channels.ClosedByInterruptException
import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean
import java.util.zip.CheckedInputStream
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Queue}
import scala.util.{Failure, Success}
import io.netty.util.internal.OutOfDirectMemoryError
import org.apache.commons.io.IOUtils
import org.roaringbitmap.RoaringBitmap
import org.apache.spark.{MapOutputTracker, TaskContext}
import org.apache.spark.MapOutputTracker.SHUFFLE_PUSH_MAP_ID
import org.apache.spark.errors.SparkCoreErrors
import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.shuffle._
import org.apache.spark.network.shuffle.checksum.{Cause, ShuffleChecksumHelper}
import org.apache.spark.network.util.{NettyUtils, TransportConf}
import org.apache.spark.shuffle.ShuffleReadMetricsReporter
import org.apache.spark.util.{CompletionIterator, TaskCompletionListener, Utils}
/**
* An iterator that fetches multiple blocks. For local blocks, it fetches from the local block
* manager. For remote blocks, it fetches them using the provided BlockTransferService.
*
* This creates an iterator of (BlockID, InputStream) tuples so the caller can handle blocks
* in a pipelined fashion as they are received.
*
* The implementation throttles the remote fetches so they don't exceed maxBytesInFlight to avoid
* using too much memory.
*
* @param context [[TaskContext]], used for metrics update
* @param shuffleClient [[BlockStoreClient]] for fetching remote blocks
* @param blockManager [[BlockManager]] for reading local blocks
* @param blocksByAddress list of blocks to fetch grouped by the [[BlockManagerId]].
* For each block we also require two pieces of info: 1. the size (in bytes
* as a long field) in order to throttle the memory usage; 2. the mapIndex for this
* block, which indicates the index in the map stage.
* Note that zero-sized blocks are already excluded, which happens in
* [[org.apache.spark.MapOutputTracker.convertMapStatuses]].
* @param mapOutputTracker [[MapOutputTracker]] for falling back to fetching the original blocks if
* we fail to fetch shuffle chunks when push based shuffle is enabled.
* @param streamWrapper A function to wrap the returned input stream.
* @param maxBytesInFlight max size (in bytes) of remote blocks to fetch at any given point.
* @param maxReqsInFlight max number of remote requests to fetch blocks at any given point.
* @param maxBlocksInFlightPerAddress max number of shuffle blocks being fetched at any given point
* for a given remote host:port.
* @param maxReqSizeShuffleToMem max size (in bytes) of a request that can be shuffled to memory.
* @param maxAttemptsOnNettyOOM The max number of a block could retry due to Netty OOM before
* throwing the fetch failure.
* @param detectCorrupt whether to detect any corruption in fetched blocks.
* @param checksumEnabled whether the shuffle checksum is enabled. When enabled, Spark will try to
* diagnose the cause of the block corruption.
* @param checksumAlgorithm the checksum algorithm that is used when calculating the checksum value
* for the block data.
* @param shuffleMetrics used to report shuffle metrics.
* @param doBatchFetch fetch continuous shuffle blocks from same executor in batch if the server
* side supports.
*/
private[spark]
final class ShuffleBlockFetcherIterator(
context: TaskContext,
shuffleClient: BlockStoreClient,
blockManager: BlockManager,
mapOutputTracker: MapOutputTracker,
blocksByAddress: Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])],
streamWrapper: (BlockId, InputStream) => InputStream,
maxBytesInFlight: Long,
maxReqsInFlight: Int,
maxBlocksInFlightPerAddress: Int,
val maxReqSizeShuffleToMem: Long,
maxAttemptsOnNettyOOM: Int,
detectCorrupt: Boolean,
detectCorruptUseExtraMemory: Boolean,
checksumEnabled: Boolean,
checksumAlgorithm: String,
shuffleMetrics: ShuffleReadMetricsReporter,
doBatchFetch: Boolean)
extends Iterator[(BlockId, InputStream)] with DownloadFileManager with Logging {
import ShuffleBlockFetcherIterator._
// Make remote requests at most maxBytesInFlight / 5 in length; the reason to keep them
// smaller than maxBytesInFlight is to allow multiple, parallel fetches from up to 5
// nodes, rather than blocking on reading output from one node.
private val targetRemoteRequestSize = math.max(maxBytesInFlight / 5, 1L)
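// Editorial worked example (not in the original source): with the default
// spark.reducer.maxSizeInFlight of 48 MB, targetRemoteRequestSize is roughly 9.6 MB, so up to
// about five such requests can be in flight at once without exceeding maxBytesInFlight.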
/**
* Total number of blocks to fetch.
*/
private[this] var numBlocksToFetch = 0
/**
* The number of blocks processed by the caller. The iterator is exhausted when
* [[numBlocksProcessed]] == [[numBlocksToFetch]].
*/
private[this] var numBlocksProcessed = 0
private[this] val startTimeNs = System.nanoTime()
/** Host local blocks to fetch, excluding zero-sized blocks. */
private[this] val hostLocalBlocks = scala.collection.mutable.LinkedHashSet[(BlockId, Int)]()
/**
* A queue to hold our results. This turns the asynchronous model provided by
* [[org.apache.spark.network.BlockTransferService]] into a synchronous model (iterator).
*/
private[this] val results = new LinkedBlockingQueue[FetchResult]
/**
* Current [[FetchResult]] being processed. We track this so we can release the current buffer
* in case of a runtime exception when processing the current buffer.
*/
@volatile private[this] var currentResult: SuccessFetchResult = null
/**
* Queue of fetch requests to issue; we'll pull requests off this gradually to make sure that
* the number of bytes in flight is limited to maxBytesInFlight.
*/
private[this] val fetchRequests = new Queue[FetchRequest]
/**
* Queue of fetch requests which could not be issued the first time they were dequeued. These
* requests are tried again when the fetch constraints are satisfied.
*/
private[this] val deferredFetchRequests = new HashMap[BlockManagerId, Queue[FetchRequest]]()
/** Current bytes in flight from our requests */
private[this] var bytesInFlight = 0L
/** Current number of requests in flight */
private[this] var reqsInFlight = 0
/** Current number of blocks in flight per host:port */
private[this] val numBlocksInFlightPerAddress = new HashMap[BlockManagerId, Int]()
/**
* Count the retries for blocks that failed due to Netty OOM. A block stops retrying once its
* retry count exceeds [[maxAttemptsOnNettyOOM]].
*/
private[this] val blockOOMRetryCounts = new HashMap[String, Int]
/**
* The blocks that couldn't be decompressed successfully. This is used to guarantee that we
* retry at most once for those corrupted blocks.
*/
private[this] val corruptedBlocks = mutable.HashSet[BlockId]()
/**
* Whether the iterator is still active. If isZombie is true, the callback interface will no
* longer place fetched blocks into [[results]].
*/
@GuardedBy("this")
private[this] var isZombie = false
/**
* A set to store the files used for shuffling remote huge blocks. Files in this set will be
* deleted when cleanup. This is a layer of defensiveness against disk file leaks.
*/
@GuardedBy("this")
private[this] val shuffleFilesSet = mutable.HashSet[DownloadFile]()
private[this] val onCompleteCallback = new ShuffleFetchCompletionListener(this)
private[this] val pushBasedFetchHelper = new PushBasedFetchHelper(
this, shuffleClient, blockManager, mapOutputTracker)
initialize()
// Decrements the buffer reference count.
// The currentResult is set to null to prevent releasing the buffer again on cleanup()
private[storage] def releaseCurrentResultBuffer(): Unit = {
// Release the current buffer if necessary
if (currentResult != null) {
currentResult.buf.release()
}
currentResult = null
}
override def createTempFile(transportConf: TransportConf): DownloadFile = {
// we never need to do any encryption or decryption here, regardless of configs, because that
// is handled at another layer in the code. When encryption is enabled, shuffle data is written
// to disk encrypted in the first place, and sent over the network still encrypted.
new SimpleDownloadFile(
blockManager.diskBlockManager.createTempLocalBlock()._2, transportConf)
}
override def registerTempFileToClean(file: DownloadFile): Boolean = synchronized {
if (isZombie) {
false
} else {
shuffleFilesSet += file
true
}
}
/**
* Mark the iterator as zombie, and release all buffers that haven't been deserialized yet.
*/
private[storage] def cleanup(): Unit = {
synchronized {
isZombie = true
}
releaseCurrentResultBuffer()
// Release buffers in the results queue
val iter = results.iterator()
while (iter.hasNext) {
val result = iter.next()
result match {
case SuccessFetchResult(blockId, mapIndex, address, _, buf, _) =>
if (address != blockManager.blockManagerId) {
if (hostLocalBlocks.contains(blockId -> mapIndex)) {
shuffleMetrics.incLocalBlocksFetched(1)
shuffleMetrics.incLocalBytesRead(buf.size)
} else {
shuffleMetrics.incRemoteBytesRead(buf.size)
if (buf.isInstanceOf[FileSegmentManagedBuffer]) {
shuffleMetrics.incRemoteBytesReadToDisk(buf.size)
}
shuffleMetrics.incRemoteBlocksFetched(1)
}
}
buf.release()
case _ =>
}
}
shuffleFilesSet.foreach { file =>
if (!file.delete()) {
logWarning("Failed to cleanup shuffle fetch temp file " + file.path())
}
}
}
private[this] def sendRequest(req: FetchRequest): Unit = {
logDebug("Sending request for %d blocks (%s) from %s".format(
req.blocks.size, Utils.bytesToString(req.size), req.address.hostPort))
bytesInFlight += req.size
reqsInFlight += 1
// so we can look up the block info of each blockID
val infoMap = req.blocks.map {
case FetchBlockInfo(blockId, size, mapIndex) => (blockId.toString, (size, mapIndex))
}.toMap
val remainingBlocks = new HashSet[String]() ++= infoMap.keys
val deferredBlocks = new ArrayBuffer[String]()
val blockIds = req.blocks.map(_.blockId.toString)
val address = req.address
@inline def enqueueDeferredFetchRequestIfNecessary(): Unit = {
if (remainingBlocks.isEmpty && deferredBlocks.nonEmpty) {
val blocks = deferredBlocks.map { blockId =>
val (size, mapIndex) = infoMap(blockId)
FetchBlockInfo(BlockId(blockId), size, mapIndex)
}
results.put(DeferFetchRequestResult(FetchRequest(address, blocks.toSeq)))
deferredBlocks.clear()
}
}
val blockFetchingListener = new BlockFetchingListener {
override def onBlockFetchSuccess(blockId: String, buf: ManagedBuffer): Unit = {
// Only add the buffer to results queue if the iterator is not zombie,
// i.e. cleanup() has not been called yet.
ShuffleBlockFetcherIterator.this.synchronized {
if (!isZombie) {
// Increment the ref count because we need to pass this to a different thread.
// This needs to be released after use.
buf.retain()
remainingBlocks -= blockId
blockOOMRetryCounts.remove(blockId)
results.put(new SuccessFetchResult(BlockId(blockId), infoMap(blockId)._2,
address, infoMap(blockId)._1, buf, remainingBlocks.isEmpty))
logDebug("remainingBlocks: " + remainingBlocks)
enqueueDeferredFetchRequestIfNecessary()
}
}
logTrace(s"Got remote block $blockId after ${Utils.getUsedTimeNs(startTimeNs)}")
}
override def onBlockFetchFailure(blockId: String, e: Throwable): Unit = {
ShuffleBlockFetcherIterator.this.synchronized {
logError(s"Failed to get block(s) from ${req.address.host}:${req.address.port}", e)
e match {
// SPARK-27991: Catch the Netty OOM and set the flag `isNettyOOMOnShuffle` (shared among
// tasks) to true as early as possible. The pending fetch requests won't be sent
// afterwards until the flag is set to false on:
// 1) the Netty free memory >= maxReqSizeShuffleToMem
// - we'll check this whenever a fetch request succeeds.
// 2) the number of in-flight requests becomes 0
// - we'll check this in `fetchUpToMaxBytes` whenever it's invoked.
// Although Netty memory is shared across multiple modules, e.g., shuffle, rpc, the flag
// only takes effect for shuffle for the sake of implementation simplicity.
// And we'll buffer the consecutive block failures caused by the OOM error until there are
// no remaining blocks in the current request. Then, we'll package these blocks into
// a single fetch request for a later retry. In this way, instead of creating one fetch
// request per block, we reduce the number of concurrent connections and the data load
// pressure on the remote server.
// Note that catching OOM and do something based on it is only a workaround for
// handling the Netty OOM issue, which is not the best way towards memory management.
// We can get rid of it when we find a way to manage Netty's memory precisely.
case _: OutOfDirectMemoryError
if blockOOMRetryCounts.getOrElseUpdate(blockId, 0) < maxAttemptsOnNettyOOM =>
if (!isZombie) {
val failureTimes = blockOOMRetryCounts(blockId)
blockOOMRetryCounts(blockId) += 1
if (isNettyOOMOnShuffle.compareAndSet(false, true)) {
// The fetcher can fail remaining blocks in batch for the same error. So we only
// log the warning once to avoid flooding the logs.
logInfo(s"Block $blockId has failed $failureTimes times " +
s"due to Netty OOM, will retry")
}
remainingBlocks -= blockId
deferredBlocks += blockId
enqueueDeferredFetchRequestIfNecessary()
}
case _ =>
val block = BlockId(blockId)
if (block.isShuffleChunk) {
remainingBlocks -= blockId
results.put(FallbackOnPushMergedFailureResult(
block, address, infoMap(blockId)._1, remainingBlocks.isEmpty))
} else {
results.put(FailureFetchResult(block, infoMap(blockId)._2, address, e))
}
}
}
}
}
// Fetch remote shuffle blocks to disk when the request is too large. Since the shuffle data is
// already encrypted and compressed over the wire (w.r.t. the related configs), we can just fetch
// the data and write it to file directly.
if (req.size > maxReqSizeShuffleToMem) {
shuffleClient.fetchBlocks(address.host, address.port, address.executorId, blockIds.toArray,
blockFetchingListener, this)
} else {
shuffleClient.fetchBlocks(address.host, address.port, address.executorId, blockIds.toArray,
blockFetchingListener, null)
}
}
/**
* This is called from initialize and also from the fallback which is triggered from
* [[PushBasedFetchHelper]].
*/
private[this] def partitionBlocksByFetchMode(
blocksByAddress: Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])],
localBlocks: mutable.LinkedHashSet[(BlockId, Int)],
hostLocalBlocksByExecutor: mutable.LinkedHashMap[BlockManagerId, Seq[(BlockId, Long, Int)]],
pushMergedLocalBlocks: mutable.LinkedHashSet[BlockId]): ArrayBuffer[FetchRequest] = {
logDebug(s"maxBytesInFlight: $maxBytesInFlight, targetRemoteRequestSize: "
+ s"$targetRemoteRequestSize, maxBlocksInFlightPerAddress: $maxBlocksInFlightPerAddress")
// Partition to local, host-local, push-merged-local, remote (includes push-merged-remote)
// blocks. Remote blocks are further split into FetchRequests of size at most maxBytesInFlight
// in order to limit the amount of data in flight
val collectedRemoteRequests = new ArrayBuffer[FetchRequest]
var localBlockBytes = 0L
var hostLocalBlockBytes = 0L
var numHostLocalBlocks = 0
var pushMergedLocalBlockBytes = 0L
val prevNumBlocksToFetch = numBlocksToFetch
val fallback = FallbackStorage.FALLBACK_BLOCK_MANAGER_ID.executorId
val localExecIds = Set(blockManager.blockManagerId.executorId, fallback)
for ((address, blockInfos) <- blocksByAddress) {
checkBlockSizes(blockInfos)
if (pushBasedFetchHelper.isPushMergedShuffleBlockAddress(address)) {
// These are push-merged blocks or shuffle chunks of these blocks.
if (address.host == blockManager.blockManagerId.host) {
numBlocksToFetch += blockInfos.size
pushMergedLocalBlocks ++= blockInfos.map(_._1)
pushMergedLocalBlockBytes += blockInfos.map(_._2).sum
} else {
collectFetchRequests(address, blockInfos, collectedRemoteRequests)
}
} else if (localExecIds.contains(address.executorId)) {
val mergedBlockInfos = mergeContinuousShuffleBlockIdsIfNeeded(
blockInfos.map(info => FetchBlockInfo(info._1, info._2, info._3)), doBatchFetch)
numBlocksToFetch += mergedBlockInfos.size
localBlocks ++= mergedBlockInfos.map(info => (info.blockId, info.mapIndex))
localBlockBytes += mergedBlockInfos.map(_.size).sum
} else if (blockManager.hostLocalDirManager.isDefined &&
address.host == blockManager.blockManagerId.host) {
val mergedBlockInfos = mergeContinuousShuffleBlockIdsIfNeeded(
blockInfos.map(info => FetchBlockInfo(info._1, info._2, info._3)), doBatchFetch)
numBlocksToFetch += mergedBlockInfos.size
val blocksForAddress =
mergedBlockInfos.map(info => (info.blockId, info.size, info.mapIndex))
hostLocalBlocksByExecutor += address -> blocksForAddress
numHostLocalBlocks += blocksForAddress.size
hostLocalBlockBytes += mergedBlockInfos.map(_.size).sum
} else {
val (_, timeCost) = Utils.timeTakenMs[Unit] {
collectFetchRequests(address, blockInfos, collectedRemoteRequests)
}
logDebug(s"Collected remote fetch requests for $address in $timeCost ms")
}
}
val (remoteBlockBytes, numRemoteBlocks) =
collectedRemoteRequests.foldLeft((0L, 0))((x, y) => (x._1 + y.size, x._2 + y.blocks.size))
val totalBytes = localBlockBytes + remoteBlockBytes + hostLocalBlockBytes +
pushMergedLocalBlockBytes
val blocksToFetchCurrentIteration = numBlocksToFetch - prevNumBlocksToFetch
assert(blocksToFetchCurrentIteration == localBlocks.size +
numHostLocalBlocks + numRemoteBlocks + pushMergedLocalBlocks.size,
s"The number of non-empty blocks $blocksToFetchCurrentIteration doesn't equal to the sum " +
s"of the number of local blocks ${localBlocks.size} + " +
s"the number of host-local blocks ${numHostLocalBlocks} " +
s"the number of push-merged-local blocks ${pushMergedLocalBlocks.size} " +
s"+ the number of remote blocks ${numRemoteBlocks} ")
logInfo(s"Getting $blocksToFetchCurrentIteration " +
s"(${Utils.bytesToString(totalBytes)}) non-empty blocks including " +
s"${localBlocks.size} (${Utils.bytesToString(localBlockBytes)}) local and " +
s"${numHostLocalBlocks} (${Utils.bytesToString(hostLocalBlockBytes)}) " +
s"host-local and ${pushMergedLocalBlocks.size} " +
s"(${Utils.bytesToString(pushMergedLocalBlockBytes)}) " +
s"push-merged-local and $numRemoteBlocks (${Utils.bytesToString(remoteBlockBytes)}) " +
s"remote blocks")
this.hostLocalBlocks ++= hostLocalBlocksByExecutor.values
.flatMap { infos => infos.map(info => (info._1, info._3)) }
collectedRemoteRequests
}
private def createFetchRequest(
blocks: Seq[FetchBlockInfo],
address: BlockManagerId,
forMergedMetas: Boolean): FetchRequest = {
logDebug(s"Creating fetch request of ${blocks.map(_.size).sum} at $address "
+ s"with ${blocks.size} blocks")
FetchRequest(address, blocks, forMergedMetas)
}
private def createFetchRequests(
curBlocks: Seq[FetchBlockInfo],
address: BlockManagerId,
isLast: Boolean,
collectedRemoteRequests: ArrayBuffer[FetchRequest],
enableBatchFetch: Boolean,
forMergedMetas: Boolean = false): ArrayBuffer[FetchBlockInfo] = {
val mergedBlocks = mergeContinuousShuffleBlockIdsIfNeeded(curBlocks, enableBatchFetch)
numBlocksToFetch += mergedBlocks.size
val retBlocks = new ArrayBuffer[FetchBlockInfo]
if (mergedBlocks.length <= maxBlocksInFlightPerAddress) {
collectedRemoteRequests += createFetchRequest(mergedBlocks, address, forMergedMetas)
} else {
mergedBlocks.grouped(maxBlocksInFlightPerAddress).foreach { blocks =>
if (blocks.length == maxBlocksInFlightPerAddress || isLast) {
collectedRemoteRequests += createFetchRequest(blocks, address, forMergedMetas)
} else {
// The last group does not exceed `maxBlocksInFlightPerAddress`. Put it back
// to `curBlocks`.
retBlocks ++= blocks
numBlocksToFetch -= blocks.size
}
}
}
retBlocks
}
private def collectFetchRequests(
address: BlockManagerId,
blockInfos: Seq[(BlockId, Long, Int)],
collectedRemoteRequests: ArrayBuffer[FetchRequest]): Unit = {
val iterator = blockInfos.iterator
var curRequestSize = 0L
var curBlocks = new ArrayBuffer[FetchBlockInfo]()
while (iterator.hasNext) {
val (blockId, size, mapIndex) = iterator.next()
curBlocks += FetchBlockInfo(blockId, size, mapIndex)
curRequestSize += size
blockId match {
// Either all blocks are push-merged blocks, shuffle chunks, or original blocks.
// Based on these types, we decide to do batch fetch and create FetchRequests with
// forMergedMetas set.
case ShuffleBlockChunkId(_, _, _, _) =>
if (curRequestSize >= targetRemoteRequestSize ||
curBlocks.size >= maxBlocksInFlightPerAddress) {
curBlocks = createFetchRequests(curBlocks.toSeq, address, isLast = false,
collectedRemoteRequests, enableBatchFetch = false)
curRequestSize = curBlocks.map(_.size).sum
}
case ShuffleMergedBlockId(_, _, _) =>
if (curBlocks.size >= maxBlocksInFlightPerAddress) {
curBlocks = createFetchRequests(curBlocks.toSeq, address, isLast = false,
collectedRemoteRequests, enableBatchFetch = false, forMergedMetas = true)
}
case _ =>
// For batch fetch, the actual block in flight should count for merged block.
val mayExceedsMaxBlocks = !doBatchFetch && curBlocks.size >= maxBlocksInFlightPerAddress
if (curRequestSize >= targetRemoteRequestSize || mayExceedsMaxBlocks) {
curBlocks = createFetchRequests(curBlocks.toSeq, address, isLast = false,
collectedRemoteRequests, doBatchFetch)
curRequestSize = curBlocks.map(_.size).sum
}
}
}
// Add in the final request
if (curBlocks.nonEmpty) {
val (enableBatchFetch, forMergedMetas) = {
curBlocks.head.blockId match {
case ShuffleBlockChunkId(_, _, _, _) => (false, false)
case ShuffleMergedBlockId(_, _, _) => (false, true)
case _ => (doBatchFetch, false)
}
}
createFetchRequests(curBlocks.toSeq, address, isLast = true, collectedRemoteRequests,
enableBatchFetch = enableBatchFetch, forMergedMetas = forMergedMetas)
}
}
private def assertPositiveBlockSize(blockId: BlockId, blockSize: Long): Unit = {
if (blockSize < 0) {
throw BlockException(blockId, "Negative block size " + blockSize)
} else if (blockSize == 0) {
throw BlockException(blockId, "Zero-sized blocks should be excluded.")
}
}
private def checkBlockSizes(blockInfos: Seq[(BlockId, Long, Int)]): Unit = {
blockInfos.foreach { case (blockId, size, _) => assertPositiveBlockSize(blockId, size) }
}
/**
* Fetch the local blocks while we are fetching remote blocks. This is ok because
* `ManagedBuffer`'s memory is allocated lazily when we create the input stream, so all we
* track in-memory are the ManagedBuffer references themselves.
*/
private[this] def fetchLocalBlocks(
localBlocks: mutable.LinkedHashSet[(BlockId, Int)]): Unit = {
logDebug(s"Start fetching local blocks: ${localBlocks.mkString(", ")}")
val iter = localBlocks.iterator
while (iter.hasNext) {
val (blockId, mapIndex) = iter.next()
try {
val buf = blockManager.getLocalBlockData(blockId)
shuffleMetrics.incLocalBlocksFetched(1)
shuffleMetrics.incLocalBytesRead(buf.size)
buf.retain()
results.put(new SuccessFetchResult(blockId, mapIndex, blockManager.blockManagerId,
buf.size(), buf, false))
} catch {
// If we see an exception, stop immediately.
case e: Exception =>
e match {
// ClosedByInterruptException is an expected exception when a task is killed;
// don't log the exception stack trace to avoid confusing users.
// See: SPARK-28340
case ce: ClosedByInterruptException =>
logError("Error occurred while fetching local blocks, " + ce.getMessage)
case ex: Exception => logError("Error occurred while fetching local blocks", ex)
}
results.put(new FailureFetchResult(blockId, mapIndex, blockManager.blockManagerId, e))
return
}
}
}
private[this] def fetchHostLocalBlock(
blockId: BlockId,
mapIndex: Int,
localDirs: Array[String],
blockManagerId: BlockManagerId): Boolean = {
try {
val buf = blockManager.getHostLocalShuffleData(blockId, localDirs)
buf.retain()
results.put(SuccessFetchResult(blockId, mapIndex, blockManagerId, buf.size(), buf,
isNetworkReqDone = false))
true
} catch {
case e: Exception =>
// If we see an exception, stop immediately.
logError(s"Error occurred while fetching local blocks", e)
results.put(FailureFetchResult(blockId, mapIndex, blockManagerId, e))
false
}
}
/**
* Fetch the host-local blocks while we are fetching remote blocks. This is ok because
* `ManagedBuffer`'s memory is allocated lazily when we create the input stream, so all we
* track in-memory are the ManagedBuffer references themselves.
*/
private[this] def fetchHostLocalBlocks(
hostLocalDirManager: HostLocalDirManager,
hostLocalBlocksByExecutor: mutable.LinkedHashMap[BlockManagerId, Seq[(BlockId, Long, Int)]]):
Unit = {
val cachedDirsByExec = hostLocalDirManager.getCachedHostLocalDirs
val (hostLocalBlocksWithCachedDirs, hostLocalBlocksWithMissingDirs) = {
val (hasCache, noCache) = hostLocalBlocksByExecutor.partition { case (hostLocalBmId, _) =>
cachedDirsByExec.contains(hostLocalBmId.executorId)
}
(hasCache.toMap, noCache.toMap)
}
if (hostLocalBlocksWithMissingDirs.nonEmpty) {
      logDebug(s"Asynchronously fetching host-local blocks without cached executors' dir: " +
s"${hostLocalBlocksWithMissingDirs.mkString(", ")}")
      // If the external shuffle service is enabled, we fetch the local directories for
      // multiple executors in a single request from the external shuffle service, which runs
      // on the same host as those executors. Otherwise, we fetch the local directories from
      // those executors directly, one by one. The number of fetch requests stays small because
      // it is rare in practice for a single host to run many executors at the same time.
val dirFetchRequests = if (blockManager.externalShuffleServiceEnabled) {
val host = blockManager.blockManagerId.host
val port = blockManager.externalShuffleServicePort
Seq((host, port, hostLocalBlocksWithMissingDirs.keys.toArray))
} else {
hostLocalBlocksWithMissingDirs.keys.map(bmId => (bmId.host, bmId.port, Array(bmId))).toSeq
}
dirFetchRequests.foreach { case (host, port, bmIds) =>
hostLocalDirManager.getHostLocalDirs(host, port, bmIds.map(_.executorId)) {
case Success(dirsByExecId) =>
fetchMultipleHostLocalBlocks(
hostLocalBlocksWithMissingDirs.filterKeys(bmIds.contains).toMap,
dirsByExecId,
cached = false)
case Failure(throwable) =>
logError("Error occurred while fetching host local blocks", throwable)
val bmId = bmIds.head
val blockInfoSeq = hostLocalBlocksWithMissingDirs(bmId)
val (blockId, _, mapIndex) = blockInfoSeq.head
results.put(FailureFetchResult(blockId, mapIndex, bmId, throwable))
}
}
}
if (hostLocalBlocksWithCachedDirs.nonEmpty) {
      logDebug(s"Synchronously fetching host-local blocks with cached executors' dir: " +
s"${hostLocalBlocksWithCachedDirs.mkString(", ")}")
fetchMultipleHostLocalBlocks(hostLocalBlocksWithCachedDirs, cachedDirsByExec, cached = true)
}
}
private def fetchMultipleHostLocalBlocks(
bmIdToBlocks: Map[BlockManagerId, Seq[(BlockId, Long, Int)]],
localDirsByExecId: Map[String, Array[String]],
cached: Boolean): Unit = {
// We use `forall` because once there's a failed block fetch, `fetchHostLocalBlock` will put
// a `FailureFetchResult` immediately to the `results`. So there's no reason to fetch the
// remaining blocks.
val allFetchSucceeded = bmIdToBlocks.forall { case (bmId, blockInfos) =>
blockInfos.forall { case (blockId, _, mapIndex) =>
fetchHostLocalBlock(blockId, mapIndex, localDirsByExecId(bmId.executorId), bmId)
}
}
if (allFetchSucceeded) {
logDebug(s"Got host-local blocks from ${bmIdToBlocks.keys.mkString(", ")} " +
s"(${if (cached) "with" else "without"} cached executors' dir) " +
s"in ${Utils.getUsedTimeNs(startTimeNs)}")
}
}
private[this] def initialize(): Unit = {
// Add a task completion callback (called in both success case and failure case) to cleanup.
context.addTaskCompletionListener(onCompleteCallback)
// Local blocks to fetch, excluding zero-sized blocks.
val localBlocks = mutable.LinkedHashSet[(BlockId, Int)]()
val hostLocalBlocksByExecutor =
mutable.LinkedHashMap[BlockManagerId, Seq[(BlockId, Long, Int)]]()
val pushMergedLocalBlocks = mutable.LinkedHashSet[BlockId]()
// Partition blocks by the different fetch modes: local, host-local, push-merged-local and
// remote blocks.
val remoteRequests = partitionBlocksByFetchMode(
blocksByAddress, localBlocks, hostLocalBlocksByExecutor, pushMergedLocalBlocks)
// Add the remote requests into our queue in a random order
fetchRequests ++= Utils.randomize(remoteRequests)
assert ((0 == reqsInFlight) == (0 == bytesInFlight),
"expected reqsInFlight = 0 but found reqsInFlight = " + reqsInFlight +
", expected bytesInFlight = 0 but found bytesInFlight = " + bytesInFlight)
// Send out initial requests for blocks, up to our maxBytesInFlight
fetchUpToMaxBytes()
val numDeferredRequest = deferredFetchRequests.values.map(_.size).sum
val numFetches = remoteRequests.size - fetchRequests.size - numDeferredRequest
logInfo(s"Started $numFetches remote fetches in ${Utils.getUsedTimeNs(startTimeNs)}" +
(if (numDeferredRequest > 0 ) s", deferred $numDeferredRequest requests" else ""))
// Get Local Blocks
fetchLocalBlocks(localBlocks)
logDebug(s"Got local blocks in ${Utils.getUsedTimeNs(startTimeNs)}")
// Get host local blocks if any
fetchAllHostLocalBlocks(hostLocalBlocksByExecutor)
pushBasedFetchHelper.fetchAllPushMergedLocalBlocks(pushMergedLocalBlocks)
}
private def fetchAllHostLocalBlocks(
hostLocalBlocksByExecutor: mutable.LinkedHashMap[BlockManagerId, Seq[(BlockId, Long, Int)]]):
Unit = {
if (hostLocalBlocksByExecutor.nonEmpty) {
blockManager.hostLocalDirManager.foreach(fetchHostLocalBlocks(_, hostLocalBlocksByExecutor))
}
}
override def hasNext: Boolean = numBlocksProcessed < numBlocksToFetch
/**
* Fetches the next (BlockId, InputStream). If a task fails, the ManagedBuffers
* underlying each InputStream will be freed by the cleanup() method registered with the
* TaskCompletionListener. However, callers should close() these InputStreams
* as soon as they are no longer needed, in order to release memory as early as possible.
*
* Throws a FetchFailedException if the next block could not be fetched.
*/
override def next(): (BlockId, InputStream) = {
if (!hasNext) {
throw SparkCoreErrors.noSuchElementError()
}
numBlocksProcessed += 1
var result: FetchResult = null
var input: InputStream = null
    // This is only initialized when shuffle checksum is enabled.
var checkedIn: CheckedInputStream = null
var streamCompressedOrEncrypted: Boolean = false
    // Take the next fetched result and try to decompress it to detect data corruption.
    // If it's corrupt, fetch it one more time; if the second fetch is also corrupt, throw a
    // FetchFailedException so the previous stage can be retried.
    // For a local shuffle block, throw a FetchFailedException on the first IOException.
while (result == null) {
val startFetchWait = System.nanoTime()
result = results.take()
val fetchWaitTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startFetchWait)
shuffleMetrics.incFetchWaitTime(fetchWaitTime)
result match {
case r @ SuccessFetchResult(blockId, mapIndex, address, size, buf, isNetworkReqDone) =>
if (address != blockManager.blockManagerId) {
if (hostLocalBlocks.contains(blockId -> mapIndex) ||
pushBasedFetchHelper.isLocalPushMergedBlockAddress(address)) {
// It is a host local block or a local shuffle chunk
shuffleMetrics.incLocalBlocksFetched(1)
shuffleMetrics.incLocalBytesRead(buf.size)
} else {
numBlocksInFlightPerAddress(address) = numBlocksInFlightPerAddress(address) - 1
shuffleMetrics.incRemoteBytesRead(buf.size)
if (buf.isInstanceOf[FileSegmentManagedBuffer]) {
shuffleMetrics.incRemoteBytesReadToDisk(buf.size)
}
shuffleMetrics.incRemoteBlocksFetched(1)
bytesInFlight -= size
}
}
if (isNetworkReqDone) {
reqsInFlight -= 1
resetNettyOOMFlagIfPossible(maxReqSizeShuffleToMem)
logDebug("Number of requests in flight " + reqsInFlight)
}
if (buf.size == 0) {
// We will never legitimately receive a zero-size block. All blocks with zero records
// have zero size and all zero-size blocks have no records (and hence should never
// have been requested in the first place). This statement relies on behaviors of the
// shuffle writers, which are guaranteed by the following test cases:
//
// - BypassMergeSortShuffleWriterSuite: "write with some empty partitions"
// - UnsafeShuffleWriterSuite: "writeEmptyIterator"
// - DiskBlockObjectWriterSuite: "commit() and close() without ever opening or writing"
//
            // There is no explicit test for SortShuffleWriter, but the underlying APIs that it
            // uses are shared with the UnsafeShuffleWriter (both writers use DiskBlockObjectWriter,
            // which returns a zero size from commitAndGet() in case no records were written
            // since the last call).
val msg = s"Received a zero-size buffer for block $blockId from $address " +
s"(expectedApproxSize = $size, isNetworkReqDone=$isNetworkReqDone)"
throwFetchFailedException(blockId, mapIndex, address, new IOException(msg))
}
val in = try {
var bufIn = buf.createInputStream()
if (checksumEnabled) {
val checksum = ShuffleChecksumHelper.getChecksumByAlgorithm(checksumAlgorithm)
checkedIn = new CheckedInputStream(bufIn, checksum)
checkedIn
} else {
bufIn
}
} catch {
            // The exception can only be thrown by a local shuffle block
case e: IOException =>
assert(buf.isInstanceOf[FileSegmentManagedBuffer])
e match {
case ce: ClosedByInterruptException =>
logError("Failed to create input stream from local block, " +
ce.getMessage)
case e: IOException => logError("Failed to create input stream from local block", e)
}
buf.release()
if (blockId.isShuffleChunk) {
pushBasedFetchHelper.initiateFallbackFetchForPushMergedBlock(blockId, address)
                // Set result to null to trigger another iteration of the while loop to get
                // either a SuccessFetchResult or a FailureFetchResult.
result = null
null
} else {
throwFetchFailedException(blockId, mapIndex, address, e)
}
}
if (in != null) {
try {
input = streamWrapper(blockId, in)
// If the stream is compressed or wrapped, then we optionally decompress/unwrap the
// first maxBytesInFlight/3 bytes into memory, to check for corruption in that portion
              // of the data. But even if the 'detectCorruptUseExtraMemory' configuration is off,
              // or if the corruption lies beyond that portion, it will still be detected later
              // when the rest of the stream is read.
streamCompressedOrEncrypted = !input.eq(in)
if (streamCompressedOrEncrypted && detectCorruptUseExtraMemory) {
// TODO: manage the memory used here, and spill it into disk in case of OOM.
input = Utils.copyStreamUpTo(input, maxBytesInFlight / 3)
}
} catch {
case e: IOException =>
// When shuffle checksum is enabled, for a block that is corrupted twice,
// we'd calculate the checksum of the block by consuming the remaining data
// in the buf. So, we should release the buf later.
if (!(checksumEnabled && corruptedBlocks.contains(blockId))) {
buf.release()
}
if (blockId.isShuffleChunk) {
// TODO (SPARK-36284): Add shuffle checksum support for push-based shuffle
// Retrying a corrupt block may result again in a corrupt block. For shuffle
// chunks, we opt to fallback on the original shuffle blocks that belong to that
// corrupt shuffle chunk immediately instead of retrying to fetch the corrupt
// chunk. This also makes the code simpler because the chunkMeta corresponding to
// a shuffle chunk is always removed from chunksMetaMap whenever a shuffle chunk
// gets processed. If we try to re-fetch a corrupt shuffle chunk, then it has to
// be added back to the chunksMetaMap.
pushBasedFetchHelper.initiateFallbackFetchForPushMergedBlock(blockId, address)
// Set result to null to trigger another iteration of the while loop.
result = null
} else if (buf.isInstanceOf[FileSegmentManagedBuffer]) {
throwFetchFailedException(blockId, mapIndex, address, e)
} else if (corruptedBlocks.contains(blockId)) {
// It's the second time this block is detected corrupted
if (checksumEnabled) {
// Diagnose the cause of data corruption if shuffle checksum is enabled
val diagnosisResponse = diagnoseCorruption(checkedIn, address, blockId)
buf.release()
logError(diagnosisResponse)
throwFetchFailedException(
blockId, mapIndex, address, e, Some(diagnosisResponse))
} else {
throwFetchFailedException(blockId, mapIndex, address, e)
}
} else {
// It's the first time this block is detected corrupted
                logWarning(s"got a corrupted block $blockId from $address, fetch again", e)
corruptedBlocks += blockId
fetchRequests += FetchRequest(
address, Array(FetchBlockInfo(blockId, size, mapIndex)))
result = null
}
} finally {
if (blockId.isShuffleChunk) {
pushBasedFetchHelper.removeChunk(blockId.asInstanceOf[ShuffleBlockChunkId])
}
// TODO: release the buf here to free memory earlier
if (input == null) {
// Close the underlying stream if there was an issue in wrapping the stream using
// streamWrapper
in.close()
}
}
}
case FailureFetchResult(blockId, mapIndex, address, e) =>
var errorMsg: String = null
if (e.isInstanceOf[OutOfDirectMemoryError]) {
errorMsg = s"Block $blockId fetch failed after $maxAttemptsOnNettyOOM " +
s"retries due to Netty OOM"
logError(errorMsg)
}
throwFetchFailedException(blockId, mapIndex, address, e, Some(errorMsg))
case DeferFetchRequestResult(request) =>
val address = request.address
numBlocksInFlightPerAddress(address) =
numBlocksInFlightPerAddress(address) - request.blocks.size
bytesInFlight -= request.size
reqsInFlight -= 1
logDebug("Number of requests in flight " + reqsInFlight)
val defReqQueue =
deferredFetchRequests.getOrElseUpdate(address, new Queue[FetchRequest]())
defReqQueue.enqueue(request)
result = null
case FallbackOnPushMergedFailureResult(blockId, address, size, isNetworkReqDone) =>
// We get this result in 3 cases:
// 1. Failure to fetch the data of a remote shuffle chunk. In this case, the
// blockId is a ShuffleBlockChunkId.
// 2. Failure to read the push-merged-local meta. In this case, the blockId is
// ShuffleBlockId.
// 3. Failure to get the push-merged-local directories from the external shuffle service.
// In this case, the blockId is ShuffleBlockId.
if (pushBasedFetchHelper.isRemotePushMergedBlockAddress(address)) {
numBlocksInFlightPerAddress(address) = numBlocksInFlightPerAddress(address) - 1
bytesInFlight -= size
}
if (isNetworkReqDone) {
reqsInFlight -= 1
logDebug("Number of requests in flight " + reqsInFlight)
}
pushBasedFetchHelper.initiateFallbackFetchForPushMergedBlock(blockId, address)
// Set result to null to trigger another iteration of the while loop to get either
// a SuccessFetchResult or a FailureFetchResult.
result = null
case PushMergedLocalMetaFetchResult(
shuffleId, shuffleMergeId, reduceId, bitmaps, localDirs) =>
// Fetch push-merged-local shuffle block data as multiple shuffle chunks
val shuffleBlockId = ShuffleMergedBlockId(shuffleId, shuffleMergeId, reduceId)
try {
val bufs: Seq[ManagedBuffer] = blockManager.getLocalMergedBlockData(shuffleBlockId,
localDirs)
// Since the request for local block meta completed successfully, numBlocksToFetch
// is decremented.
numBlocksToFetch -= 1
// Update total number of blocks to fetch, reflecting the multiple local shuffle
// chunks.
numBlocksToFetch += bufs.size
bufs.zipWithIndex.foreach { case (buf, chunkId) =>
buf.retain()
val shuffleChunkId = ShuffleBlockChunkId(shuffleId, shuffleMergeId, reduceId,
chunkId)
pushBasedFetchHelper.addChunk(shuffleChunkId, bitmaps(chunkId))
results.put(SuccessFetchResult(shuffleChunkId, SHUFFLE_PUSH_MAP_ID,
pushBasedFetchHelper.localShuffleMergerBlockMgrId, buf.size(), buf,
isNetworkReqDone = false))
}
} catch {
case e: Exception =>
              // If we see an exception while reading the push-merged-local index file, we fall
              // back to fetching the original blocks. We do not report a block fetch failure
              // and will continue with the remaining local block reads.
logWarning(s"Error occurred while reading push-merged-local index, " +
s"prepare to fetch the original blocks", e)
pushBasedFetchHelper.initiateFallbackFetchForPushMergedBlock(
shuffleBlockId, pushBasedFetchHelper.localShuffleMergerBlockMgrId)
}
result = null
case PushMergedRemoteMetaFetchResult(
shuffleId, shuffleMergeId, reduceId, blockSize, bitmaps, address) =>
          // The original meta request has been processed, so we decrease numBlocksToFetch and
          // numBlocksInFlightPerAddress by 1. We will collect the new shuffle chunk requests, and
          // their count is added to numBlocksToFetch in collectFetchReqsFromMergedBlocks.
numBlocksInFlightPerAddress(address) = numBlocksInFlightPerAddress(address) - 1
numBlocksToFetch -= 1
val blocksToFetch = pushBasedFetchHelper.createChunkBlockInfosFromMetaResponse(
shuffleId, shuffleMergeId, reduceId, blockSize, bitmaps)
val additionalRemoteReqs = new ArrayBuffer[FetchRequest]
collectFetchRequests(address, blocksToFetch.toSeq, additionalRemoteReqs)
fetchRequests ++= additionalRemoteReqs
// Set result to null to force another iteration.
result = null
case PushMergedRemoteMetaFailedFetchResult(
shuffleId, shuffleMergeId, reduceId, address) =>
// The original meta request failed so we decrease numBlocksInFlightPerAddress by 1.
numBlocksInFlightPerAddress(address) = numBlocksInFlightPerAddress(address) - 1
// If we fail to fetch the meta of a push-merged block, we fall back to fetching the
// original blocks.
pushBasedFetchHelper.initiateFallbackFetchForPushMergedBlock(
ShuffleMergedBlockId(shuffleId, shuffleMergeId, reduceId), address)
// Set result to null to force another iteration.
result = null
}
// Send fetch requests up to maxBytesInFlight
fetchUpToMaxBytes()
}
currentResult = result.asInstanceOf[SuccessFetchResult]
(currentResult.blockId,
new BufferReleasingInputStream(
input,
this,
currentResult.blockId,
currentResult.mapIndex,
currentResult.address,
detectCorrupt && streamCompressedOrEncrypted,
currentResult.isNetworkReqDone,
Option(checkedIn)))
}
/**
   * Get the suspected corruption cause for the corrupted block. It should only be invoked
   * when checksum is enabled and corruption was detected at least once.
   *
   * This will first consume the rest of the stream of the corrupted block to calculate the
   * checksum of the block. Then, it will make a synchronous RPC call, sending the checksum
   * along, to ask the server (where the corrupted block was fetched from) to diagnose the
   * cause of corruption and return it.
*
* Any exception raised during the process will result in the [[Cause.UNKNOWN_ISSUE]] of the
* corruption cause since corruption diagnosis is only a best effort.
*
* @param checkedIn the [[CheckedInputStream]] which is used to calculate the checksum.
* @param address the address where the corrupted block is fetched from.
* @param blockId the blockId of the corrupted block.
* @return The corruption diagnosis response for different causes.
*/
private[storage] def diagnoseCorruption(
checkedIn: CheckedInputStream,
address: BlockManagerId,
blockId: BlockId): String = {
logInfo("Start corruption diagnosis.")
val startTimeNs = System.nanoTime()
assert(blockId.isInstanceOf[ShuffleBlockId], s"Expected ShuffleBlockId, but got $blockId")
val shuffleBlock = blockId.asInstanceOf[ShuffleBlockId]
val buffer = new Array[Byte](ShuffleChecksumHelper.CHECKSUM_CALCULATION_BUFFER)
// consume the remaining data to calculate the checksum
var cause: Cause = null
try {
while (checkedIn.read(buffer) != -1) {}
val checksum = checkedIn.getChecksum.getValue
cause = shuffleClient.diagnoseCorruption(address.host, address.port, address.executorId,
shuffleBlock.shuffleId, shuffleBlock.mapId, shuffleBlock.reduceId, checksum,
checksumAlgorithm)
} catch {
case e: Exception =>
logWarning("Unable to diagnose the corruption cause of the corrupted block", e)
cause = Cause.UNKNOWN_ISSUE
}
val duration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNs)
val diagnosisResponse = cause match {
case Cause.UNSUPPORTED_CHECKSUM_ALGORITHM =>
s"Block $blockId is corrupted but corruption diagnosis failed due to " +
s"unsupported checksum algorithm: $checksumAlgorithm"
case Cause.CHECKSUM_VERIFY_PASS =>
s"Block $blockId is corrupted but checksum verification passed"
case Cause.UNKNOWN_ISSUE =>
s"Block $blockId is corrupted but the cause is unknown"
case otherCause =>
s"Block $blockId is corrupted due to $otherCause"
}
logInfo(s"Finished corruption diagnosis in $duration ms. $diagnosisResponse")
diagnosisResponse
}
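  // Illustrative sketch (not part of the original file): how a CheckedInputStream accumulates a
  // checksum while the remaining bytes of a corrupted block are drained, which is the first step
  // diagnoseCorruption performs above. Adler32 is only an example algorithm here; the real one is
  // resolved from `checksumAlgorithm` via ShuffleChecksumHelper.
  private[storage] def exampleChecksumOfRemainingBytes(in: InputStream): Long = {
    val checked = new CheckedInputStream(in, new java.util.zip.Adler32())
    val buffer = new Array[Byte](8192)
    while (checked.read(buffer) != -1) {} // drain the stream; every read updates the checksum
    checked.getChecksum.getValue
  }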
def toCompletionIterator: Iterator[(BlockId, InputStream)] = {
CompletionIterator[(BlockId, InputStream), this.type](this,
onCompleteCallback.onComplete(context))
}
private def fetchUpToMaxBytes(): Unit = {
if (isNettyOOMOnShuffle.get()) {
if (reqsInFlight > 0) {
        // Return immediately if Netty is still OOMed and there are ongoing fetch requests
return
} else {
resetNettyOOMFlagIfPossible(0)
}
}
// Send fetch requests up to maxBytesInFlight. If you cannot fetch from a remote host
// immediately, defer the request until the next time it can be processed.
// Process any outstanding deferred fetch requests if possible.
if (deferredFetchRequests.nonEmpty) {
for ((remoteAddress, defReqQueue) <- deferredFetchRequests) {
while (isRemoteBlockFetchable(defReqQueue) &&
!isRemoteAddressMaxedOut(remoteAddress, defReqQueue.front)) {
val request = defReqQueue.dequeue()
logDebug(s"Processing deferred fetch request for $remoteAddress with "
+ s"${request.blocks.length} blocks")
send(remoteAddress, request)
if (defReqQueue.isEmpty) {
deferredFetchRequests -= remoteAddress
}
}
}
}
// Process any regular fetch requests if possible.
while (isRemoteBlockFetchable(fetchRequests)) {
val request = fetchRequests.dequeue()
val remoteAddress = request.address
if (isRemoteAddressMaxedOut(remoteAddress, request)) {
logDebug(s"Deferring fetch request for $remoteAddress with ${request.blocks.size} blocks")
val defReqQueue = deferredFetchRequests.getOrElse(remoteAddress, new Queue[FetchRequest]())
defReqQueue.enqueue(request)
deferredFetchRequests(remoteAddress) = defReqQueue
} else {
send(remoteAddress, request)
}
}
def send(remoteAddress: BlockManagerId, request: FetchRequest): Unit = {
if (request.forMergedMetas) {
pushBasedFetchHelper.sendFetchMergedStatusRequest(request)
} else {
sendRequest(request)
}
numBlocksInFlightPerAddress(remoteAddress) =
numBlocksInFlightPerAddress.getOrElse(remoteAddress, 0) + request.blocks.size
}
def isRemoteBlockFetchable(fetchReqQueue: Queue[FetchRequest]): Boolean = {
fetchReqQueue.nonEmpty &&
(bytesInFlight == 0 ||
(reqsInFlight + 1 <= maxReqsInFlight &&
bytesInFlight + fetchReqQueue.front.size <= maxBytesInFlight))
}
// Checks if sending a new fetch request will exceed the max no. of blocks being fetched from a
// given remote address.
def isRemoteAddressMaxedOut(remoteAddress: BlockManagerId, request: FetchRequest): Boolean = {
numBlocksInFlightPerAddress.getOrElse(remoteAddress, 0) + request.blocks.size >
maxBlocksInFlightPerAddress
}
}
private[storage] def throwFetchFailedException(
blockId: BlockId,
mapIndex: Int,
address: BlockManagerId,
e: Throwable,
message: Option[String] = None) = {
val msg = message.getOrElse(e.getMessage)
blockId match {
case ShuffleBlockId(shufId, mapId, reduceId) =>
throw SparkCoreErrors.fetchFailedError(address, shufId, mapId, mapIndex, reduceId, msg, e)
case ShuffleBlockBatchId(shuffleId, mapId, startReduceId, _) =>
throw SparkCoreErrors.fetchFailedError(address, shuffleId, mapId, mapIndex, startReduceId,
msg, e)
case _ => throw SparkCoreErrors.failToGetNonShuffleBlockError(blockId, e)
}
}
/**
* All the below methods are used by [[PushBasedFetchHelper]] to communicate with the iterator
*/
private[storage] def addToResultsQueue(result: FetchResult): Unit = {
results.put(result)
}
private[storage] def decreaseNumBlocksToFetch(blocksFetched: Int): Unit = {
numBlocksToFetch -= blocksFetched
}
/**
* Currently used by [[PushBasedFetchHelper]] to fetch fallback blocks when there is a fetch
* failure related to a push-merged block or shuffle chunk.
* This is executed by the task thread when the `iterator.next()` is invoked and if that initiates
* fallback.
*/
private[storage] def fallbackFetch(
originalBlocksByAddr: Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])]): Unit = {
val originalLocalBlocks = mutable.LinkedHashSet[(BlockId, Int)]()
val originalHostLocalBlocksByExecutor =
mutable.LinkedHashMap[BlockManagerId, Seq[(BlockId, Long, Int)]]()
val originalMergedLocalBlocks = mutable.LinkedHashSet[BlockId]()
val originalRemoteReqs = partitionBlocksByFetchMode(originalBlocksByAddr,
originalLocalBlocks, originalHostLocalBlocksByExecutor, originalMergedLocalBlocks)
// Add the remote requests into our queue in a random order
fetchRequests ++= Utils.randomize(originalRemoteReqs)
logInfo(s"Created ${originalRemoteReqs.size} fallback remote requests for push-merged")
// fetch all the fallback blocks that are local.
fetchLocalBlocks(originalLocalBlocks)
// Merged local blocks should be empty during fallback
assert(originalMergedLocalBlocks.isEmpty,
"There should be zero push-merged blocks during fallback")
// Some of the fallback local blocks could be host local blocks
fetchAllHostLocalBlocks(originalHostLocalBlocksByExecutor)
}
/**
* Removes all the pending shuffle chunks that are on the same host and have the same reduceId as
* the current chunk that had a fetch failure.
* This is executed by the task thread when the `iterator.next()` is invoked and if that initiates
* fallback.
*
* @return set of all the removed shuffle chunk Ids.
*/
private[storage] def removePendingChunks(
failedBlockId: ShuffleBlockChunkId,
address: BlockManagerId): mutable.HashSet[ShuffleBlockChunkId] = {
val removedChunkIds = new mutable.HashSet[ShuffleBlockChunkId]()
def sameShuffleReducePartition(block: BlockId): Boolean = {
val chunkId = block.asInstanceOf[ShuffleBlockChunkId]
chunkId.shuffleId == failedBlockId.shuffleId && chunkId.reduceId == failedBlockId.reduceId
}
def filterRequests(queue: mutable.Queue[FetchRequest]): Unit = {
val fetchRequestsToRemove = new mutable.Queue[FetchRequest]()
fetchRequestsToRemove ++= queue.dequeueAll { req =>
val firstBlock = req.blocks.head
firstBlock.blockId.isShuffleChunk && req.address.equals(address) &&
sameShuffleReducePartition(firstBlock.blockId)
}
fetchRequestsToRemove.foreach { _ =>
removedChunkIds ++=
fetchRequestsToRemove.flatMap(_.blocks.map(_.blockId.asInstanceOf[ShuffleBlockChunkId]))
}
}
filterRequests(fetchRequests)
deferredFetchRequests.get(address).foreach { defRequests =>
filterRequests(defRequests)
if (defRequests.isEmpty) deferredFetchRequests.remove(address)
}
removedChunkIds
}
}
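/**
 * Illustrative sketch (not part of the original file): the consumption pattern the `next()`
 * scaladoc above asks for. Wrap the iterator with its completion callback, then close every
 * returned InputStream as soon as it has been fully read so the backing ManagedBuffer can be
 * released early. The `iterator` parameter is assumed to be an already-constructed
 * ShuffleBlockFetcherIterator.
 */
private[storage] object ShuffleFetchUsageSketch {
  def drainAll(iterator: ShuffleBlockFetcherIterator)(consume: InputStream => Unit): Unit = {
    iterator.toCompletionIterator.foreach { case (_, in) =>
      try {
        consume(in) // e.g. hand the stream to a deserializer
      } finally {
        in.close() // release the ManagedBuffer as early as possible
      }
    }
  }
}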
/**
* Helper class that ensures a ManagedBuffer is released upon InputStream.close() and
* also detects stream corruption if streamCompressedOrEncrypted is true
*/
private class BufferReleasingInputStream(
// This is visible for testing
private[storage] val delegate: InputStream,
private val iterator: ShuffleBlockFetcherIterator,
private val blockId: BlockId,
private val mapIndex: Int,
private val address: BlockManagerId,
private val detectCorruption: Boolean,
private val isNetworkReqDone: Boolean,
private val checkedInOpt: Option[CheckedInputStream])
extends InputStream {
private[this] var closed = false
override def read(): Int =
tryOrFetchFailedException(delegate.read())
override def close(): Unit = {
if (!closed) {
try {
delegate.close()
iterator.releaseCurrentResultBuffer()
} finally {
        // Unset the flag when a remote request has finished and there is enough free memory.
if (isNetworkReqDone) {
ShuffleBlockFetcherIterator.resetNettyOOMFlagIfPossible(iterator.maxReqSizeShuffleToMem)
}
closed = true
}
}
}
override def available(): Int = delegate.available()
override def mark(readlimit: Int): Unit = delegate.mark(readlimit)
override def skip(n: Long): Long =
tryOrFetchFailedException(delegate.skip(n))
override def markSupported(): Boolean = delegate.markSupported()
override def read(b: Array[Byte]): Int =
tryOrFetchFailedException(delegate.read(b))
override def read(b: Array[Byte], off: Int, len: Int): Int =
tryOrFetchFailedException(delegate.read(b, off, len))
override def reset(): Unit = delegate.reset()
/**
   * Execute a block of code that returns a value. When detectCorruption is true, close this
   * stream quietly and re-throw any IOException as a FetchFailedException. This method is
   * currently only used by the `read` and `skip` methods inside `BufferReleasingInputStream`.
*/
private def tryOrFetchFailedException[T](block: => T): T = {
try {
block
} catch {
case e: IOException if detectCorruption =>
val diagnosisResponse = checkedInOpt.map { checkedIn =>
iterator.diagnoseCorruption(checkedIn, address, blockId)
}
IOUtils.closeQuietly(this)
// We'd never retry the block whatever the cause is since the block has been
// partially consumed by downstream RDDs.
iterator.throwFetchFailedException(blockId, mapIndex, address, e, diagnosisResponse)
}
}
}
/**
* A listener to be called at the completion of the ShuffleBlockFetcherIterator
* @param data the ShuffleBlockFetcherIterator to process
*/
private class ShuffleFetchCompletionListener(var data: ShuffleBlockFetcherIterator)
extends TaskCompletionListener {
override def onTaskCompletion(context: TaskContext): Unit = {
if (data != null) {
data.cleanup()
// Null out the referent here to make sure we don't keep a reference to this
// ShuffleBlockFetcherIterator, after we're done reading from it, to let it be
// collected during GC. Otherwise we can hold metadata on block locations(blocksByAddress)
data = null
}
}
  // Just an alias for onTaskCompletion to avoid confusion
def onComplete(context: TaskContext): Unit = this.onTaskCompletion(context)
}
private[storage]
object ShuffleBlockFetcherIterator {
/**
   * A flag which indicates whether a Netty OOM error has been raised during shuffle.
   * If true, all pending shuffle fetch requests will be deferred until the flag is unset
   * (whenever a fetch request completes), unless there are no in-flight fetch requests.
*/
val isNettyOOMOnShuffle = new AtomicBoolean(false)
def resetNettyOOMFlagIfPossible(freeMemoryLowerBound: Long): Unit = {
if (isNettyOOMOnShuffle.get() && NettyUtils.freeDirectMemory() >= freeMemoryLowerBound) {
isNettyOOMOnShuffle.compareAndSet(true, false)
}
}
/**
   * This function is used to merge blocks when doBatchFetch is true. Blocks which have the
* same `mapId` can be merged into one block batch. The block batch is specified by a range
* of reduceId, which implies the continuous shuffle blocks that we can fetch in a batch.
* For example, input blocks like (shuffle_0_0_0, shuffle_0_0_1, shuffle_0_1_0) can be
* merged into (shuffle_0_0_0_2, shuffle_0_1_0_1), and input blocks like (shuffle_0_0_0_2,
* shuffle_0_0_2, shuffle_0_0_3) can be merged into (shuffle_0_0_0_4).
*
   * @param blocks blocks to be merged if possible. May contain already merged blocks.
* @param doBatchFetch whether to merge blocks.
* @return the input blocks if doBatchFetch=false, or the merged blocks if doBatchFetch=true.
*/
def mergeContinuousShuffleBlockIdsIfNeeded(
blocks: Seq[FetchBlockInfo],
doBatchFetch: Boolean): Seq[FetchBlockInfo] = {
val result = if (doBatchFetch) {
val curBlocks = new ArrayBuffer[FetchBlockInfo]
val mergedBlockInfo = new ArrayBuffer[FetchBlockInfo]
def mergeFetchBlockInfo(toBeMerged: ArrayBuffer[FetchBlockInfo]): FetchBlockInfo = {
val startBlockId = toBeMerged.head.blockId.asInstanceOf[ShuffleBlockId]
        // The last merged block may come from the input, and we can merge more blocks
        // into it if the map id is the same.
def shouldMergeIntoPreviousBatchBlockId =
mergedBlockInfo.last.blockId.asInstanceOf[ShuffleBlockBatchId].mapId == startBlockId.mapId
val (startReduceId, size) =
if (mergedBlockInfo.nonEmpty && shouldMergeIntoPreviousBatchBlockId) {
// Remove the previous batch block id as we will add a new one to replace it.
val removed = mergedBlockInfo.remove(mergedBlockInfo.length - 1)
(removed.blockId.asInstanceOf[ShuffleBlockBatchId].startReduceId,
removed.size + toBeMerged.map(_.size).sum)
} else {
(startBlockId.reduceId, toBeMerged.map(_.size).sum)
}
FetchBlockInfo(
ShuffleBlockBatchId(
startBlockId.shuffleId,
startBlockId.mapId,
startReduceId,
toBeMerged.last.blockId.asInstanceOf[ShuffleBlockId].reduceId + 1),
size,
toBeMerged.head.mapIndex)
}
val iter = blocks.iterator
while (iter.hasNext) {
val info = iter.next()
// It's possible that the input block id is already a batch ID. For example, we merge some
// blocks, and then make fetch requests with the merged blocks according to "max blocks per
// request". The last fetch request may be too small, and we give up and put the remaining
// merged blocks back to the input list.
if (info.blockId.isInstanceOf[ShuffleBlockBatchId]) {
mergedBlockInfo += info
} else {
if (curBlocks.isEmpty) {
curBlocks += info
} else {
val curBlockId = info.blockId.asInstanceOf[ShuffleBlockId]
val currentMapId = curBlocks.head.blockId.asInstanceOf[ShuffleBlockId].mapId
if (curBlockId.mapId != currentMapId) {
mergedBlockInfo += mergeFetchBlockInfo(curBlocks)
curBlocks.clear()
}
curBlocks += info
}
}
}
if (curBlocks.nonEmpty) {
mergedBlockInfo += mergeFetchBlockInfo(curBlocks)
}
mergedBlockInfo
} else {
blocks
}
result.toSeq
}
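  // Illustrative sketch (not part of the original file): the merge described in the scaladoc
  // above, spelled out with concrete (made-up) sizes. Contiguous reduce ids from the same map
  // output collapse into a single ShuffleBlockBatchId; the input below is expected to yield
  // ShuffleBlockBatchId(0, 0, 0, 2) and ShuffleBlockBatchId(0, 1, 0, 1).
  private[storage] def mergeExampleForDocs(): Seq[FetchBlockInfo] = {
    val blocks = Seq(
      FetchBlockInfo(ShuffleBlockId(0, 0L, 0), 100L, 0),
      FetchBlockInfo(ShuffleBlockId(0, 0L, 1), 100L, 0),
      FetchBlockInfo(ShuffleBlockId(0, 1L, 0), 100L, 1))
    mergeContinuousShuffleBlockIdsIfNeeded(blocks, doBatchFetch = true)
  }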
/**
* The block information to fetch used in FetchRequest.
* @param blockId block id
* @param size estimated size of the block. Note that this is NOT the exact bytes.
* Size of remote block is used to calculate bytesInFlight.
   * @param mapIndex the mapIndex for this block, which indicates the index in the map stage.
*/
private[storage] case class FetchBlockInfo(
blockId: BlockId,
size: Long,
mapIndex: Int)
/**
* A request to fetch blocks from a remote BlockManager.
* @param address remote BlockManager to fetch from.
* @param blocks Sequence of the information for blocks to fetch from the same address.
* @param forMergedMetas true if this request is for requesting push-merged meta information;
* false if it is for regular or shuffle chunks.
*/
case class FetchRequest(
address: BlockManagerId,
blocks: Seq[FetchBlockInfo],
forMergedMetas: Boolean = false) {
val size = blocks.map(_.size).sum
}
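  // Illustrative sketch (not part of the original file): `size` is simply the sum of the
  // estimated block sizes, which is what the bytesInFlight accounting is based on. The block
  // ids and sizes here are made up.
  private[storage] def exampleFetchRequestSize(address: BlockManagerId): Long = {
    val request = FetchRequest(address, Seq(
      FetchBlockInfo(ShuffleBlockId(0, 0L, 0), 1024L, 0),
      FetchBlockInfo(ShuffleBlockId(0, 0L, 1), 2048L, 0)))
    request.size // 3072
  }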
/**
* Result of a fetch from a remote block.
*/
private[storage] sealed trait FetchResult
/**
   * Result of a successful fetch of a remote block.
* @param blockId block id
   * @param mapIndex the mapIndex for this block, which indicates the index in the map stage.
* @param address BlockManager that the block was fetched from.
* @param size estimated size of the block. Note that this is NOT the exact bytes.
* Size of remote block is used to calculate bytesInFlight.
* @param buf `ManagedBuffer` for the content.
* @param isNetworkReqDone Is this the last network request for this host in this fetch request.
*/
private[storage] case class SuccessFetchResult(
blockId: BlockId,
mapIndex: Int,
address: BlockManagerId,
size: Long,
buf: ManagedBuffer,
isNetworkReqDone: Boolean) extends FetchResult {
require(buf != null)
require(size >= 0)
}
/**
   * Result of an unsuccessful fetch of a remote block.
* @param blockId block id
   * @param mapIndex the mapIndex for this block, which indicates the index in the map stage
* @param address BlockManager that the block was attempted to be fetched from
* @param e the failure exception
*/
private[storage] case class FailureFetchResult(
blockId: BlockId,
mapIndex: Int,
address: BlockManagerId,
e: Throwable)
extends FetchResult
/**
   * Result of a fetch request that should be deferred for some reason, e.g., Netty OOM
*/
private[storage]
case class DeferFetchRequestResult(fetchRequest: FetchRequest) extends FetchResult
/**
   * Result of an unsuccessful fetch of either of these:
* 1) Remote shuffle chunk.
* 2) Local push-merged block.
*
   * Instead of treating this as a [[FailureFetchResult]], we fall back to fetching the original blocks.
*
* @param blockId block id
* @param address BlockManager that the push-merged block was attempted to be fetched from
* @param size size of the block, used to update bytesInFlight.
* @param isNetworkReqDone Is this the last network request for this host in this fetch
* request. Used to update reqsInFlight.
*/
private[storage] case class FallbackOnPushMergedFailureResult(blockId: BlockId,
address: BlockManagerId,
size: Long,
isNetworkReqDone: Boolean) extends FetchResult
/**
* Result of a successful fetch of meta information for a remote push-merged block.
*
* @param shuffleId shuffle id.
* @param shuffleMergeId shuffleMergeId is used to uniquely identify merging process
* of shuffle by an indeterminate stage attempt.
* @param reduceId reduce id.
* @param blockSize size of each push-merged block.
* @param bitmaps bitmaps for every chunk.
* @param address BlockManager that the meta was fetched from.
*/
private[storage] case class PushMergedRemoteMetaFetchResult(
shuffleId: Int,
shuffleMergeId: Int,
reduceId: Int,
blockSize: Long,
bitmaps: Array[RoaringBitmap],
address: BlockManagerId) extends FetchResult
/**
* Result of a failure while fetching the meta information for a remote push-merged block.
*
* @param shuffleId shuffle id.
* @param shuffleMergeId shuffleMergeId is used to uniquely identify merging process
* of shuffle by an indeterminate stage attempt.
* @param reduceId reduce id.
* @param address BlockManager that the meta was fetched from.
*/
private[storage] case class PushMergedRemoteMetaFailedFetchResult(
shuffleId: Int,
shuffleMergeId: Int,
reduceId: Int,
address: BlockManagerId) extends FetchResult
/**
* Result of a successful fetch of meta information for a push-merged-local block.
*
* @param shuffleId shuffle id.
* @param shuffleMergeId shuffleMergeId is used to uniquely identify merging process
* of shuffle by an indeterminate stage attempt.
* @param reduceId reduce id.
* @param bitmaps bitmaps for every chunk.
   * @param localDirs local directories where the push-merged shuffle files are stored.
*/
private[storage] case class PushMergedLocalMetaFetchResult(
shuffleId: Int,
shuffleMergeId: Int,
reduceId: Int,
bitmaps: Array[RoaringBitmap],
localDirs: Array[String]) extends FetchResult
}
| chuckchen/spark | core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala | Scala | apache-2.0 | 71,967 |
package frameless
package ops
package deserialized
import org.scalacheck.Prop
import org.scalacheck.Prop._
class FlatMapTests extends TypedDatasetSuite {
test("flatMap") {
def prop[A: TypedEncoder, B: TypedEncoder](flatMapFunction: A => Vector[B], data: Vector[A]): Prop =
TypedDataset.create(data).
deserialized.
flatMap(flatMapFunction).
collect().run().toVector =? data.flatMap(flatMapFunction)
check(forAll(prop[Int, Int] _))
check(forAll(prop[Int, String] _))
check(forAll(prop[String, Int] _))
}
}
| adelbertc/frameless | dataset/src/test/scala/frameless/ops/deserialized/FlatMapTests.scala | Scala | apache-2.0 | 557 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.singleTest
import org.jetbrains.plugins.scala.testingSupport.scalatest.generators.WordSpecGenerator
/**
* @author Roman.Shein
* @since 20.01.2015.
*/
trait WordSpecSingleTestTest extends WordSpecGenerator {
val wordSpecTestPath = List("[root]", "WordSpecTest", "WordSpecTest", "Run single test")
def testWordSpec() {
addWordSpec()
runTestByLocation(5, 10, wordSpecFileName,
checkConfigAndSettings(_, wordSpecClassName, "WordSpecTest should Run single test"),
root => checkResultTreeHasExactNamedPath(root, wordSpecTestPath:_*) &&
checkResultTreeDoesNotHaveNodes(root, "ignore other tests"),
debug = true
)
}
}
| triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/singleTest/WordSpecSingleTestTest.scala | Scala | apache-2.0 | 724 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti AeonDB
.t1i .,::;;; ;1tt Copyright (c) 2014 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: lance.gatlin@gmail.com
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.aeondb
import scala.concurrent.ExecutionContext.Implicits.global
import org.scalatest.{FlatSpec, Matchers}
import s_mach.concurrent._
class AeonMapTest extends FlatSpec with Matchers {
implicit val metadata = Metadata(
who = "test",
why = Some("test")
)
"AeonMap" must "construct" in {
val m = Map(1 -> "a", 2 -> "b")
val p = AeonMap(m.toSeq:_*)
p.base.toMap.get should equal(m)
p.zomCommit.get should equal(Nil)
}
}
| S-Mach/aeondb | src/test/scala/s_mach/aeondb/AeonMapTest.scala | Scala | apache-2.0 | 1,207 |
package org.jetbrains.plugins.scala.testingSupport.scalatest
/**
* @author Roman.Shein
* @since 11.02.2015.
*/
trait ScalaTest1GoToSourceTest extends ScalaTestGoToSourceTest {
def getSuccessfulTestPath: List[String] = List("[root]", "Successful test should run fine")
def getPendingTestPath: List[String] = List("[root]", "pending test should be pending")
//for ignored test, we launch the whole suite
def getIgnoredTestPath: List[String] = List("[root]", goToSourceClassName, "pending test should be ignored !!! IGNORED !!!")
def getFailedTestPath: List[String] = List("[root]", "failed test should fail")
//TODO: for now, scalaTest v1.x only supports 'topOfClass' location
def getSuccessfulLocationLine: Int = 2
def getPendingLocationLine: Int = 2
def getIgnoredLocationLine: Int = 2
def getFailedLocationLine: Int = 2
}
| loskutov/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/ScalaTest1GoToSourceTest.scala | Scala | apache-2.0 | 848 |
package org.nd4s.ops
import org.nd4j.autodiff.samediff.SDVariable
import org.nd4j.linalg.api.complex.IComplexNumber
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.api.ops.{Op, BaseScalarOp}
import org.nd4j.linalg.factory.Nd4j
import org.nd4s.Implicits._
object BitFilterOps {
def apply(x:INDArray,f:Double=>Boolean, g:IComplexNumber => Boolean):BitFilterOps = new BitFilterOps(x,x.length(),f,g)
}
class BitFilterOps(_x:INDArray,len:Int,f:Double => Boolean, g:IComplexNumber => Boolean) extends BaseScalarOp(_x,null:INDArray,_x,len,0) with LeftAssociativeBinaryOp {
def this(){
this(0.toScalar,0,null,null)
}
x = _x
override def opNum(): Int = -1
override def opName(): String = "bitfilter_scalar"
override def onnxName(): String = throw new UnsupportedOperationException
override def tensorflowName(): String = throw new UnsupportedOperationException
override def doDiff(f1: java.util.List[SDVariable]): java.util.List[SDVariable] = throw new UnsupportedOperationException
// override def opForDimension(index: Int, dimension: Int): Op = BitFilterOps(x.tensorAlongDimension(index,dimension),f,g)
//
// override def opForDimension(index: Int, dimension: Int*): Op = BitFilterOps(x.tensorAlongDimension(index,dimension:_*),f,g)
override def op(origin: Double): Double = if(f(origin)) 1 else 0
override def op(origin: Float): Float = if(f(origin)) 1 else 0
override def op(origin: IComplexNumber): IComplexNumber = if(g(origin)) Nd4j.createComplexNumber(1,0) else Nd4j.createComplexNumber(0, 0)
}
| deeplearning4j/nd4s | src/main/scala/org/nd4s/ops/BitFilterOps.scala | Scala | apache-2.0 | 1,555 |
val list = List("ala", "ma", "kota")
val size = (0 /: list) { (sum, s) => sum + s.size }
// println size
println(size)
println(list.map(_.size).reduceLeft(_+_))
println(list.map(_.size).sum)
| rkj/7langs7weeks | 4scala/2day/foldl.scala | Scala | mit | 192 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.commons.dao
import com.flipkart.connekt.commons.entities.AppUser
trait TUserInfo {
def getUserInfo(userId: String): Option[AppUser]
def addUserInfo(user: AppUser)
def getUserByKey(key: String): Option[AppUser]
def removeUserById(userId: String)
}
| Flipkart/connekt | commons/src/main/scala/com/flipkart/connekt/commons/dao/TUserInfo.scala | Scala | mit | 895 |
package io.reactivecqrs.core.projection
abstract class AggregateListenerActor extends ProjectionActor {
protected def receiveQuery: Receive = {
case m => ()
}
protected override def onClearProjectionData(): Unit = {
// override by child if needed
}
}
| marpiec/ReactiveCQRS | core/src/main/scala/io/reactivecqrs/core/projection/AggregateListenerActor.scala | Scala | apache-2.0 | 271 |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.utils.io
import java.io.{ByteArrayOutputStream, PrintStream}
import com.flaminem.flamy.exec.utils.ThreadPrintStream
/**
* Created by fpin on 2/19/17.
*/
trait OutputInterceptor extends AutoCloseable {
import OutputInterceptor._
def printLine(s: String): Unit
val interceptorOut: InterceptorPrintStream = new InterceptorPrintStream(printLine)
val interceptorErr: InterceptorPrintStream = new InterceptorPrintStream(printLine)
ThreadPrintStream.setOut(interceptorOut)
ThreadPrintStream.setErr(interceptorErr)
// val systemOut: PrintStream = {
// System.out match {
// case s: ThreadPrintStream =>
// val sout = s.getThreadOut
// s.setThreadOut(interceptorOut)
// sout
// case _ =>
// val sout = System.out
// System.setOut(interceptorOut)
// Console.setOut(interceptorOut)
// sout
// }
// }
//
// val systemErr: PrintStream = {
// System.err match {
// case s: ThreadPrintStream =>
// val serr = s.getThreadOut
// s.setThreadOut(interceptorErr)
// serr
// case _ =>
// val serr = System.err
// System.setOut(interceptorErr)
// Console.setOut(interceptorErr)
// serr
// }
// }
override def close(): Unit = {
ThreadPrintStream.restoreOut()
ThreadPrintStream.restoreErr()
// System.out match {
// case s: ThreadPrintStream =>
// s.setThreadOut(systemOut)
// case _ =>
// System.setOut(systemOut)
// Console.setOut(systemOut)
// }
// System.err match {
// case s: ThreadPrintStream =>
// s.setThreadOut(systemErr)
// case _ =>
// System.setErr(systemErr)
// Console.setErr(systemErr)
// }
interceptorOut.close()
interceptorErr.close()
}
}
object OutputInterceptor {
class InterceptorPrintStream(printLine: (String) => Unit) extends PrintStream(new ByteArrayOutputStream(), true) {
val buf: ByteArrayOutputStream = this.out.asInstanceOf[ByteArrayOutputStream]
override def println(): Unit = {
printLine("")
}
override def println(x: Boolean): Unit = {
printLine(x.toString)
}
override def println(x: Char): Unit = {
printLine(x.toString)
}
override def println(x: Int): Unit = {
printLine(x.toString)
}
override def println(x: Long): Unit = {
printLine(x.toString)
}
override def println(x: Float): Unit = {
printLine(x.toString)
}
override def println(x: Double): Unit = {
printLine(x.toString)
}
override def println(s: String): Unit = {
printLine(s)
}
override def println(s: Array[Char]): Unit = {
printLine(new String(s))
}
override def println(s: Any): Unit = {
printLine(s.toString)
}
override def flush(): Unit = {
if(buf.size() > 0){
printLine("<" + this.out.toString)
}
buf.reset()
}
override def toString: String = "InterceptorPrintStream"
}
}
| flaminem/flamy | src/main/scala/com/flaminem/flamy/utils/io/OutputInterceptor.scala | Scala | apache-2.0 | 3,601 |
/*
* Copyright (c) 2015 Andreas Wolf
*
 * See the LICENSE file in the project root for further copyright information.
*/
package info.andreaswolf.roadhopper.simulation.control
import akka.actor.{ActorLogging, ActorRef}
import akka.pattern.ask
import info.andreaswolf.roadhopper.simulation.signals.SignalBus.UpdateSignalValue
import info.andreaswolf.roadhopper.simulation.signals.{Process, SignalState}
import scala.concurrent.Future
/**
* A PT1 proportional time-invariant controller.
*
* Its transfer function is "K / (1 + Ts)", with K being the amplification and T the time constant.
*
* @param inputSignalName The name of the input signal to listen to. Note that you manually need to register an instance
* of this controller to listen to the signal.
*/
class PT1(inputSignalName: String, outputSignalName: String, timeConstant: Int, amplification: Double = 1.0,
initialValue: Double = 0.0, bus: ActorRef) extends Process(bus) with ActorLogging {
import context.dispatcher
/**
* The last time this process was invoked *before* the current time. This value does not change during one time step,
* even if the process is invoked multiple times.
*/
var lastTimeStep = 0
/**
* The time the process was last invoked. This value is updated with each update, that means it could be updated
* multiple times during one time step
*/
var lastInvocationTime = 0
/**
* The last output value
*/
var lastOutput = 0.0
override def timeAdvanced(oldTime: Int, newTime: Int): Future[Unit] = Future {
lastTimeStep = lastInvocationTime
}
/**
* The central routine of a process. This is invoked whenever a subscribed signal’s value changes.
*/
override def invoke(signals: SignalState): Future[Any] = {
val currentInput = signals.signalValue(inputSignalName, initialValue)
val deltaT = time - lastTimeStep
if (deltaT == 0) {
log.error("No time passed since last invocation: Cannot update signal")
      return Future.successful(())
}
val timeFactor = 1.0 / (timeConstant / deltaT + 1.0)
val newValue = timeFactor * (amplification * currentInput - lastOutput) + lastOutput
lastOutput = newValue
lastInvocationTime = time
bus ? UpdateSignalValue(outputSignalName, newValue)
}
}
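/**
 * Illustrative sketch (not part of the original file): the discrete-time update that `invoke`
 * above implements (up to the integer rounding of `timeConstant / deltaT`). Discretising the
 * transfer function K / (1 + T·s) with a step of length dt gives
 *   y_new = y_old + (K·u - y_old) · dt / (T + dt)
 * which equals `timeFactor * (amplification * input - lastOutput) + lastOutput` with
 * timeFactor = 1 / (T / dt + 1).
 */
object PT1Step {
  def step(yOld: Double, input: Double, dt: Double, timeConstant: Double,
      amplification: Double = 1.0): Double =
    yOld + (amplification * input - yOld) * dt / (timeConstant + dt)
}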
| andreaswolf/roadhopper | src/main/scala/info/andreaswolf/roadhopper/simulation/control/PT1.scala | Scala | mit | 2,267 |
package ksp
class Vessel(self: ksp.Object) extends WrappedObject(self) {
assert(self.kind == "VESSEL")
def root = new Part(self.getChild("PART", self.getProperty("root").toInt))
/* Two VESSELs are equal if:
- they have the same name, and
- they have the same root part
*/
override def equals(other: Any): Boolean = other match {
case other: ksp.Vessel => {
      (this eq other) || (this.self.getProperty("lct") == other.asObject.getProperty("lct")) || (this.self == other.asObject)
}
case _ => super.equals(other)
}
}
object Vessel {
def isDebris(obj: Object) = if (obj.kind == "VESSEL") {
obj.getChild("ORBIT").getProperty("OBJ") != "0"
} else false
def isLanded(obj: Object): Boolean = if (obj.kind == "VESSEL") {
obj.testProperty("sit", """(SPLASHED|LANDED)""")
} else false
def isGhost(obj: Object): Boolean = if (obj.kind == "VESSEL") {
obj.getChildren("PART").isEmpty
} else false
}
class Part(self: ksp.Object) extends WrappedObject(self) {
assert(self.kind == "PART")
// two parts are == if they have the same UID, disregarding name
override def equals(other: Any): Boolean = other match {
case other: Part => (other.asObject.getProperty("uid") == self.getProperty("uid"))
case _ => super.equals(other)
}
}
| ToxicFrog/kessler | libksp/src/Vessel.scala | Scala | mit | 1,301 |
//
// Copyright (c) 2014 Ole Krause-Sparmann
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.postwall.api.actors
// Akka basics
import akka.actor._
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
// Spray routing and HTTP
import spray.routing.Route._
import spray.routing._
import spray.http._
import reflect.ClassTag
// Scala implicits and futures
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
// Classes for timeout setup
import akka.util.Timeout
import scala.concurrent.duration._
// DB actor, commands and model case classes
import com.postwall.db.actors.DBActor
import com.postwall.db._
import com.postwall.db.Helpers._
import spray.json.DefaultJsonProtocol._
import spray.httpx.SprayJsonSupport._
// DateTime support
import org.joda.time.DateTime
// Marshallers for db entities and other case classes
import com.postwall.api.utils.Marshallers._
// We don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class APIServiceActor extends Actor with PostwallService {
println("Postwall API service actor started")
// Set implicit execution context and set timeout
implicit def executionContext = actorRefFactory.dispatcher
// Get actor context
def actorRefFactory = context
// Create DB worker actor
val dbWorker = context.actorOf(Props[DBActor], "db-worker")
// Return db actor for PostwallService trait routes
def dbWorkerObject: ActorRef = dbWorker
// This actor only runs our route, but you could add
// other things here, like request stream processing
// or timeout handling
def receive = runRoute(routes)
}
// this trait defines our service behavior independently from the service actor
trait PostwallService extends HttpService with JsonMarshallers {
// Timeout used by futures (? pattern)
implicit val timeout = Timeout(60 seconds)
// Returns db actor
def dbWorkerObject: ActorRef
// We use detach() on each request handler. This executes the request processing in a separate actor thus
// does not block the service actor.
val routes = {
// Wrap all directives in exception handler directive
path("ping") {
get {
detach() {
complete("pong")
}
}
}~
path("posts") {
post {
// Parse WallPostParams entity from JSON
entity(as[WallPostParams]) { params =>
// Process in another actor (do not block this service actor)
detach() {
handleExceptions( ExceptionHandler {case _: Exception => complete(StatusCodes.BadRequest, APIError(errorCode=APIErrorCodes.UNKNOWN_ERROR, errorDescripton="Could not post"))} ) {
// Ask (?) db actor for result and map that future to Option[WallPost] and call get on it to get the actual post.
complete( (dbWorkerObject ? DBActor.PostToWall(params)).mapTo[Option[WallPost]].map(x => x.get))
}
}
}
}~
get {
detach() {
handleExceptions( ExceptionHandler {case _: Exception => complete(StatusCodes.BadRequest, APIError(errorCode=APIErrorCodes.UNKNOWN_ERROR, errorDescripton="Could not get posts"))} ) {
// Ask (?) db actor for result and map that future to Option[List[WallPost]] and call get on it to get the actual list.
complete( (dbWorkerObject ? DBActor.GetPosts).mapTo[Option[List[WallPost]]].map(x => x.get))
}
}
}
}
}
}
| pixelogik/postwall | postwall/api/src/main/scala/com/postwall/api/actors/APIServiceActor.scala | Scala | mit | 4,569 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.binders
import org.scalatest.{EitherValues, Matchers, OptionValues, WordSpecLike}
import uk.gov.hmrc.play.binders.ContinueUrl._
class ContinueUrlSpec extends WordSpecLike with Matchers with EitherValues with OptionValues {
"isAbsoluteUrl" should {
"return true for an absolute URL" in {
ContinueUrl("http://www.example.com").isAbsoluteUrl shouldBe true
}
"return false for a relative URL" in {
ContinueUrl("/service/page").isAbsoluteUrl shouldBe false
}
}
"isRelativeUrl" should {
"return false for an absolute URL" in {
ContinueUrl("http://www.example.com").isRelativeUrl shouldBe false
}
"return true for a relative URL" in {
ContinueUrl("/service/page").isRelativeUrl shouldBe true
}
}
"not work for protocol-relative urls" in {
an[IllegalArgumentException] should be thrownBy ContinueUrl("//some/value?with=query")
an[IllegalArgumentException] should be thrownBy ContinueUrl("///some/value?with=query")
an[IllegalArgumentException] should be thrownBy ContinueUrl("////some/value?with=query")
}
"not work for urls with @" in {
an[IllegalArgumentException] should be thrownBy ContinueUrl("/some/value?with=query@meh")
}
"not work for urls with /\\\\" in {
an[IllegalArgumentException] should be thrownBy ContinueUrl("/\\\\www.example.com")
}
"not work for path-relative urls" in {
an[IllegalArgumentException] should be thrownBy ContinueUrl("some/value?with=query")
}
"not work for non-urls" in {
an[IllegalArgumentException] should be thrownBy ContinueUrl("someasdfasdfa")
}
"encodedUrl should produce the expected result" in {
ContinueUrl("/some/value?with=query").encodedUrl shouldBe "%2Fsome%2Fvalue%3Fwith%3Dquery"
}
"Binding a continue URL" should {
"work for host-relative URLs" in {
val url = "/some/value"
queryBinder.bind("continue", Map("continue" -> Seq(url))).value.right.value should be(ContinueUrl(url))
}
"work for host-relative URLs with query Params" in {
val url = "/some/value?with=query"
queryBinder.bind("continue", Map("continue" -> Seq(url))).value.right.value should be(ContinueUrl(url))
}
"not work for path-relative urls" in {
val url = "some/value?with=query"
queryBinder.bind("continue", Map("continue" -> Seq(url))).value.left.value should be(s"'$url' is not a valid continue URL")
}
"not work for non-urls" in {
val url = "::"
queryBinder.bind("continue", Map("continue" -> Seq(url))).value.left.value should be(s"'$url' is not a valid continue URL")
}
}
"Unbinding a continue URL" should {
"return the value" in {
queryBinder.unbind("continue", ContinueUrl("/some/url")) should be("continue=%2Fsome%2Furl")
}
}
}
| nicf82/play-ui | src/test/scala/uk/gov/hmrc/play/binders/ContinueUrlSpec.scala | Scala | apache-2.0 | 3,413 |