| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <max.c.lv@gmail.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
*                            ___====-_  _-====___
*                      _--^^^#####//      \\#####^^^--_
*                   _-^##########// (    ) \\##########^-_
*                  -############//  |\^^/|  \\############-
*                _/############//   (@::@)   \\############\_
*               /#############((     \\//     ))#############\
*              -###############\\    (oo)    //###############-
*             -#################\\  / VV \  //#################-
*            -###################\\/      \//###################-
*           _#/|##########/\######(   /\   )######/\##########|\#_
*           |/ |#/\#/\#/\/  \#/\##\  |  |  /##/\#/  \/\#/\#/\#| \|
*           `  |/  V  V  `   V  \#\| |  | |/#/  V   '  V  V  \|  '
*              `   `  `      `   / | |  | | \   '      '  '   '
*                              (  | |  | |  )
*                             __\ | |  | | /__
*                            (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import java.io.File
import java.lang.reflect.{InvocationTargetException, Method}
import java.util.Locale
import android.app._
import android.content._
import android.content.pm.{PackageInfo, PackageManager}
import android.net.{Network, ConnectivityManager}
import android.os._
import android.support.v4.app.NotificationCompat
import android.util.{SparseArray, Log}
import android.widget.Toast
import com.github.shadowsocks.aidl.Config
import com.github.shadowsocks.utils._
import com.google.android.gms.analytics.HitBuilders
import org.apache.http.conn.util.InetAddressUtils
import scala.collection._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ops._
class ShadowsocksNatService extends Service with BaseService {
val TAG = "ShadowsocksNatService"
val CMD_IPTABLES_RETURN = " -t nat -A OUTPUT -p tcp -d 0.0.0.0 -j RETURN"
val CMD_IPTABLES_DNAT_ADD_SOCKS = " -t nat -A OUTPUT -p tcp " +
"-j DNAT --to-destination 127.0.0.1:8123"
private val mStartForegroundSignature = Array[Class[_]](classOf[Int], classOf[Notification])
private val mStopForegroundSignature = Array[Class[_]](classOf[Boolean])
private val mSetForegroundSignature = Array[Class[_]](classOf[Boolean])
private val mSetForegroundArgs = new Array[AnyRef](1)
private val mStartForegroundArgs = new Array[AnyRef](2)
private val mStopForegroundArgs = new Array[AnyRef](1)
var lockReceiver: BroadcastReceiver = null
var closeReceiver: BroadcastReceiver = null
var connReceiver: BroadcastReceiver = null
var notificationManager: NotificationManager = null
var config: Config = null
var apps: Array[ProxiedApp] = null
val myUid = Process.myUid()
private var mSetForeground: Method = null
private var mStartForeground: Method = null
private var mStopForeground: Method = null
private lazy val application = getApplication.asInstanceOf[ShadowsocksApplication]
private val dnsAddressCache = new SparseArray[String]
def getNetId(network: Network): Int = {
network.getClass.getDeclaredField("netId").get(network).asInstanceOf[Int]
}
def restoreDnsForAllNetwork() {
val manager = getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
val networks = manager.getAllNetworks
val cmdBuf = new ArrayBuffer[String]()
networks.foreach(network => {
val netId = getNetId(network)
val oldDns = dnsAddressCache.get(netId)
if (oldDns != null) {
cmdBuf.append("ndc resolver setnetdns %d \\"\\" %s".formatLocal(Locale.ENGLISH, netId, oldDns))
dnsAddressCache.remove(netId)
}
})
if (cmdBuf.nonEmpty) Console.runRootCommand(cmdBuf.toArray)
}
def setDnsForAllNetwork(dns: String) {
val manager = getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
val networks = manager.getAllNetworks
if (networks == null) return
val cmdBuf = new ArrayBuffer[String]()
networks.foreach(network => {
val networkInfo = manager.getNetworkInfo(network)
if (networkInfo == null) return
if (networkInfo.isConnected) {
val netId = getNetId(network)
val curDnsList = manager.getLinkProperties(network).getDnsServers
if (curDnsList != null) {
import scala.collection.JavaConverters._
val curDns = curDnsList.asScala.map(ip => ip.getHostAddress).mkString(" ")
if (curDns != dns) {
dnsAddressCache.put(netId, curDns)
cmdBuf.append("ndc resolver setnetdns %d \\"\\" %s".formatLocal(Locale.ENGLISH, netId, dns))
}
}
}
})
if (cmdBuf.nonEmpty) Console.runRootCommand(cmdBuf.toArray)
}
def setupDns() {
setDnsForAllNetwork("127.0.0.1")
}
def resetDns() = {
restoreDnsForAllNetwork()
}
def flushDns() {
if (Utils.isLollipopOrAbove) {
val manager = getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
val networks = manager.getAllNetworks
val cmdBuf = new ArrayBuffer[String]()
networks.foreach(network => {
val networkInfo = manager.getNetworkInfo(network)
if (networkInfo.isAvailable) {
val netId = network.getClass.getDeclaredField("netId").get(network).asInstanceOf[Int]
cmdBuf.append("ndc resolver flushnet %d".formatLocal(Locale.ENGLISH, netId))
}
})
Console.runRootCommand(cmdBuf.toArray)
} else {
Console.runRootCommand(Array("ndc resolver flushdefaultif", "ndc resolver flushif wlan0"))
}
}
def destroyConnectionReceiver() {
if (connReceiver != null) {
unregisterReceiver(connReceiver)
connReceiver = null
}
resetDns()
}
def initConnectionReceiver() {
val filter = new IntentFilter(ConnectivityManager.CONNECTIVITY_ACTION)
connReceiver = new BroadcastReceiver {
override def onReceive(context: Context, intent: Intent) = {
setupDns()
}
}
registerReceiver(connReceiver, filter)
}
def startShadowsocksDaemon() {
if (config.route != Route.ALL) {
val acl: Array[String] = config.route match {
case Route.BYPASS_LAN => getResources.getStringArray(R.array.private_route)
case Route.BYPASS_CHN => getResources.getStringArray(R.array.chn_route_full)
}
ConfigUtils.printToFile(new File(Path.BASE + "acl.list"))(p => {
acl.foreach(item => p.println(item))
})
}
val conf = ConfigUtils
.SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, config.localPort,
config.sitekey, config.encMethod, 10)
ConfigUtils.printToFile(new File(Path.BASE + "ss-local-nat.conf"))(p => {
p.println(conf)
})
val cmd = new ArrayBuffer[String]
cmd += (Path.BASE + "ss-local"
, "-b" , "127.0.0.1"
, "-t" , "600"
, "-c" , Path.BASE + "ss-local-nat.conf"
, "-f" , Path.BASE + "ss-local-nat.pid")
if (config.isAuth) cmd += "-A"
if (config.route != Route.ALL) {
cmd += "--acl"
cmd += (Path.BASE + "acl.list")
}
if (BuildConfig.DEBUG) Log.d(TAG, cmd.mkString(" "))
Console.runCommand(cmd.mkString(" "))
}
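// Illustration only (not part of the original source): with a hypothetical
// Path.BASE of "/data/data/com.github.shadowsocks/", the command assembled
// above would resemble:
//   /data/data/com.github.shadowsocks/ss-local -b 127.0.0.1 -t 600 \
//     -c /data/data/com.github.shadowsocks/ss-local-nat.conf \
//     -f /data/data/com.github.shadowsocks/ss-local-nat.pid [-A] [--acl /data/data/com.github.shadowsocks/acl.list]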
def startTunnel() {
if (config.isUdpDns) {
val conf = ConfigUtils
.SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, 8153,
config.sitekey, config.encMethod, 10)
ConfigUtils.printToFile(new File(Path.BASE + "ss-tunnel-nat.conf"))(p => {
p.println(conf)
})
val cmd = new ArrayBuffer[String]
cmd += (Path.BASE + "ss-tunnel"
, "-u"
, "-t" , "10"
, "-b" , "127.0.0.1"
, "-L" , "8.8.8.8:53"
, "-c" , Path.BASE + "ss-tunnel-nat.conf"
, "-f" , Path.BASE + "ss-tunnel-nat.pid")
cmd += ("-l" , "8153")
if (config.isAuth) cmd += "-A"
if (BuildConfig.DEBUG) Log.d(TAG, cmd.mkString(" "))
Console.runCommand(cmd.mkString(" "))
} else {
val conf = ConfigUtils
.SHADOWSOCKS.formatLocal(Locale.ENGLISH, config.proxy, config.remotePort, 8163,
config.sitekey, config.encMethod, 10)
ConfigUtils.printToFile(new File(Path.BASE + "ss-tunnel-nat.conf"))(p => {
p.println(conf)
})
val cmdBuf = new ArrayBuffer[String]
cmdBuf += (Path.BASE + "ss-tunnel"
, "-u"
, "-t" , "10"
, "-b" , "127.0.0.1"
, "-l" , "8163"
, "-L" , "8.8.8.8:53"
, "-c" , Path.BASE + "ss-tunnel-nat.conf"
, "-f" , Path.BASE + "ss-tunnel-nat.pid")
if (config.isAuth) cmdBuf += "-A"
if (BuildConfig.DEBUG) Log.d(TAG, cmdBuf.mkString(" "))
Console.runCommand(cmdBuf.mkString(" "))
}
}
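// Port layout used above (descriptive note, derived from this file): in UDP-DNS
// mode ss-tunnel listens on 127.0.0.1:8153 and forwards DNS to 8.8.8.8:53;
// otherwise it listens on 8163, and the pdnsd daemon started in startDnsDaemon()
// serves DNS on 8153, with 8163 (the tunnel port) passed into its config template.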
def startDnsDaemon() {
val conf = if (config.route == Route.BYPASS_CHN) {
val reject = ConfigUtils.getRejectList(getContext, application)
val blackList = ConfigUtils.getBlackList(getContext, application)
ConfigUtils.PDNSD_DIRECT.formatLocal(Locale.ENGLISH, "127.0.0.1", 8153,
Path.BASE + "pdnsd-nat.pid", reject, blackList, 8163)
} else {
ConfigUtils.PDNSD_LOCAL.formatLocal(Locale.ENGLISH, "127.0.0.1", 8153,
Path.BASE + "pdnsd-nat.pid", 8163)
}
ConfigUtils.printToFile(new File(Path.BASE + "pdnsd-nat.conf"))(p => {
p.println(conf)
})
val cmd = Path.BASE + "pdnsd -c " + Path.BASE + "pdnsd-nat.conf"
if (BuildConfig.DEBUG) Log.d(TAG, cmd)
Console.runCommand(cmd)
}
def getVersionName: String = {
var version: String = null
try {
val pi: PackageInfo = getPackageManager.getPackageInfo(getPackageName, 0)
version = pi.versionName
} catch {
case e: PackageManager.NameNotFoundException =>
version = "Package name not found"
}
version
}
def startRedsocksDaemon() {
val conf = ConfigUtils.REDSOCKS.formatLocal(Locale.ENGLISH, config.localPort)
val cmd = Path.BASE + "redsocks -p %sredsocks-nat.pid -c %sredsocks-nat.conf"
.formatLocal(Locale.ENGLISH, Path.BASE, Path.BASE)
ConfigUtils.printToFile(new File(Path.BASE + "redsocks-nat.conf"))(p => {
p.println(conf)
})
if (BuildConfig.DEBUG) Log.d(TAG, cmd)
Console.runCommand(cmd)
}
/** Start the local daemons and set up the NAT rules for the current connection. */
def handleConnection: Boolean = {
startTunnel()
if (!config.isUdpDns) startDnsDaemon()
startRedsocksDaemon()
startShadowsocksDaemon()
setupIptables()
true
}
def invokeMethod(method: Method, args: Array[AnyRef]) {
try {
method.invoke(this, args: _*)
} catch {
case e: InvocationTargetException =>
Log.w(TAG, "Unable to invoke method", e)
case e: IllegalAccessException =>
Log.w(TAG, "Unable to invoke method", e)
}
}
def notifyForegroundAlert(title: String, info: String, visible: Boolean) {
val openIntent = new Intent(this, classOf[Shadowsocks])
openIntent.setFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT)
val contentIntent = PendingIntent.getActivity(this, 0, openIntent, 0)
val closeIntent = new Intent(Action.CLOSE)
val actionIntent = PendingIntent.getBroadcast(this, 0, closeIntent, 0)
val builder = new NotificationCompat.Builder(this)
builder
.setWhen(0)
.setTicker(title)
.setContentTitle(getString(R.string.app_name))
.setContentText(info)
.setContentIntent(contentIntent)
.setSmallIcon(R.drawable.ic_stat_shadowsocks)
.addAction(android.R.drawable.ic_menu_close_clear_cancel, getString(R.string.stop),
actionIntent)
if (visible)
builder.setPriority(NotificationCompat.PRIORITY_DEFAULT)
else
builder.setPriority(NotificationCompat.PRIORITY_MIN)
startForegroundCompat(1, builder.build)
}
def onBind(intent: Intent): IBinder = {
Log.d(TAG, "onBind")
if (Action.SERVICE == intent.getAction) {
binder
} else {
null
}
}
override def onCreate() {
super.onCreate()
ConfigUtils.refresh(this)
notificationManager = this
.getSystemService(Context.NOTIFICATION_SERVICE)
.asInstanceOf[NotificationManager]
try {
mStartForeground = getClass.getMethod("startForeground", mStartForegroundSignature: _*)
mStopForeground = getClass.getMethod("stopForeground", mStopForegroundSignature: _*)
} catch {
case e: NoSuchMethodException =>
// Running on an older API level: fall back to the setForeground path below.
mStartForeground = null
mStopForeground = null
}
try {
mSetForeground = getClass.getMethod("setForeground", mSetForegroundSignature: _*)
} catch {
case e: NoSuchMethodException =>
throw new IllegalStateException(
"OS doesn't have Service.startForeground OR Service.setForeground!")
}
}
def killProcesses() {
val cmd = new ArrayBuffer[String]()
for (task <- Array("ss-local", "ss-tunnel", "pdnsd", "redsocks")) {
cmd.append("chmod 666 %s%s-nat.pid".formatLocal(Locale.ENGLISH, Path.BASE, task))
}
Console.runRootCommand(cmd.toArray)
cmd.clear()
for (task <- Array("ss-local", "ss-tunnel", "pdnsd", "redsocks")) {
try {
val pid = scala.io.Source.fromFile(Path.BASE + task + "-nat.pid").mkString.trim.toInt
cmd.append("kill -9 %d".formatLocal(Locale.ENGLISH, pid))
Process.killProcess(pid)
} catch {
case e: Throwable => Log.e(TAG, "unable to kill " + task)
}
cmd.append("rm -f %s%s-nat.pid".formatLocal(Locale.ENGLISH, Path.BASE, task))
cmd.append("rm -f %s%s-nat.conf".formatLocal(Locale.ENGLISH, Path.BASE, task))
}
Console.runRootCommand(cmd.toArray)
Console.runRootCommand(Utils.getIptables + " -t nat -F OUTPUT")
}
def setupIptables() = {
val init_sb = new ArrayBuffer[String]
val http_sb = new ArrayBuffer[String]
init_sb.append("ulimit -n 4096")
init_sb.append(Utils.getIptables + " -t nat -F OUTPUT")
val cmd_bypass = Utils.getIptables + CMD_IPTABLES_RETURN
if (!InetAddressUtils.isIPv6Address(config.proxy.toUpperCase)) {
init_sb.append(cmd_bypass.replace("-p tcp -d 0.0.0.0", "-d " + config.proxy))
}
init_sb.append(cmd_bypass.replace("-p tcp -d 0.0.0.0", "-d 127.0.0.1"))
init_sb.append(cmd_bypass.replace("-p tcp -d 0.0.0.0", "-m owner --uid-owner " + myUid))
init_sb.append(cmd_bypass.replace("-d 0.0.0.0", "--dport 53"))
init_sb.append(Utils.getIptables
+ " -t nat -A OUTPUT -p udp --dport 53 -j DNAT --to-destination 127.0.0.1:" + 8153)
if (config.isGlobalProxy || config.isBypassApps) {
http_sb.append(Utils.getIptables + CMD_IPTABLES_DNAT_ADD_SOCKS)
}
if (!config.isGlobalProxy) {
if (apps == null || apps.length <= 0) {
apps = AppManager.getProxiedApps(this, config.proxiedAppString)
}
val uidSet: mutable.HashSet[Int] = new mutable.HashSet[Int]
for (app <- apps) {
if (app.proxied) {
uidSet.add(app.uid)
}
}
for (uid <- uidSet) {
if (!config.isBypassApps) {
http_sb.append((Utils.getIptables + CMD_IPTABLES_DNAT_ADD_SOCKS).replace("-t nat", "-t nat -m owner --uid-owner " + uid))
} else {
init_sb.append(cmd_bypass.replace("-d 0.0.0.0", "-m owner --uid-owner " + uid))
}
}
}
Console.runRootCommand(init_sb.toArray)
Console.runRootCommand(http_sb.toArray)
}
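// Illustration only (not part of the original source): with a hypothetical
// server address of 1.2.3.4 and global proxy enabled, the rules generated
// above expand roughly to (iptables stands for whatever Utils.getIptables returns):
//   iptables -t nat -A OUTPUT -d 1.2.3.4 -j RETURN
//   iptables -t nat -A OUTPUT -d 127.0.0.1 -j RETURN
//   iptables -t nat -A OUTPUT -m owner --uid-owner <myUid> -j RETURN
//   iptables -t nat -A OUTPUT -p tcp --dport 53 -j RETURN
//   iptables -t nat -A OUTPUT -p udp --dport 53 -j DNAT --to-destination 127.0.0.1:8153
//   iptables -t nat -A OUTPUT -p tcp -j DNAT --to-destination 127.0.0.1:8123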
/**
* Wrapper around the newer startForeground method, falling back to the older
* setForeground API when startForeground is unavailable.
*/
def startForegroundCompat(id: Int, notification: Notification) {
if (mStartForeground != null) {
mStartForegroundArgs(0) = int2Integer(id)
mStartForegroundArgs(1) = notification
invokeMethod(mStartForeground, mStartForegroundArgs)
return
}
mSetForegroundArgs(0) = boolean2Boolean(x = true)
invokeMethod(mSetForeground, mSetForegroundArgs)
notificationManager.notify(id, notification)
}
/**
* Wrapper around the newer stopForeground method, falling back to the older
* APIs when stopForeground is unavailable.
*/
def stopForegroundCompat(id: Int) {
if (mStopForeground != null) {
mStopForegroundArgs(0) = boolean2Boolean(x = true)
try {
mStopForeground.invoke(this, mStopForegroundArgs: _*)
} catch {
case e: InvocationTargetException =>
Log.w(TAG, "Unable to invoke stopForeground", e)
case e: IllegalAccessException =>
Log.w(TAG, "Unable to invoke stopForeground", e)
}
return
}
notificationManager.cancel(id)
mSetForegroundArgs(0) = boolean2Boolean(x = false)
invokeMethod(mSetForeground, mSetForegroundArgs)
}
override def startRunner(c: Config) {
config = c
// register close receiver
val filter = new IntentFilter()
filter.addAction(Intent.ACTION_SHUTDOWN)
filter.addAction(Action.CLOSE)
closeReceiver = new BroadcastReceiver() {
def onReceive(context: Context, intent: Intent) {
Toast.makeText(context, R.string.stopping, Toast.LENGTH_SHORT).show()
stopRunner()
}
}
registerReceiver(closeReceiver, filter)
if (Utils.isLollipopOrAbove) {
val screenFilter = new IntentFilter()
screenFilter.addAction(Intent.ACTION_SCREEN_ON)
screenFilter.addAction(Intent.ACTION_SCREEN_OFF)
screenFilter.addAction(Intent.ACTION_USER_PRESENT)
lockReceiver = new BroadcastReceiver() {
def onReceive(context: Context, intent: Intent) {
if (getState == State.CONNECTED) {
val action = intent.getAction
if (action == Intent.ACTION_SCREEN_OFF) {
notifyForegroundAlert(getString(R.string.forward_success),
getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), false)
} else if (action == Intent.ACTION_SCREEN_ON) {
val keyGuard = getSystemService(Context.KEYGUARD_SERVICE).asInstanceOf[KeyguardManager]
if (!keyGuard.inKeyguardRestrictedInputMode) {
notifyForegroundAlert(getString(R.string.forward_success),
getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), true)
}
} else if (action == Intent.ACTION_USER_PRESENT) {
notifyForegroundAlert(getString(R.string.forward_success),
getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), true)
}
}
}
}
registerReceiver(lockReceiver, screenFilter)
}
// send event
application.tracker.send(new HitBuilders.EventBuilder()
.setCategory(TAG)
.setAction("start")
.setLabel(getVersionName)
.build())
changeState(State.CONNECTING)
spawn {
if (config.proxy == "198.199.101.152") {
val holder = application.containerHolder
try {
config = ConfigUtils.getPublicConfig(getBaseContext, holder.getContainer, config)
} catch {
case ex: Exception =>
changeState(State.STOPPED, getString(R.string.service_failed))
stopRunner()
config = null
}
}
if (config != null) {
// Clean up
killProcesses()
var resolved: Boolean = false
if (!InetAddressUtils.isIPv4Address(config.proxy) &&
!InetAddressUtils.isIPv6Address(config.proxy)) {
Utils.resolve(config.proxy, enableIPv6 = true) match {
case Some(a) =>
config.proxy = a
resolved = true
case None => resolved = false
}
} else {
resolved = true
}
if (resolved && handleConnection) {
// Set DNS
flushDns()
notifyForegroundAlert(getString(R.string.forward_success),
getString(R.string.service_running).formatLocal(Locale.ENGLISH, config.profileName), true)
changeState(State.CONNECTED)
} else {
changeState(State.STOPPED, getString(R.string.service_failed))
stopRunner()
}
}
}
}
override def stopRunner() {
// change the state
changeState(State.STOPPING)
// clean up the receiver
if (closeReceiver != null) {
unregisterReceiver(closeReceiver)
closeReceiver = null
}
if (Utils.isLollipopOrAbove) {
if (lockReceiver != null) {
unregisterReceiver(lockReceiver)
lockReceiver = null
}
}
// send event
application.tracker.send(new HitBuilders.EventBuilder()
.setCategory(TAG)
.setAction("stop")
.setLabel(getVersionName)
.build())
// reset NAT
killProcesses()
// stop the service if no callback registered
if (getCallbackCount == 0) {
stopSelf()
}
stopForegroundCompat(1)
// change the state
changeState(State.STOPPED)
}
override def stopBackgroundService() {
stopSelf()
}
override def getTag = TAG
override def getServiceMode = Mode.NAT
override def getContext = getBaseContext
}
| sjzhao/shadowsocks-android | src/main/scala/com/github/shadowsocks/ShadowsocksNatService.scala | Scala | gpl-3.0 | 21,895 |
package org.bitcoins.node.networking.peer
import org.bitcoins.chain.api.ChainApi
import org.bitcoins.chain.config.ChainAppConfig
import org.bitcoins.core.crypto.DoubleSha256DigestBE
import org.bitcoins.core.p2p._
import org.bitcoins.core.protocol.blockchain.{Block, MerkleBlock}
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.node.models.BroadcastAbleTransactionDAO
import org.bitcoins.node.{P2PLogger, SpvNodeCallbacks}
import slick.jdbc.SQLiteProfile
import scala.concurrent.{ExecutionContext, Future}
/** This actor is meant to handle a [[org.bitcoins.core.p2p.DataPayload DataPayload]]
* that a peer sent to us on the p2p network. For instance, if we receive a
* [[org.bitcoins.core.p2p.HeadersMessage HeadersMessage]], we should store those headers in our database.
*/
case class DataMessageHandler(
chainApi: ChainApi,
callbacks: SpvNodeCallbacks,
receivedFilterCount: Int = 0,
syncing: Boolean = false)(
implicit ec: ExecutionContext,
appConfig: NodeAppConfig,
chainConfig: ChainAppConfig)
extends P2PLogger {
private val txDAO = BroadcastAbleTransactionDAO(SQLiteProfile)
def handleDataPayload(
payload: DataPayload,
peerMsgSender: PeerMessageSender): Future[DataMessageHandler] = {
payload match {
case checkpoint: CompactFilterCheckPointMessage =>
logger.debug(
s"Got ${checkpoint.filterHeaders.size} checkpoints ${checkpoint}")
for {
newChainApi <- chainApi.processCheckpoints(
checkpoint.filterHeaders.map(_.flip),
checkpoint.stopHash.flip)
} yield {
this.copy(chainApi = newChainApi)
}
case filterHeader: CompactFilterHeadersMessage =>
logger.info(
s"Got ${filterHeader.filterHashes.size} compact filter header hashes")
val filterHeaders = filterHeader.filterHeaders
for {
newChainApi <- chainApi.processFilterHeaders(
filterHeaders,
filterHeader.stopHash.flip)
newSyncing <- if (filterHeaders.size == chainConfig.filterHeaderBatchSize) {
logger.info(
s"Received maximum amount of filter headers in one header message. This means we are not synced, requesting more")
sendNextGetCompactFilterHeadersCommand(
peerMsgSender,
filterHeader.stopHash.flip).map(_ => syncing)
} else {
logger.debug(
s"Received filter headers=${filterHeaders.size} in one message, " +
"which is less than max. This means we are synced.")
sendFirstGetCompactFilterCommand(peerMsgSender).map { synced =>
if (!synced)
logger.info("We are synced")
synced
}
}
} yield {
this.copy(chainApi = newChainApi, syncing = newSyncing)
}
case filter: CompactFilterMessage =>
logger.debug(s"Received ${filter.commandName}, $filter")
for {
(newCount, newSyncing) <- if (receivedFilterCount == chainConfig.filterBatchSize - 1) {
logger.info(
s"Received maximum amount of filters in one batch. This means we are not synced, requesting more")
for {
_ <- sendNextGetCompactFilterCommand(peerMsgSender,
filter.blockHash.flip)
} yield (0, syncing)
} else {
for {
filterHeaderCount <- chainApi.getFilterHeaderCount
filterCount <- chainApi.getFilterCount
} yield {
val syncing = filterCount < filterHeaderCount - 1
if (!syncing) {
logger.info(s"We are synced")
}
(receivedFilterCount + 1, syncing)
}
}
newChainApi <- chainApi.processFilter(filter)
} yield {
this.copy(chainApi = newChainApi,
receivedFilterCount = newCount,
syncing = newSyncing)
}
case notHandling @ (MemPoolMessage | _: GetHeadersMessage |
_: GetBlocksMessage | _: GetCompactFiltersMessage |
_: GetCompactFilterHeadersMessage |
_: GetCompactFilterCheckPointMessage) =>
logger.debug(s"Received ${notHandling.commandName} message, skipping ")
Future.successful(this)
case getData: GetDataMessage =>
logger.info(
s"Received a getdata message for inventories=${getData.inventories}")
getData.inventories.foreach { inv =>
logger.debug(s"Looking for inv=$inv")
inv.typeIdentifier match {
case TypeIdentifier.MsgTx =>
txDAO.findByHash(inv.hash).map {
case Some(tx) =>
peerMsgSender.sendTransactionMessage(tx.transaction)
case None =>
logger.warn(
s"Got request to send data with hash=${inv.hash}, but found nothing")
}
case other @ (TypeIdentifier.MsgBlock |
TypeIdentifier.MsgFilteredBlock |
TypeIdentifier.MsgCompactBlock |
TypeIdentifier.MsgFilteredWitnessBlock |
TypeIdentifier.MsgWitnessBlock | TypeIdentifier.MsgWitnessTx) =>
logger.warn(
s"Got request to send data type=$other, this is not implemented yet")
case unassigned: MsgUnassigned =>
logger.warn(
s"Received unassigned message we do not understand, msg=${unassigned}")
}
}
Future.successful(this)
case HeadersMessage(count, headers) =>
logger.info(s"Received headers message with ${count.toInt} headers")
logger.trace(
s"Received headers=${headers.map(_.hashBE.hex).mkString("[", ",", "]")}")
val chainApiF = chainApi.processHeaders(headers)
if (appConfig.isSPVEnabled) {
logger.trace(s"Requesting data for headers=${headers.length}")
peerMsgSender.sendGetDataMessage(headers: _*)
}
val getHeadersF = chainApiF
.flatMap { newApi =>
if (headers.nonEmpty) {
val lastHeader = headers.last
val lastHash = lastHeader.hash
newApi.getBlockCount.map { count =>
logger.trace(
s"Processed headers, most recent has height=$count and hash=$lastHash.")
}
if (count.toInt == HeadersMessage.MaxHeadersCount) {
logger.info(
s"Received maximum amount of headers in one header message. This means we are not synced, requesting more")
peerMsgSender
.sendGetHeadersMessage(lastHash)
.map(_ => syncing)
} else {
logger.debug(
List(s"Received headers=${count.toInt} in one message,",
"which is less than max. This means we are synced,",
"not requesting more.")
.mkString(" "))
if (appConfig.isNeutrinoEnabled && !syncing)
sendFirstGetCompactFilterHeadersCommand(peerMsgSender)
else
Future.successful(syncing)
}
} else
Future.successful(syncing)
}
getHeadersF.failed.map { err =>
logger.error(s"Error when processing headers message", err)
}
for {
newApi <- chainApiF
newSyncing <- getHeadersF
} yield {
this.copy(chainApi = newApi, syncing = newSyncing)
}
case msg: BlockMessage =>
Future {
callbacks.onBlockReceived.foreach(_.apply(msg.block))
this
}
case TransactionMessage(tx) =>
val belongsToMerkle =
MerkleBuffers.putTx(tx, callbacks.onMerkleBlockReceived)
if (belongsToMerkle) {
logger.trace(
s"Transaction=${tx.txIdBE} belongs to merkleblock, not calling callbacks")
Future.successful(this)
} else {
logger.trace(
s"Transaction=${tx.txIdBE} does not belong to merkleblock, processing given callbacks")
Future {
callbacks.onTxReceived.foreach(_.apply(tx))
this
}
}
case MerkleBlockMessage(merkleBlock) =>
MerkleBuffers.putMerkle(merkleBlock)
Future.successful(this)
case invMsg: InventoryMessage =>
handleInventoryMsg(invMsg = invMsg, peerMsgSender = peerMsgSender)
}
}
private def sendNextGetCompactFilterHeadersCommand(
peerMsgSender: PeerMessageSender,
stopHash: DoubleSha256DigestBE): Future[Boolean] = {
for {
nextRangeOpt <- chainApi.nextHeaderBatchRange(
stopHash,
chainConfig.filterHeaderBatchSize)
res <- nextRangeOpt match {
case Some((startHeight, stopHash)) =>
logger.info(
s"Requesting compact filter headers from=$startHeight to=${stopHash.flip}")
peerMsgSender
.sendGetCompactFilterHeadersMessage(startHeight, stopHash)
.map(_ => true)
case None =>
Future.successful(false)
}
} yield res
}
private def sendFirstGetCompactFilterHeadersCommand(
peerMsgSender: PeerMessageSender): Future[Boolean] =
for {
filterHeaderCount <- chainApi.getFilterHeaderCount
highestFilterHeaderOpt <- chainApi
.getFilterHeadersAtHeight(filterHeaderCount)
.map(_.headOption)
highestFilterBlockHash = highestFilterHeaderOpt
.map(_.blockHashBE)
.getOrElse(DoubleSha256DigestBE.empty)
res <- sendNextGetCompactFilterHeadersCommand(peerMsgSender,
highestFilterBlockHash)
} yield res
private def sendNextGetCompactFilterCommand(
peerMsgSender: PeerMessageSender,
stopHash: DoubleSha256DigestBE): Future[Boolean] = {
for {
nextRangeOpt <- chainApi.nextFilterHeaderBatchRange(
stopHash,
chainConfig.filterBatchSize)
res <- nextRangeOpt match {
case Some((startHeight, stopHash)) =>
logger.info(
s"Requesting compact filters from=$startHeight to=${stopHash.flip}")
peerMsgSender
.sendGetCompactFiltersMessage(startHeight, stopHash)
.map(_ => true)
case None =>
Future.successful(false)
}
} yield res
}
private def sendFirstGetCompactFilterCommand(
peerMsgSender: PeerMessageSender): Future[Boolean] =
for {
filterCount <- chainApi.getFilterCount
highestFilterOpt <- chainApi
.getFiltersAtHeight(filterCount)
.map(_.headOption)
highestFilterBlockHash = highestFilterOpt
.map(_.blockHashBE)
.getOrElse(DoubleSha256DigestBE.empty)
res <- sendNextGetCompactFilterCommand(peerMsgSender,
highestFilterBlockHash)
} yield res
private def handleInventoryMsg(
invMsg: InventoryMessage,
peerMsgSender: PeerMessageSender): Future[DataMessageHandler] = {
logger.info(s"Received inv=${invMsg}")
val getData = GetDataMessage(invMsg.inventories.map {
case Inventory(TypeIdentifier.MsgBlock, hash) =>
Inventory(TypeIdentifier.MsgFilteredBlock, hash)
case other: Inventory => other
})
peerMsgSender.sendMsg(getData)
Future.successful(this)
}
}
object DataMessageHandler {
/** Callback for handling a received block */
type OnBlockReceived = Block => Unit
/** Callback for handling a received Merkle block with its corresponding TXs */
type OnMerkleBlockReceived = (MerkleBlock, Vector[Transaction]) => Unit
/** Callback for handling a received transaction */
type OnTxReceived = Transaction => Unit
/** Does nothing */
def noop[T]: T => Unit = _ => ()
}
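// A minimal usage sketch (not part of the original file; how SpvNodeCallbacks is
// actually constructed is assumed rather than shown here). The aliases above are
// plain functions, so handlers can be written directly:
//
//   val onTx: DataMessageHandler.OnTxReceived = tx => println(s"saw tx ${tx.txIdBE}")
//   val onBlock: DataMessageHandler.OnBlockReceived = DataMessageHandler.noop
//
// and then handed to the node's SpvNodeCallbacks.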
| bitcoin-s/bitcoin-s-core | node/src/main/scala/org/bitcoins/node/networking/peer/DataMessageHandler.scala | Scala | mit | 12,070 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.partition
import java.sql.Timestamp
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
import org.apache.spark.sql.Row
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest
class TestShowPartition extends QueryTest with BeforeAndAfterAll {
override def beforeAll = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
sql("drop table if exists notPartitionTable")
sql("""
| CREATE TABLE notPartitionTable
| (
| vin String,
| logdate Timestamp,
| phonenumber Int,
| country String,
| area String
| )
| STORED BY 'carbondata'
""".stripMargin)
sql("drop table if exists hashTable")
sql(
"""
| CREATE TABLE hashTable (empname String, designation String, doj Timestamp,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (empno int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='HASH','NUM_PARTITIONS'='3')
""".stripMargin)
sql("drop table if exists rangeTable")
sql(
"""
| CREATE TABLE rangeTable (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='01-01-2010, 01-01-2015')
""".stripMargin)
sql("drop table if exists listTable")
sql(
"""
| CREATE TABLE listTable (empno int, empname String, designation String, doj Timestamp,
| workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (workgroupcategory int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='0, 1, (2, 3)')
""".stripMargin)
sql(s"CREATE DATABASE if not exists partitionDB")
sql("drop table if exists partitionDB.hashTable")
sql("drop table if exists partitionDB.rangeTable")
sql("drop table if exists partitionDB.listTable")
sql(
"""
| CREATE TABLE partitionDB.hashTable (empname String, designation String, doj Timestamp,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (empno int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='HASH','NUM_PARTITIONS'='3')
""".stripMargin)
sql(
"""
| CREATE TABLE partitionDB.rangeTable (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='01-01-2010, 01-01-2015')
""".stripMargin)
sql(
"""
| CREATE TABLE partitionDB.listTable (empno int, empname String, designation String,
| doj Timestamp,workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (workgroupcategory int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='0, 1, (2, 3)')
""".stripMargin)
sql("DROP TABLE IF EXISTS hiveTable")
sql("""
| create table hiveTable(id int, name string) partitioned by (city string)
| row format delimited fields terminated by ','
""".stripMargin)
sql("alter table hiveTable add partition (city = 'Hangzhou')")
sql(s"CREATE DATABASE if not exists hiveDB")
sql("DROP TABLE IF EXISTS hiveDB.hiveTable")
sql("""
| create table hiveDB.hiveTable(id int, name string) partitioned by (city string)
| row format delimited fields terminated by ','
""".stripMargin)
sql("alter table hiveDB.hiveTable add partition (city = 'Shanghai')")
}
test("show partition table: exception when show not partition table") {
val errorMessage =
intercept[AnalysisException] { sql("show partitions notPartitionTable").show() }
assert(errorMessage.getMessage.contains(
"SHOW PARTITIONS is not allowed on a table that is not partitioned: notpartitiontable"))
}
test("show partition table: hash table") {
// EqualTo
checkAnswer(sql("show partitions hashTable"), Seq(Row("empno = HASH_NUMBER(3)")))
}
test("show partition table: range partition") {
// EqualTo
checkAnswer(sql("show partitions rangeTable"), Seq(Row("0, doj = DEFAULT"),
Row("1, doj < 01-01-2010"), Row("2, 01-01-2010 <= doj < 01-01-2015")))
}
test("show partition table: list partition") {
// EqualTo
checkAnswer(sql("show partitions listTable"), Seq(Row("0, workgroupcategory = DEFAULT"),
Row("1, workgroupcategory = 0"), Row("2, workgroupcategory = 1"), Row("3, workgroupcategory = 2, 3")))
}
test("show partition table: not default db") {
// EqualTo
checkAnswer(sql("show partitions partitionDB.hashTable"), Seq(Row("empno = HASH_NUMBER(3)")))
// EqualTo
checkAnswer(sql("show partitions partitionDB.rangeTable"), Seq(Row("0, doj = DEFAULT"),
Row("1, doj < 01-01-2010"), Row("2, 01-01-2010 <= doj < 01-01-2015")))
// EqualTo
checkAnswer(sql("show partitions partitionDB.listTable"), Seq(Row("0, workgroupcategory = DEFAULT"),
Row("1, workgroupcategory = 0"), Row("2, workgroupcategory = 1"), Row("3, workgroupcategory = 2, 3")))
}
test("show partition table: hive partition table") {
// EqualTo
checkAnswer(sql("show partitions hiveTable"), Seq(Row("city=Hangzhou")))
sql("use hiveDB").show()
checkAnswer(sql("show partitions hiveTable"), Seq(Row("city=Shanghai")))
sql("use default").show()
}
override def afterAll = {
sql("drop table if exists notPartitionTable")
sql("drop table if exists hashTable")
sql("drop table if exists listTable")
sql("drop table if exists rangeTable")
sql("drop table if exists hiveTable")
try {
sql("drop table if exists partitionDB.hashTable")
} catch {
case ex: NoSuchDatabaseException => print(ex.getMessage())
}
try {
sql("drop table if exists partitionDB.rangeTable")
} catch {
case ex: NoSuchDatabaseException => print(ex.getMessage())
}
try {
sql("drop table if exists partitionDB.listTable")
} catch {
case ex: NoSuchDatabaseException => print(ex.getMessage())
}
try {
sql("drop table if exists hiveDB.hiveTable")
} catch {
case ex: NoSuchDatabaseException => print(ex.getMessage())
}
sql("DROP DATABASE if exists partitionDB")
sql("DROP DATABASE if exists hiveDB")
}
}
| HuaweiBigData/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestShowPartitions.scala | Scala | apache-2.0 | 8,847 |
package test
import play.api.test._
import org.specs2.execute.{Result, AsResult}
abstract class App(app:FakeApplication = App.app) extends WithApplication(app) {
override def around[T: AsResult](t: => T): Result = super.around {
wipeData()
val result = t
result
}
def wipeData() {
}
}
object App {
def app = FakeApplication(additionalConfiguration =
Map(
"db.default.driver" -> "org.postgresql.Driver",
"db.default.url" -> "jdbc:postgresql://localhost:5432/booktown",
"evolutionplugin" -> "enabled",
"db.default.user" -> "",
"db.default.password" -> "",
"applyEvolutions.default" -> "true",
"applyDownEvolutions.default" -> "true"
)
)
}
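// Hypothetical usage sketch (names below are illustrative, not from this repo):
// each example runs inside the FakeApplication configured above, and wipeData()
// is invoked before the body via the overridden around():
//
//   class BooksSpec extends org.specs2.mutable.Specification {
//     "GET /books" should {
//       "respond against a freshly wiped database" in new App {
//         // exercise routes here via play.api.test.Helpers
//         success
//       }
//     }
//   }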
| stylight/postgresql-rest-api | test/test/App.scala | Scala | mit | 719 |
package com.lvxingpai.model.mixin
import com.lvxingpai.model.geo.GeoPoint
import scala.beans.BeanProperty
/**
* Latitude/longitude data.
*
* Created by zephyre on 10/20/15.
*/
trait GeoPointEnabled {
@BeanProperty
var location: GeoPoint = null
}
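// Hypothetical usage sketch: a model gains bean-style accessors for its
// coordinate simply by mixing in the trait, e.g.
//
//   class Restaurant extends GeoPointEnabled
//   val r = new Restaurant
//   r.setLocation(somePoint)   // setLocation/getLocation come from @BeanProperty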
| Lvxingpai/core-model | src/main/scala/com/lvxingpai/model/mixin/GeoPointEnabled.scala | Scala | apache-2.0 | 249 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.io._
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.{SelectionKey, SocketChannel}
import java.nio.charset.StandardCharsets
import java.util
import java.util.concurrent.{CompletableFuture, ConcurrentLinkedQueue, Executors, TimeUnit}
import java.util.{Properties, Random}
import com.fasterxml.jackson.databind.node.{JsonNodeFactory, ObjectNode, TextNode}
import com.yammer.metrics.core.{Gauge, Meter}
import javax.net.ssl._
import kafka.metrics.KafkaYammerMetrics
import kafka.security.CredentialProvider
import kafka.server.{KafkaConfig, SimpleApiVersionManager, ThrottleCallback, ThrottledChannel}
import kafka.utils.Implicits._
import kafka.utils.TestUtils
import org.apache.kafka.common.memory.MemoryPool
import org.apache.kafka.common.message.ApiMessageType.ListenerType
import org.apache.kafka.common.message.{ProduceRequestData, SaslAuthenticateRequestData, SaslHandshakeRequestData, VoteRequestData}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.KafkaChannel.ChannelMuteState
import org.apache.kafka.common.network.{ClientInformation, _}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests
import org.apache.kafka.common.requests._
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.common.security.scram.internals.ScramMechanism
import org.apache.kafka.common.utils.{AppInfoParser, LogContext, MockTime, Time, Utils}
import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils}
import org.apache.log4j.Level
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.jdk.CollectionConverters._
import scala.util.control.ControlThrowable
class SocketServerTest {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
props.put("listeners", "PLAINTEXT://localhost:0")
props.put("num.network.threads", "1")
props.put("socket.send.buffer.bytes", "300000")
props.put("socket.receive.buffer.bytes", "300000")
props.put("queued.max.requests", "50")
props.put("socket.request.max.bytes", "100")
props.put("max.connections.per.ip", "5")
props.put("connections.max.idle.ms", "60000")
val config = KafkaConfig.fromProps(props)
val metrics = new Metrics
val credentialProvider = new CredentialProvider(ScramMechanism.mechanismNames, null)
val localAddress = InetAddress.getLoopbackAddress
// Clean-up any metrics left around by previous tests
TestUtils.clearYammerMetrics()
private val apiVersionManager = new SimpleApiVersionManager(ListenerType.ZK_BROKER)
val server = new SocketServer(config, metrics, Time.SYSTEM, credentialProvider, apiVersionManager)
server.startup()
val sockets = new ArrayBuffer[Socket]
private val kafkaLogger = org.apache.log4j.LogManager.getLogger("kafka")
private var logLevelToRestore: Level = _
@BeforeEach
def setUp(): Unit = {
// Run the tests with TRACE logging to exercise request logging path
logLevelToRestore = kafkaLogger.getLevel
kafkaLogger.setLevel(Level.TRACE)
assertTrue(server.controlPlaneRequestChannelOpt.isEmpty)
}
@AfterEach
def tearDown(): Unit = {
shutdownServerAndMetrics(server)
sockets.foreach(_.close())
sockets.clear()
kafkaLogger.setLevel(logLevelToRestore)
}
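// Note on framing (added for clarity): sendRequest below writes a 4-byte length
// prefix (plus an optional 2-byte id) followed by the raw request bytes, and
// receiveResponse reads the same length-prefixed framing back off the socket.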
def sendRequest(socket: Socket, request: Array[Byte], id: Option[Short] = None, flush: Boolean = true): Unit = {
val outgoing = new DataOutputStream(socket.getOutputStream)
id match {
case Some(id) =>
outgoing.writeInt(request.length + 2)
outgoing.writeShort(id)
case None =>
outgoing.writeInt(request.length)
}
outgoing.write(request)
if (flush)
outgoing.flush()
}
def sendApiRequest(socket: Socket, request: AbstractRequest, header: RequestHeader): Unit = {
val serializedBytes = Utils.toArray(RequestTestUtils.serializeRequestWithHeader(header, request))
sendRequest(socket, serializedBytes)
}
def receiveResponse(socket: Socket): Array[Byte] = {
val incoming = new DataInputStream(socket.getInputStream)
val len = incoming.readInt()
val response = new Array[Byte](len)
incoming.readFully(response)
response
}
private def receiveRequest(channel: RequestChannel, timeout: Long = 2000L): RequestChannel.Request = {
channel.receiveRequest(timeout) match {
case request: RequestChannel.Request => request
case RequestChannel.ShutdownRequest => throw new AssertionError("Unexpected shutdown received")
case null => throw new AssertionError("receiveRequest timed out")
}
}
/* A simple request handler that just echoes the request back as the response */
def processRequest(channel: RequestChannel): Unit = {
processRequest(channel, receiveRequest(channel))
}
def processRequest(channel: RequestChannel, request: RequestChannel.Request): Unit = {
val byteBuffer = RequestTestUtils.serializeRequestWithHeader(request.header, request.body[AbstractRequest])
val send = new NetworkSend(request.context.connectionId, ByteBufferSend.sizePrefixed(byteBuffer))
val headerLog = RequestConvertToJson.requestHeaderNode(request.header)
channel.sendResponse(new RequestChannel.SendResponse(request, send, Some(headerLog), None))
}
def processRequestNoOpResponse(channel: RequestChannel, request: RequestChannel.Request): Unit = {
channel.sendNoOpResponse(request)
}
def connect(s: SocketServer = server,
listenerName: ListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT),
localAddr: InetAddress = null,
port: Int = 0): Socket = {
val socket = new Socket("localhost", s.boundPort(listenerName), localAddr, port)
sockets += socket
socket
}
def sslConnect(s: SocketServer = server): Socket = {
val socket = sslClientSocket(s.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.SSL)))
sockets += socket
socket
}
private def sslClientSocket(port: Int): Socket = {
val sslContext = SSLContext.getInstance(TestSslUtils.DEFAULT_TLS_PROTOCOL_FOR_TESTS)
sslContext.init(null, Array(TestUtils.trustAllCerts), new java.security.SecureRandom())
val socketFactory = sslContext.getSocketFactory
val socket = socketFactory.createSocket("localhost", port)
socket.asInstanceOf[SSLSocket].setNeedClientAuth(false)
socket
}
// Create a client connection, process one request and return (client socket, connectionId)
def connectAndProcessRequest(s: SocketServer): (Socket, String) = {
val securityProtocol = s.dataPlaneAcceptors.asScala.head._1.securityProtocol
val socket = securityProtocol match {
case SecurityProtocol.PLAINTEXT | SecurityProtocol.SASL_PLAINTEXT =>
connect(s)
case SecurityProtocol.SSL | SecurityProtocol.SASL_SSL =>
sslConnect(s)
case _ =>
throw new IllegalStateException(s"Unexpected security protocol $securityProtocol")
}
val request = sendAndReceiveRequest(socket, s)
processRequest(s.dataPlaneRequestChannel, request)
(socket, request.context.connectionId)
}
def sendAndReceiveRequest(socket: Socket, server: SocketServer): RequestChannel.Request = {
sendRequest(socket, producerRequestBytes())
receiveRequest(server.dataPlaneRequestChannel)
}
def shutdownServerAndMetrics(server: SocketServer): Unit = {
server.shutdown()
server.metrics.close()
}
private def producerRequestBytes(ack: Short = 0): Array[Byte] = {
val correlationId = -1
val clientId = ""
val ackTimeoutMs = 10000
val emptyRequest = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection())
.setAcks(ack)
.setTimeoutMs(ackTimeoutMs)
.setTransactionalId(null))
.build()
val emptyHeader = new RequestHeader(ApiKeys.PRODUCE, emptyRequest.version, clientId, correlationId)
Utils.toArray(RequestTestUtils.serializeRequestWithHeader(emptyHeader, emptyRequest))
}
private def apiVersionRequestBytes(clientId: String, version: Short): Array[Byte] = {
val request = new ApiVersionsRequest.Builder().build(version)
val header = new RequestHeader(ApiKeys.API_VERSIONS, request.version(), clientId, -1)
Utils.toArray(RequestTestUtils.serializeRequestWithHeader(header, request))
}
@Test
def simpleRequest(): Unit = {
val plainSocket = connect()
val serializedBytes = producerRequestBytes()
// Test PLAINTEXT socket
sendRequest(plainSocket, serializedBytes)
processRequest(server.dataPlaneRequestChannel)
assertEquals(serializedBytes.toSeq, receiveResponse(plainSocket).toSeq)
verifyAcceptorBlockedPercent("PLAINTEXT", expectBlocked = false)
}
private def testClientInformation(version: Short, expectedClientSoftwareName: String,
expectedClientSoftwareVersion: String): Unit = {
val plainSocket = connect()
val address = plainSocket.getLocalAddress
val clientId = "clientId"
// Send ApiVersionsRequest - unknown expected
sendRequest(plainSocket, apiVersionRequestBytes(clientId, version))
var receivedReq = receiveRequest(server.dataPlaneRequestChannel)
assertEquals(ClientInformation.UNKNOWN_NAME_OR_VERSION, receivedReq.context.clientInformation.softwareName)
assertEquals(ClientInformation.UNKNOWN_NAME_OR_VERSION, receivedReq.context.clientInformation.softwareVersion)
server.dataPlaneRequestChannel.sendNoOpResponse(receivedReq)
// Send ProduceRequest - client info expected
sendRequest(plainSocket, producerRequestBytes())
receivedReq = receiveRequest(server.dataPlaneRequestChannel)
assertEquals(expectedClientSoftwareName, receivedReq.context.clientInformation.softwareName)
assertEquals(expectedClientSoftwareVersion, receivedReq.context.clientInformation.softwareVersion)
server.dataPlaneRequestChannel.sendNoOpResponse(receivedReq)
// Close the socket
plainSocket.setSoLinger(true, 0)
plainSocket.close()
TestUtils.waitUntilTrue(() => server.connectionCount(address) == 0, msg = "Connection not closed")
}
@Test
def testClientInformationWithLatestApiVersionsRequest(): Unit = {
testClientInformation(
ApiKeys.API_VERSIONS.latestVersion,
"apache-kafka-java",
AppInfoParser.getVersion
)
}
@Test
def testClientInformationWithOldestApiVersionsRequest(): Unit = {
testClientInformation(
ApiKeys.API_VERSIONS.oldestVersion,
ClientInformation.UNKNOWN_NAME_OR_VERSION,
ClientInformation.UNKNOWN_NAME_OR_VERSION
)
}
@Test
def testStagedListenerStartup(): Unit = {
val testProps = new Properties
testProps ++= props
testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0,CONTROLLER://localhost:0")
testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT")
testProps.put("control.plane.listener.name", "CONTROLLER")
testProps.put("inter.broker.listener.name", "INTERNAL")
val config = KafkaConfig.fromProps(testProps)
val testableServer = new TestableSocketServer(config)
testableServer.startup(startProcessingRequests = false)
val updatedEndPoints = config.advertisedListeners.map { endpoint =>
endpoint.copy(port = testableServer.boundPort(endpoint.listenerName))
}.map(_.toJava)
val externalReadyFuture = new CompletableFuture[Void]()
val executor = Executors.newSingleThreadExecutor()
def controlPlaneListenerStarted() = {
try {
val socket = connect(testableServer, config.controlPlaneListenerName.get, localAddr = InetAddress.getLocalHost)
sendAndReceiveControllerRequest(socket, testableServer)
true
} catch {
case _: Throwable => false
}
}
def listenerStarted(listenerName: ListenerName) = {
try {
val socket = connect(testableServer, listenerName, localAddr = InetAddress.getLocalHost)
sendAndReceiveRequest(socket, testableServer)
true
} catch {
case _: Throwable => false
}
}
try {
val externalListener = new ListenerName("EXTERNAL")
val externalEndpoint = updatedEndPoints.find(e => e.listenerName.get == externalListener.value).get
val futures = Map(externalEndpoint -> externalReadyFuture)
val startFuture = executor.submit((() => testableServer.startProcessingRequests(futures)): Runnable)
TestUtils.waitUntilTrue(() => controlPlaneListenerStarted(), "Control plane listener not started")
TestUtils.waitUntilTrue(() => listenerStarted(config.interBrokerListenerName), "Inter-broker listener not started")
assertFalse(startFuture.isDone, "Socket server startup did not wait for future to complete")
assertFalse(listenerStarted(externalListener))
externalReadyFuture.complete(null)
TestUtils.waitUntilTrue(() => listenerStarted(externalListener), "External listener not started")
} finally {
executor.shutdownNow()
shutdownServerAndMetrics(testableServer)
}
}
@Test
def testStagedListenerShutdownWhenConnectionQueueIsFull(): Unit = {
val testProps = new Properties
testProps ++= props
testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0,CONTROLLER://localhost:0")
testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT")
testProps.put("control.plane.listener.name", "CONTROLLER")
testProps.put("inter.broker.listener.name", "INTERNAL")
val config = KafkaConfig.fromProps(testProps)
val connectionQueueSize = 1
val testableServer = new TestableSocketServer(config, connectionQueueSize)
testableServer.startup(startProcessingRequests = false)
val socket1 = connect(testableServer, new ListenerName("EXTERNAL"), localAddr = InetAddress.getLocalHost)
sendRequest(socket1, producerRequestBytes())
val socket2 = connect(testableServer, new ListenerName("EXTERNAL"), localAddr = InetAddress.getLocalHost)
sendRequest(socket2, producerRequestBytes())
testableServer.shutdown()
}
@Test
def testDisabledRequestIsRejected(): Unit = {
val correlationId = 57
val header = new RequestHeader(ApiKeys.VOTE, 0, "", correlationId)
val request = new VoteRequest.Builder(new VoteRequestData()).build()
val serializedBytes = Utils.toArray(RequestTestUtils.serializeRequestWithHeader(header, request))
val socket = connect()
val outgoing = new DataOutputStream(socket.getOutputStream)
try {
outgoing.writeInt(serializedBytes.length)
outgoing.write(serializedBytes)
outgoing.flush()
receiveResponse(socket)
} catch {
case _: IOException => // we expect the server to close the socket
} finally {
outgoing.close()
}
}
@Test
def tooBigRequestIsRejected(): Unit = {
val tooManyBytes = new Array[Byte](server.config.socketRequestMaxBytes + 1)
new Random().nextBytes(tooManyBytes)
val socket = connect()
val outgoing = new DataOutputStream(socket.getOutputStream)
outgoing.writeInt(tooManyBytes.length)
try {
// Server closes client connection when it processes the request length because
// it is too big. The write of request body may fail if the connection has been closed.
outgoing.write(tooManyBytes)
outgoing.flush()
receiveResponse(socket)
} catch {
case _: IOException => // that's fine
}
}
@Test
def testGracefulClose(): Unit = {
val plainSocket = connect()
val serializedBytes = producerRequestBytes()
for (_ <- 0 until 10)
sendRequest(plainSocket, serializedBytes)
plainSocket.close()
for (_ <- 0 until 10) {
val request = receiveRequest(server.dataPlaneRequestChannel)
assertNotNull(request, "receiveRequest timed out")
processRequestNoOpResponse(server.dataPlaneRequestChannel, request)
}
}
@Test
def testNoOpAction(): Unit = {
val plainSocket = connect()
val serializedBytes = producerRequestBytes()
for (_ <- 0 until 3)
sendRequest(plainSocket, serializedBytes)
for (_ <- 0 until 3) {
val request = receiveRequest(server.dataPlaneRequestChannel)
assertNotNull(request, "receiveRequest timed out")
processRequestNoOpResponse(server.dataPlaneRequestChannel, request)
}
}
@Test
def testConnectionId(): Unit = {
val sockets = (1 to 5).map(_ => connect())
val serializedBytes = producerRequestBytes()
val requests = sockets.map{socket =>
sendRequest(socket, serializedBytes)
receiveRequest(server.dataPlaneRequestChannel)
}
requests.zipWithIndex.foreach { case (request, i) =>
val index = request.context.connectionId.split("-").last
assertEquals(i.toString, index)
}
sockets.foreach(_.close)
}
@Test
def testIdleConnection(): Unit = {
val idleTimeMs = 60000
val time = new MockTime()
props.put(KafkaConfig.ConnectionsMaxIdleMsProp, idleTimeMs.toString)
val serverMetrics = new Metrics
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics,
time, credentialProvider, apiVersionManager)
try {
overrideServer.startup()
val serializedBytes = producerRequestBytes()
// Connection with no outstanding requests
val socket0 = connect(overrideServer)
sendRequest(socket0, serializedBytes)
val request0 = receiveRequest(overrideServer.dataPlaneRequestChannel)
processRequest(overrideServer.dataPlaneRequestChannel, request0)
assertTrue(openChannel(request0, overrideServer).nonEmpty, "Channel not open")
assertEquals(openChannel(request0, overrideServer), openOrClosingChannel(request0, overrideServer))
TestUtils.waitUntilTrue(() => !openChannel(request0, overrideServer).get.isMuted, "Failed to unmute channel")
time.sleep(idleTimeMs + 1)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request0, overrideServer).isEmpty, "Failed to close idle channel")
assertTrue(openChannel(request0, overrideServer).isEmpty, "Channel not removed")
// Connection with one request being processed (channel is muted), no other in-flight requests
val socket1 = connect(overrideServer)
sendRequest(socket1, serializedBytes)
val request1 = receiveRequest(overrideServer.dataPlaneRequestChannel)
assertTrue(openChannel(request1, overrideServer).nonEmpty, "Channel not open")
assertEquals(openChannel(request1, overrideServer), openOrClosingChannel(request1, overrideServer))
time.sleep(idleTimeMs + 1)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request1, overrideServer).isEmpty, "Failed to close idle channel")
assertTrue(openChannel(request1, overrideServer).isEmpty, "Channel not removed")
processRequest(overrideServer.dataPlaneRequestChannel, request1)
// Connection with one request being processed (channel is muted), more in-flight requests
val socket2 = connect(overrideServer)
val request2 = sendRequestsReceiveOne(overrideServer, socket2, serializedBytes, 3)
time.sleep(idleTimeMs + 1)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request2, overrideServer).isEmpty, "Failed to close idle channel")
assertTrue(openChannel(request1, overrideServer).isEmpty, "Channel not removed")
processRequest(overrideServer.dataPlaneRequestChannel, request2) // this triggers a failed send since channel has been closed
assertNull(overrideServer.dataPlaneRequestChannel.receiveRequest(200), "Received request on expired channel")
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testConnectionIdReuse(): Unit = {
val idleTimeMs = 60000
val time = new MockTime()
props.put(KafkaConfig.ConnectionsMaxIdleMsProp, idleTimeMs.toString)
props ++= sslServerProps
val serverMetrics = new Metrics
@volatile var selector: TestableSelector = null
val overrideConnectionId = "127.0.0.1:1-127.0.0.1:2-0"
val overrideServer = new SocketServer(
KafkaConfig.fromProps(props), serverMetrics, time, credentialProvider, apiVersionManager
) {
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool, isPrivilegedListener: Boolean): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, dataPlaneRequestChannel, connectionQuotas,
config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics,
credentialProvider, memoryPool, new LogContext(), Processor.ConnectionQueueSize, isPrivilegedListener, apiVersionManager) {
override protected[network] def connectionId(socket: Socket): String = overrideConnectionId
override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = {
val testableSelector = new TestableSelector(config, channelBuilder, time, metrics)
selector = testableSelector
testableSelector
}
}
}
}
def openChannel: Option[KafkaChannel] = overrideServer.dataPlaneProcessor(0).channel(overrideConnectionId)
def openOrClosingChannel: Option[KafkaChannel] = overrideServer.dataPlaneProcessor(0).openOrClosingChannel(overrideConnectionId)
def connectionCount = overrideServer.connectionCount(InetAddress.getByName("127.0.0.1"))
// Create a client connection and wait for server to register the connection with the selector. For
// test scenarios below where `Selector.register` fails, the wait ensures that checks are performed
// only after `register` is processed by the server.
def connectAndWaitForConnectionRegister(): Socket = {
val connections = selector.operationCounts(SelectorOperation.Register)
val socket = sslConnect(overrideServer)
TestUtils.waitUntilTrue(() =>
selector.operationCounts(SelectorOperation.Register) == connections + 1, "Connection not registered")
socket
}
try {
overrideServer.startup()
val socket1 = connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1 && openChannel.isDefined, "Failed to create channel")
val channel1 = openChannel.getOrElse(throw new RuntimeException("Channel not found"))
// Create new connection with same id when `channel1` is still open and in Selector.channels
// Check that new connection is closed and openChannel still contains `channel1`
connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1, "Failed to close channel")
assertSame(channel1, openChannel.getOrElse(throw new RuntimeException("Channel not found")))
socket1.close()
TestUtils.waitUntilTrue(() => openChannel.isEmpty, "Channel not closed")
// Create a channel with buffered receive and close remote connection
val request = makeChannelWithBufferedRequestsAndCloseRemote(overrideServer, selector)
val channel2 = openChannel.getOrElse(throw new RuntimeException("Channel not found"))
// Create new connection with same id when `channel2` is closing, but still in Selector.channels
// Check that new connection is closed and openOrClosingChannel still contains `channel2`
connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1, "Failed to close channel")
assertSame(channel2, openOrClosingChannel.getOrElse(throw new RuntimeException("Channel not found")))
// Complete request with failed send so that `channel2` is removed from Selector.channels
processRequest(overrideServer.dataPlaneRequestChannel, request)
TestUtils.waitUntilTrue(() => connectionCount == 0 && openOrClosingChannel.isEmpty, "Failed to remove channel with failed send")
// Check that new connections can be created with the same id since `channel1` is no longer in Selector
connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1 && openChannel.isDefined, "Failed to open new channel")
val newChannel = openChannel.getOrElse(throw new RuntimeException("Channel not found"))
assertNotSame(channel1, newChannel)
newChannel.disconnect()
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
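  /**
   * Sends one request that is received by the server, then buffers `numBufferedRequests` additional
   * requests in the channel's SSL buffers via the proxy. Returns the client socket and the first
   * request received on the data-plane request channel.
   */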
private def makeSocketWithBufferedRequests(server: SocketServer,
serverSelector: Selector,
proxyServer: ProxyServer,
numBufferedRequests: Int = 2): (Socket, RequestChannel.Request) = {
val requestBytes = producerRequestBytes()
val socket = sslClientSocket(proxyServer.localPort)
sendRequest(socket, requestBytes)
val request1 = receiveRequest(server.dataPlaneRequestChannel)
val connectionId = request1.context.connectionId
val channel = server.dataPlaneProcessor(0).channel(connectionId).getOrElse(throw new IllegalStateException("Channel not found"))
val transportLayer: SslTransportLayer = JTestUtils.fieldValue(channel, classOf[KafkaChannel], "transportLayer")
val netReadBuffer: ByteBuffer = JTestUtils.fieldValue(transportLayer, classOf[SslTransportLayer], "netReadBuffer")
proxyServer.enableBuffering(netReadBuffer)
(1 to numBufferedRequests).foreach { _ => sendRequest(socket, requestBytes) }
val keysWithBufferedRead: util.Set[SelectionKey] = JTestUtils.fieldValue(serverSelector, classOf[Selector], "keysWithBufferedRead")
keysWithBufferedRead.add(channel.selectionKey)
JTestUtils.setFieldValue(transportLayer, "hasBytesBuffered", true)
(socket, request1)
}
/**
* Create a channel with data in SSL buffers and close the remote connection.
* The channel should remain open in SocketServer even if it detects that the peer has closed
* the connection since there is pending data to be processed.
*/
private def makeChannelWithBufferedRequestsAndCloseRemote(server: SocketServer,
serverSelector: Selector,
makeClosing: Boolean = false): RequestChannel.Request = {
val proxyServer = new ProxyServer(server)
try {
val (socket, request1) = makeSocketWithBufferedRequests(server, serverSelector, proxyServer)
socket.close()
proxyServer.serverConnSocket.close()
TestUtils.waitUntilTrue(() => proxyServer.clientConnSocket.isClosed, "Client socket not closed", waitTimeMs = 10000)
processRequestNoOpResponse(server.dataPlaneRequestChannel, request1)
val channel = openOrClosingChannel(request1, server).getOrElse(throw new IllegalStateException("Channel closed too early"))
if (makeClosing)
serverSelector.asInstanceOf[TestableSelector].pendingClosingChannels.add(channel)
receiveRequest(server.dataPlaneRequestChannel, timeout = 10000)
} finally {
proxyServer.close()
}
}
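  /**
   * Sends `numRequests` requests on the socket, flushing only the last one, and returns the first
   * request received by the server.
   */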
def sendRequestsReceiveOne(server: SocketServer, socket: Socket, requestBytes: Array[Byte], numRequests: Int): RequestChannel.Request = {
(1 to numRequests).foreach(i => sendRequest(socket, requestBytes, flush = i == numRequests))
receiveRequest(server.dataPlaneRequestChannel)
}
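  /**
   * Repeatedly creates a socket, pipelines requests and closes the socket with SO_LINGER=0 until a
   * request is received after the client disconnect, i.e. until a channel has been closed while one
   * of its requests is still pending in the request channel.
   */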
private def closeSocketWithPendingRequest(server: SocketServer,
createSocket: () => Socket): RequestChannel.Request = {
def maybeReceiveRequest(): Option[RequestChannel.Request] = {
try {
Some(receiveRequest(server.dataPlaneRequestChannel, timeout = 1000))
} catch {
        case _: Exception => None
}
}
def closedChannelWithPendingRequest(): Option[RequestChannel.Request] = {
val socket = createSocket.apply()
val req1 = sendRequestsReceiveOne(server, socket, producerRequestBytes(ack = 0), numRequests = 100)
processRequestNoOpResponse(server.dataPlaneRequestChannel, req1)
// Set SoLinger to 0 to force a hard disconnect via TCP RST
socket.setSoLinger(true, 0)
socket.close()
maybeReceiveRequest().flatMap { req =>
processRequestNoOpResponse(server.dataPlaneRequestChannel, req)
maybeReceiveRequest()
}
}
val (request, _) = TestUtils.computeUntilTrue(closedChannelWithPendingRequest()) { req => req.nonEmpty }
request.getOrElse(throw new IllegalStateException("Could not create close channel with pending request"))
}
  // Prepares test setup for throttled channel tests. `throttlingInProgress` controls whether throttling
  // is still in progress in the quota manager when the response is sent.
def throttledChannelTestSetUp(socket: Socket, serializedBytes: Array[Byte], noOpResponse: Boolean,
throttlingInProgress: Boolean): RequestChannel.Request = {
sendRequest(socket, serializedBytes)
    // Mimic a primitive request handler that fetches the request from the RequestChannel and places a
    // response on a throttled channel.
val request = receiveRequest(server.dataPlaneRequestChannel)
val byteBuffer = RequestTestUtils.serializeRequestWithHeader(request.header, request.body[AbstractRequest])
val send = new NetworkSend(request.context.connectionId, ByteBufferSend.sizePrefixed(byteBuffer))
val channelThrottlingCallback = new ThrottleCallback {
override def startThrottling(): Unit = server.dataPlaneRequestChannel.startThrottling(request)
override def endThrottling(): Unit = server.dataPlaneRequestChannel.endThrottling(request)
}
val throttledChannel = new ThrottledChannel(new MockTime(), 100, channelThrottlingCallback)
val headerLog = RequestConvertToJson.requestHeaderNode(request.header)
val response =
if (!noOpResponse)
new RequestChannel.SendResponse(request, send, Some(headerLog), None)
else
new RequestChannel.NoOpResponse(request)
server.dataPlaneRequestChannel.sendResponse(response)
    // The quota manager would call notifyThrottlingDone() on throttling completion. Simulate it here if
    // throttlingInProgress is false.
if (!throttlingInProgress)
throttledChannel.notifyThrottlingDone()
request
}
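  // Helpers to look up the server-side channel for a request's connection id, either among open
  // channels only or also including channels in closing state.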
def openChannel(request: RequestChannel.Request, server: SocketServer = this.server): Option[KafkaChannel] =
server.dataPlaneProcessor(0).channel(request.context.connectionId)
def openOrClosingChannel(request: RequestChannel.Request, server: SocketServer = this.server): Option[KafkaChannel] =
server.dataPlaneProcessor(0).openOrClosingChannel(request.context.connectionId)
@Test
def testSendActionResponseWithThrottledChannelWhereThrottlingInProgress(): Unit = {
val socket = connect()
val serializedBytes = producerRequestBytes()
// SendAction with throttling in progress
val request = throttledChannelTestSetUp(socket, serializedBytes, false, true)
// receive response
assertEquals(serializedBytes.toSeq, receiveResponse(socket).toSeq)
    TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.MUTED_AND_THROTTLED), "Channel not muted and throttled")
// Channel should still be muted.
assertTrue(openOrClosingChannel(request).exists(c => c.isMuted()))
}
@Test
def testSendActionResponseWithThrottledChannelWhereThrottlingAlreadyDone(): Unit = {
val socket = connect()
val serializedBytes = producerRequestBytes()
    // SendAction with throttling already done
val request = throttledChannelTestSetUp(socket, serializedBytes, false, false)
// receive response
assertEquals(serializedBytes.toSeq, receiveResponse(socket).toSeq)
// Since throttling is already done, the channel can be unmuted after sending out the response.
    TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.NOT_MUTED), "Channel not unmuted")
// Channel is now unmuted.
assertFalse(openOrClosingChannel(request).exists(c => c.isMuted()))
}
@Test
def testNoOpActionResponseWithThrottledChannelWhereThrottlingInProgress(): Unit = {
val socket = connect()
val serializedBytes = producerRequestBytes()
    // NoOpAction with throttling in progress
val request = throttledChannelTestSetUp(socket, serializedBytes, true, true)
    TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.MUTED_AND_THROTTLED), "Channel not muted and throttled")
// Channel should still be muted.
assertTrue(openOrClosingChannel(request).exists(c => c.isMuted()))
}
@Test
def testNoOpActionResponseWithThrottledChannelWhereThrottlingAlreadyDone(): Unit = {
val socket = connect()
val serializedBytes = producerRequestBytes()
    // NoOpAction with throttling already done
val request = throttledChannelTestSetUp(socket, serializedBytes, true, false)
// Since throttling is already done, the channel can be unmuted.
    TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.NOT_MUTED), "Channel not unmuted")
// Channel is now unmuted.
assertFalse(openOrClosingChannel(request).exists(c => c.isMuted()))
}
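  // Open client sockets should observe the connection being closed when the SocketServer is shut down.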
@Test
def testSocketsCloseOnShutdown(): Unit = {
// open a connection
val plainSocket = connect()
plainSocket.setTcpNoDelay(true)
val bytes = new Array[Byte](40)
// send a request first to make sure the connection has been picked up by the socket server
sendRequest(plainSocket, bytes, Some(0))
processRequest(server.dataPlaneRequestChannel)
// the following sleep is necessary to reliably detect the connection close when we send data below
Thread.sleep(200L)
// make sure the sockets are open
server.dataPlaneAcceptors.asScala.values.foreach(acceptor => assertFalse(acceptor.serverChannel.socket.isClosed))
// then shutdown the server
shutdownServerAndMetrics(server)
verifyRemoteConnectionClosed(plainSocket)
}
@Test
def testMaxConnectionsPerIp(): Unit = {
// make the maximum allowable number of connections
val conns = (0 until server.config.maxConnectionsPerIp).map(_ => connect())
// now try one more (should fail)
val conn = connect()
conn.setSoTimeout(3000)
assertEquals(-1, conn.getInputStream.read())
conn.close()
// it should succeed after closing one connection
val address = conns.head.getInetAddress
conns.head.close()
TestUtils.waitUntilTrue(() => server.connectionCount(address) < conns.length,
"Failed to decrement connection count after close")
val conn2 = connect()
val serializedBytes = producerRequestBytes()
sendRequest(conn2, serializedBytes)
val request = server.dataPlaneRequestChannel.receiveRequest(2000)
assertNotNull(request)
}
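  // With max.connections.per.ip=0, connections are only accepted from addresses that have an explicit
  // max.connections.per.ip.overrides entry.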
@Test
def testZeroMaxConnectionsPerIp(): Unit = {
val newProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
newProps.setProperty(KafkaConfig.MaxConnectionsPerIpProp, "0")
newProps.setProperty(KafkaConfig.MaxConnectionsPerIpOverridesProp, "%s:%s".format("127.0.0.1", "5"))
val server = new SocketServer(KafkaConfig.fromProps(newProps), new Metrics(),
Time.SYSTEM, credentialProvider, apiVersionManager)
try {
server.startup()
// make the maximum allowable number of connections
val conns = (0 until 5).map(_ => connect(server))
// now try one more (should fail)
val conn = connect(server)
conn.setSoTimeout(3000)
assertEquals(-1, conn.getInputStream.read())
conn.close()
// it should succeed after closing one connection
val address = conns.head.getInetAddress
conns.head.close()
TestUtils.waitUntilTrue(() => server.connectionCount(address) < conns.length,
"Failed to decrement connection count after close")
val conn2 = connect(server)
val serializedBytes = producerRequestBytes()
sendRequest(conn2, serializedBytes)
val request = server.dataPlaneRequestChannel.receiveRequest(2000)
assertNotNull(request)
// now try to connect from the external facing interface, which should fail
val conn3 = connect(s = server, localAddr = InetAddress.getLocalHost)
conn3.setSoTimeout(3000)
assertEquals(-1, conn3.getInputStream.read())
conn3.close()
} finally {
shutdownServerAndMetrics(server)
}
}
@Test
def testMaxConnectionsPerIpOverrides(): Unit = {
val overrideNum = server.config.maxConnectionsPerIp + 1
val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
overrideProps.put(KafkaConfig.MaxConnectionsPerIpOverridesProp, s"localhost:$overrideNum")
val serverMetrics = new Metrics()
val overrideServer = new SocketServer(KafkaConfig.fromProps(overrideProps), serverMetrics,
Time.SYSTEM, credentialProvider, apiVersionManager)
try {
overrideServer.startup()
// make the maximum allowable number of connections
val conns = (0 until overrideNum).map(_ => connect(overrideServer))
// it should succeed
val serializedBytes = producerRequestBytes()
sendRequest(conns.last, serializedBytes)
val request = overrideServer.dataPlaneRequestChannel.receiveRequest(2000)
assertNotNull(request)
// now try one more (should fail)
val conn = connect(overrideServer)
conn.setSoTimeout(3000)
assertEquals(-1, conn.getInputStream.read())
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
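  /**
   * Verifies that connections exceeding the per-IP connection rate quota are throttled rather than
   * rejected, that throttled connections are closed once they are unthrottled, and that new
   * connections succeed after the quota samples expire.
   */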
@Test
def testConnectionRatePerIp(): Unit = {
val defaultTimeoutMs = 2000
val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
overrideProps.remove(KafkaConfig.MaxConnectionsPerIpProp)
overrideProps.put(KafkaConfig.NumQuotaSamplesProp, String.valueOf(2))
val connectionRate = 5
val time = new MockTime()
val overrideServer = new SocketServer(KafkaConfig.fromProps(overrideProps), new Metrics(),
time, credentialProvider, apiVersionManager)
// update the connection rate to 5
overrideServer.connectionQuotas.updateIpConnectionRateQuota(None, Some(connectionRate))
try {
overrideServer.startup()
// make the (maximum allowable number + 1) of connections
(0 to connectionRate).map(_ => connect(overrideServer))
val acceptors = overrideServer.dataPlaneAcceptors.asScala.values
      // wait until 5 connections have been accepted and 1 connection has been throttled
TestUtils.waitUntilTrue(
() => acceptors.foldLeft(0)((accumulator, acceptor) => accumulator + acceptor.throttledSockets.size) == 1,
"timeout waiting for 1 connection to get throttled",
defaultTimeoutMs)
// now try one more, so that we can make sure this connection will get throttled
var conn = connect(overrideServer)
      // there should now be a total of 2 throttled connections
TestUtils.waitUntilTrue(
() => acceptors.foldLeft(0)((accumulator, acceptor) => accumulator + acceptor.throttledSockets.size) == 2,
"timeout waiting for 2 connection to get throttled",
defaultTimeoutMs)
// advance time to unthrottle connections
time.sleep(defaultTimeoutMs)
acceptors.foreach(_.wakeup())
      // make sure no connections are throttled anymore (the throttled connections should have been closed)
TestUtils.waitUntilTrue(() => acceptors.forall(_.throttledSockets.isEmpty),
"timeout waiting for connection to be unthrottled",
defaultTimeoutMs)
// verify the connection is closed now
verifyRemoteConnectionClosed(conn)
      // a new connection should succeed now that the previous connection is closed and the old quota samples have expired
conn = connect(overrideServer)
val serializedBytes = producerRequestBytes()
sendRequest(conn, serializedBytes)
val request = overrideServer.dataPlaneRequestChannel.receiveRequest(defaultTimeoutMs)
assertNotNull(request)
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testThrottledSocketsClosedOnShutdown(): Unit = {
val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
overrideProps.remove("max.connections.per.ip")
overrideProps.put(KafkaConfig.NumQuotaSamplesProp, String.valueOf(2))
val connectionRate = 5
val time = new MockTime()
val overrideServer = new SocketServer(KafkaConfig.fromProps(overrideProps), new Metrics(),
time, credentialProvider, apiVersionManager)
overrideServer.connectionQuotas.updateIpConnectionRateQuota(None, Some(connectionRate))
overrideServer.startup()
// make the maximum allowable number of connections
(0 until connectionRate).map(_ => connect(overrideServer))
// now try one more (should get throttled)
val conn = connect(overrideServer)
// don't advance time so that connection never gets unthrottled
shutdownServerAndMetrics(overrideServer)
verifyRemoteConnectionClosed(conn)
}
private def verifyRemoteConnectionClosed(connection: Socket): Unit = {
val largeChunkOfBytes = new Array[Byte](1000000)
// doing a subsequent send should throw an exception as the connection should be closed.
// send a large chunk of bytes to trigger a socket flush
assertThrows(classOf[IOException], () => sendRequest(connection, largeChunkOfBytes, Some(0)))
}
@Test
def testSslSocketServer(): Unit = {
val serverMetrics = new Metrics
val overrideServer = new SocketServer(KafkaConfig.fromProps(sslServerProps), serverMetrics,
Time.SYSTEM, credentialProvider, apiVersionManager)
try {
overrideServer.startup()
val sslContext = SSLContext.getInstance(TestSslUtils.DEFAULT_TLS_PROTOCOL_FOR_TESTS)
sslContext.init(null, Array(TestUtils.trustAllCerts), new java.security.SecureRandom())
val socketFactory = sslContext.getSocketFactory
val sslSocket = socketFactory.createSocket("localhost",
overrideServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.SSL))).asInstanceOf[SSLSocket]
sslSocket.setNeedClientAuth(false)
val correlationId = -1
val clientId = ""
val ackTimeoutMs = 10000
val ack = 0: Short
val emptyRequest = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection())
.setAcks(ack)
.setTimeoutMs(ackTimeoutMs)
.setTransactionalId(null))
.build()
val emptyHeader = new RequestHeader(ApiKeys.PRODUCE, emptyRequest.version, clientId, correlationId)
val serializedBytes = Utils.toArray(RequestTestUtils.serializeRequestWithHeader(emptyHeader, emptyRequest))
sendRequest(sslSocket, serializedBytes)
processRequest(overrideServer.dataPlaneRequestChannel)
assertEquals(serializedBytes.toSeq, receiveResponse(sslSocket).toSeq)
sslSocket.close()
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testSaslReauthenticationFailureWithKip152SaslAuthenticate(): Unit = {
checkSaslReauthenticationFailure(true)
}
@Test
def testSaslReauthenticationFailureNoKip152SaslAuthenticate(): Unit = {
checkSaslReauthenticationFailure(false)
}
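  /**
   * Starts a SASL/PLAIN listener with a short `connections.max.reauth.ms`, authenticates a client
   * (with or without the KIP-152 SaslAuthenticateRequest), then advances the mock clock past the
   * re-authentication deadline and verifies that the server closes the connection on the next send.
   */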
def checkSaslReauthenticationFailure(leverageKip152SaslAuthenticateRequest : Boolean): Unit = {
shutdownServerAndMetrics(server) // we will use our own instance because we require custom configs
val username = "admin"
val password = "admin-secret"
val reauthMs = 1500
val brokerProps = new Properties
brokerProps.setProperty("listeners", "SASL_PLAINTEXT://localhost:0")
brokerProps.setProperty("security.inter.broker.protocol", "SASL_PLAINTEXT")
brokerProps.setProperty("listener.name.sasl_plaintext.plain.sasl.jaas.config",
"org.apache.kafka.common.security.plain.PlainLoginModule required " +
"username=\\"%s\\" password=\\"%s\\" user_%s=\\"%s\\";".format(username, password, username, password))
brokerProps.setProperty("sasl.mechanism.inter.broker.protocol", "PLAIN")
brokerProps.setProperty("listener.name.sasl_plaintext.sasl.enabled.mechanisms", "PLAIN")
brokerProps.setProperty("num.network.threads", "1")
brokerProps.setProperty("connections.max.reauth.ms", reauthMs.toString)
val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect,
saslProperties = Some(brokerProps), enableSaslPlaintext = true)
val time = new MockTime()
val overrideServer = new TestableSocketServer(KafkaConfig.fromProps(overrideProps), time = time)
try {
overrideServer.startup()
val socket = connect(overrideServer, ListenerName.forSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT))
val correlationId = -1
val clientId = ""
// send a SASL handshake request
val version : Short = if (leverageKip152SaslAuthenticateRequest) ApiKeys.SASL_HANDSHAKE.latestVersion else 0
val saslHandshakeRequest = new SaslHandshakeRequest.Builder(new SaslHandshakeRequestData().setMechanism("PLAIN"))
.build(version)
val saslHandshakeHeader = new RequestHeader(ApiKeys.SASL_HANDSHAKE, saslHandshakeRequest.version, clientId,
correlationId)
sendApiRequest(socket, saslHandshakeRequest, saslHandshakeHeader)
receiveResponse(socket)
// now send credentials
val authBytes = "admin\\u0000admin\\u0000admin-secret".getBytes(StandardCharsets.UTF_8)
if (leverageKip152SaslAuthenticateRequest) {
// send credentials within a SaslAuthenticateRequest
val saslAuthenticateRequest = new SaslAuthenticateRequest.Builder(new SaslAuthenticateRequestData()
.setAuthBytes(authBytes)).build()
val saslAuthenticateHeader = new RequestHeader(ApiKeys.SASL_AUTHENTICATE, saslAuthenticateRequest.version,
clientId, correlationId)
sendApiRequest(socket, saslAuthenticateRequest, saslAuthenticateHeader)
} else {
// send credentials directly, without a SaslAuthenticateRequest
sendRequest(socket, authBytes)
}
receiveResponse(socket)
assertEquals(1, overrideServer.testableSelector.channels.size)
// advance the clock long enough to cause server-side disconnection upon next send...
time.sleep(reauthMs * 2)
// ...and now send something to trigger the disconnection
val ackTimeoutMs = 10000
val ack = 0: Short
val emptyRequest = requests.ProduceRequest.forCurrentMagic(new ProduceRequestData()
.setTopicData(new ProduceRequestData.TopicProduceDataCollection())
.setAcks(ack)
.setTimeoutMs(ackTimeoutMs)
.setTransactionalId(null))
.build()
val emptyHeader = new RequestHeader(ApiKeys.PRODUCE, emptyRequest.version, clientId, correlationId)
sendApiRequest(socket, emptyRequest, emptyHeader)
// wait a little bit for the server-side disconnection to occur since it happens asynchronously
try {
TestUtils.waitUntilTrue(() => overrideServer.testableSelector.channels.isEmpty,
"Expired connection was not closed", 1000, 100)
} finally {
socket.close()
}
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testSessionPrincipal(): Unit = {
val socket = connect()
val bytes = new Array[Byte](40)
sendRequest(socket, bytes, Some(0))
assertEquals(KafkaPrincipal.ANONYMOUS, receiveRequest(server.dataPlaneRequestChannel).session.principal)
}
/* Test that we update request metrics if the client closes the connection while the broker response is in flight. */
@Test
def testClientDisconnectionUpdatesRequestMetrics(): Unit = {
// The way we detect a connection close from the client depends on the response size. If it's small, an
// IOException ("Connection reset by peer") is thrown when the Selector reads from the socket. If
// it's large, an IOException ("Broken pipe") is thrown when the Selector writes to the socket. We test
// both paths to ensure they are handled correctly.
checkClientDisconnectionUpdatesRequestMetrics(0)
checkClientDisconnectionUpdatesRequestMetrics(550000)
}
private def checkClientDisconnectionUpdatesRequestMetrics(responseBufferSize: Int): Unit = {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
val serverMetrics = new Metrics
var conn: Socket = null
val overrideServer = new SocketServer(
KafkaConfig.fromProps(props), serverMetrics, Time.SYSTEM, credentialProvider, apiVersionManager
) {
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool, isPrivilegedListener: Boolean = false): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, dataPlaneRequestChannel, connectionQuotas,
config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics,
credentialProvider, MemoryPool.NONE, new LogContext(), Processor.ConnectionQueueSize, isPrivilegedListener, apiVersionManager) {
override protected[network] def sendResponse(response: RequestChannel.Response, responseSend: Send): Unit = {
conn.close()
super.sendResponse(response, responseSend)
}
}
}
}
try {
overrideServer.startup()
conn = connect(overrideServer)
val serializedBytes = producerRequestBytes()
sendRequest(conn, serializedBytes)
val channel = overrideServer.dataPlaneRequestChannel
val request = receiveRequest(channel)
val requestMetrics = channel.metrics(request.header.apiKey.name)
def totalTimeHistCount(): Long = requestMetrics.totalTimeHist.count
val send = new NetworkSend(request.context.connectionId, ByteBufferSend.sizePrefixed(ByteBuffer.allocate(responseBufferSize)))
val headerLog = new ObjectNode(JsonNodeFactory.instance)
headerLog.set("response", new TextNode("someResponse"))
channel.sendResponse(new RequestChannel.SendResponse(request, send, Some(headerLog), None))
val expectedTotalTimeCount = totalTimeHistCount() + 1
TestUtils.waitUntilTrue(() => totalTimeHistCount() == expectedTotalTimeCount,
s"request metrics not updated, expected: $expectedTotalTimeCount, actual: ${totalTimeHistCount()}")
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testClientDisconnectionWithOutstandingReceivesProcessedUntilFailedSend(): Unit = {
val serverMetrics = new Metrics
@volatile var selector: TestableSelector = null
val overrideServer = new SocketServer(
KafkaConfig.fromProps(props), serverMetrics, Time.SYSTEM, credentialProvider, apiVersionManager
) {
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool, isPrivilegedListener: Boolean): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, dataPlaneRequestChannel, connectionQuotas,
config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics,
credentialProvider, memoryPool, new LogContext(), Processor.ConnectionQueueSize, isPrivilegedListener, apiVersionManager) {
override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = {
val testableSelector = new TestableSelector(config, channelBuilder, time, metrics)
selector = testableSelector
testableSelector
}
}
}
}
try {
overrideServer.startup()
// Create a channel, send some requests and close socket. Receive one pending request after socket was closed.
val request = closeSocketWithPendingRequest(overrideServer, () => connect(overrideServer))
// Complete request with socket exception so that the channel is closed
processRequest(overrideServer.dataPlaneRequestChannel, request)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request, overrideServer).isEmpty, "Channel not closed after failed send")
assertTrue(selector.completedSends.isEmpty, "Unexpected completed send")
} finally {
overrideServer.shutdown()
serverMetrics.close()
}
}
/*
* Test that we update request metrics if the channel has been removed from the selector when the broker calls
* `selector.send` (selector closes old connections, for example).
*/
@Test
def testBrokerSendAfterChannelClosedUpdatesRequestMetrics(): Unit = {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
props.setProperty(KafkaConfig.ConnectionsMaxIdleMsProp, "110")
val serverMetrics = new Metrics
var conn: Socket = null
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics,
Time.SYSTEM, credentialProvider, apiVersionManager)
try {
overrideServer.startup()
conn = connect(overrideServer)
val serializedBytes = producerRequestBytes()
sendRequest(conn, serializedBytes)
val channel = overrideServer.dataPlaneRequestChannel
val request = receiveRequest(channel)
TestUtils.waitUntilTrue(() => overrideServer.dataPlaneProcessor(request.processor).channel(request.context.connectionId).isEmpty,
s"Idle connection `${request.context.connectionId}` was not closed by selector")
val requestMetrics = channel.metrics(request.header.apiKey.name)
def totalTimeHistCount(): Long = requestMetrics.totalTimeHist.count
val expectedTotalTimeCount = totalTimeHistCount() + 1
processRequest(channel, request)
TestUtils.waitUntilTrue(() => totalTimeHistCount() == expectedTotalTimeCount,
s"request metrics not updated, expected: $expectedTotalTimeCount, actual: ${totalTimeHistCount()}")
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
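  // Request metrics should remain usable after stopProcessingRequests() and be removed on shutdown().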
@Test
def testRequestMetricsAfterStop(): Unit = {
server.stopProcessingRequests()
val version = ApiKeys.PRODUCE.latestVersion
val version2 = (version - 1).toShort
for (_ <- 0 to 1) server.dataPlaneRequestChannel.metrics(ApiKeys.PRODUCE.name).requestRate(version).mark()
server.dataPlaneRequestChannel.metrics(ApiKeys.PRODUCE.name).requestRate(version2).mark()
assertEquals(2, server.dataPlaneRequestChannel.metrics(ApiKeys.PRODUCE.name).requestRate(version).count())
server.dataPlaneRequestChannel.updateErrorMetrics(ApiKeys.PRODUCE, Map(Errors.NONE -> 1))
val nonZeroMeters = Map(s"kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce,version=$version" -> 2,
s"kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce,version=$version2" -> 1,
"kafka.network:type=RequestMetrics,name=ErrorsPerSec,request=Produce,error=NONE" -> 1)
def requestMetricMeters = KafkaYammerMetrics
.defaultRegistry
.allMetrics.asScala
.collect { case (k, metric: Meter) if k.getType == "RequestMetrics" => (k.toString, metric.count) }
assertEquals(nonZeroMeters, requestMetricMeters.filter { case (_, value) => value != 0 })
server.shutdown()
assertEquals(Map.empty, requestMetricMeters)
}
@Test
def testMetricCollectionAfterShutdown(): Unit = {
server.shutdown()
val nonZeroMetricNamesAndValues = KafkaYammerMetrics
.defaultRegistry
.allMetrics.asScala
.filter { case (k, _) => k.getName.endsWith("IdlePercent") || k.getName.endsWith("NetworkProcessorAvgIdlePercent") }
.collect { case (k, metric: Gauge[_]) => (k, metric.value().asInstanceOf[Double]) }
.filter { case (_, value) => value != 0.0 && !value.equals(Double.NaN) }
assertEquals(Map.empty, nonZeroMetricNamesAndValues)
}
@Test
def testProcessorMetricsTags(): Unit = {
val kafkaMetricNames = metrics.metrics.keySet.asScala.filter(_.tags.asScala.get("listener").nonEmpty)
assertFalse(kafkaMetricNames.isEmpty)
val expectedListeners = Set("PLAINTEXT")
kafkaMetricNames.foreach { kafkaMetricName =>
assertTrue(expectedListeners.contains(kafkaMetricName.tags.get("listener")))
}
// legacy metrics not tagged
val yammerMetricsNames = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala
.filter { case (k, _) => k.getType.equals("Processor") }
.collect { case (k, _: Gauge[_]) => k }
assertFalse(yammerMetricsNames.isEmpty)
yammerMetricsNames.foreach { yammerMetricName =>
assertFalse(yammerMetricName.getMBeanName.contains("listener="))
}
}
/**
* Tests exception handling in [[Processor.configureNewConnections]]. Exception is
* injected into [[Selector.register]] which is used to register each new connection.
* Test creates two connections in a single iteration by waking up the selector only
* when two connections are ready.
* Verifies that
* - first failed connection is closed
* - second connection is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def configureNewConnectionException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
testableSelector.addFailure(SelectorOperation.Register)
val sockets = (1 to 2).map(_ => connect(testableServer))
testableSelector.waitForOperations(SelectorOperation.Register, 2)
TestUtils.waitUntilTrue(() => testableServer.connectionCount(localAddress) == 1, "Failed channel not removed")
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processNewResponses]]. Exception is
* injected into [[Selector.send]] which is used to send the new response.
* Test creates two responses in a single iteration by waking up the selector only
* when two responses are ready.
* Verifies that
* - first failed channel is closed
* - second response is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processNewResponseException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
val sockets = (1 to 2).map(_ => connect(testableServer))
sockets.foreach(sendRequest(_, producerRequestBytes()))
testableServer.testableSelector.addFailure(SelectorOperation.Send)
sockets.foreach(_ => processRequest(testableServer.dataPlaneRequestChannel))
testableSelector.waitForOperations(SelectorOperation.Send, 2)
testableServer.waitForChannelClose(testableSelector.allFailedChannels.head, locallyClosed = true)
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processNewResponses]] when [[Selector.send]]
* fails with `CancelledKeyException`, which is handled by the selector using a different
* code path. Test scenario is similar to [[SocketServerTest.processNewResponseException]].
*/
@Test
def sendCancelledKeyException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
val sockets = (1 to 2).map(_ => connect(testableServer))
sockets.foreach(sendRequest(_, producerRequestBytes()))
val requestChannel = testableServer.dataPlaneRequestChannel
val requests = sockets.map(_ => receiveRequest(requestChannel))
val failedConnectionId = requests(0).context.connectionId
// `KafkaChannel.disconnect()` cancels the selection key, triggering CancelledKeyException during send
testableSelector.channel(failedConnectionId).disconnect()
requests.foreach(processRequest(requestChannel, _))
testableSelector.waitForOperations(SelectorOperation.Send, 2)
testableServer.waitForChannelClose(failedConnectionId, locallyClosed = false)
val successfulSocket = if (isSocketConnectionId(failedConnectionId, sockets(0))) sockets(1) else sockets(0)
assertProcessorHealthy(testableServer, Seq(successfulSocket))
})
}
/**
* Tests channel send failure handling when send failure is triggered by [[Selector.send]]
* to a channel whose peer has closed its connection.
*/
@Test
def remoteCloseSendFailure(): Unit = {
verifySendFailureAfterRemoteClose(makeClosing = false)
}
/**
* Tests channel send failure handling when send failure is triggered by [[Selector.send]]
* to a channel whose peer has closed its connection and the channel is in `closingChannels`.
*/
@Test
def closingChannelSendFailure(): Unit = {
verifySendFailureAfterRemoteClose(makeClosing = true)
}
private def verifySendFailureAfterRemoteClose(makeClosing: Boolean): Unit = {
props ++= sslServerProps
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
val serializedBytes = producerRequestBytes()
val request = makeChannelWithBufferedRequestsAndCloseRemote(testableServer, testableSelector, makeClosing)
val otherSocket = sslConnect(testableServer)
sendRequest(otherSocket, serializedBytes)
processRequest(testableServer.dataPlaneRequestChannel, request)
processRequest(testableServer.dataPlaneRequestChannel) // Also process request from other socket
testableSelector.waitForOperations(SelectorOperation.Send, 2)
testableServer.waitForChannelClose(request.context.connectionId, locallyClosed = false)
assertProcessorHealthy(testableServer, Seq(otherSocket))
})
}
/**
* Verifies that all pending buffered receives are processed even if remote connection is closed.
* The channel must be closed after pending receives are processed.
*/
@Test
def remoteCloseWithBufferedReceives(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = false)
}
/**
* Verifies that channel is closed when remote client closes its connection if there is no
* buffered receive.
*/
@Test
def remoteCloseWithoutBufferedReceives(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 0, hasIncomplete = false)
}
/**
* Verifies that channel is closed when remote client closes its connection if there is a pending
* receive that is incomplete.
*/
@Test
def remoteCloseWithIncompleteBufferedReceive(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 0, hasIncomplete = true)
}
/**
* Verifies that all pending buffered receives are processed even if remote connection is closed.
* The channel must be closed after complete receives are processed, even if there is an incomplete
* receive remaining in the buffers.
*/
@Test
def remoteCloseWithCompleteAndIncompleteBufferedReceives(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = true)
}
/**
* Verifies that pending buffered receives are processed when remote connection is closed
* until a response send fails.
*/
@Test
def remoteCloseWithBufferedReceivesFailedSend(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = false, responseRequiredIndex = 1)
}
/**
* Verifies that all pending buffered receives are processed for channel in closing state.
* The channel must be closed after pending receives are processed.
*/
@Test
def closingChannelWithBufferedReceives(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = false, makeClosing = true)
}
/**
* Verifies that all pending buffered receives are processed for channel in closing state.
* The channel must be closed after complete receives are processed, even if there is an incomplete
* receive remaining in the buffers.
*/
@Test
def closingChannelWithCompleteAndIncompleteBufferedReceives(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = true, makeClosing = false)
}
/**
* Verifies that pending buffered receives are processed for a channel in closing state
* until a response send fails.
*/
@Test
def closingChannelWithBufferedReceivesFailedSend(): Unit = {
verifyRemoteCloseWithBufferedReceives(numComplete = 3, hasIncomplete = false, responseRequiredIndex = 1, makeClosing = false)
}
/**
* Verifies handling of client disconnections when the server-side channel is in the state
* specified using the parameters.
*
* @param numComplete Number of complete buffered requests
* @param hasIncomplete If true, add an additional partial buffered request
* @param responseRequiredIndex Index of the buffered request for which a response is sent. Previous requests
* are completed without a response. If set to -1, all `numComplete` requests
* are completed without a response.
* @param makeClosing If true, put the channel into closing state in the server Selector.
*/
private def verifyRemoteCloseWithBufferedReceives(numComplete: Int,
hasIncomplete: Boolean,
responseRequiredIndex: Int = -1,
makeClosing: Boolean = false): Unit = {
props ++= sslServerProps
    // Truncates the last request in the SSL buffers by directly updating the buffers to simulate a partial buffered request
def truncateBufferedRequest(channel: KafkaChannel): Unit = {
val transportLayer: SslTransportLayer = JTestUtils.fieldValue(channel, classOf[KafkaChannel], "transportLayer")
val netReadBuffer: ByteBuffer = JTestUtils.fieldValue(transportLayer, classOf[SslTransportLayer], "netReadBuffer")
val appReadBuffer: ByteBuffer = JTestUtils.fieldValue(transportLayer, classOf[SslTransportLayer], "appReadBuffer")
if (appReadBuffer.position() > 4) {
appReadBuffer.position(4)
netReadBuffer.position(0)
} else {
netReadBuffer.position(20)
}
}
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
val proxyServer = new ProxyServer(testableServer)
try {
// Step 1: Send client requests.
// a) request1 is sent by the client to ProxyServer and this is directly sent to the server. This
// ensures that server-side channel is in muted state until this request is processed in Step 3.
// b) `numComplete` requests are sent and buffered in the server-side channel's SSL buffers
// c) If `hasIncomplete=true`, an extra request is sent and buffered as in b). This will be truncated later
// when previous requests have been processed and only one request is remaining in the SSL buffer,
// making it easy to truncate.
val numBufferedRequests = numComplete + (if (hasIncomplete) 1 else 0)
val (socket, request1) = makeSocketWithBufferedRequests(testableServer, testableSelector, proxyServer, numBufferedRequests)
val channel = openChannel(request1, testableServer).getOrElse(throw new IllegalStateException("Channel closed too early"))
// Step 2: Close the client-side socket and the proxy socket to the server, triggering close notification in the
// server when the client is unmuted in Step 3. Get the channel into its desired closing/buffered state.
socket.close()
proxyServer.serverConnSocket.close()
TestUtils.waitUntilTrue(() => proxyServer.clientConnSocket.isClosed, "Client socket not closed")
if (makeClosing)
testableSelector.pendingClosingChannels.add(channel)
if (numComplete == 0 && hasIncomplete)
truncateBufferedRequest(channel)
// Step 3: Process the first request. Verify that the channel is not removed since the channel
// should be retained to process buffered data.
processRequestNoOpResponse(testableServer.dataPlaneRequestChannel, request1)
assertSame(channel, openOrClosingChannel(request1, testableServer).getOrElse(throw new IllegalStateException("Channel closed too early")))
// Step 4: Process buffered data. if `responseRequiredIndex>=0`, the channel should be failed and removed when
// attempting to send response. Otherwise, the channel should be removed when all completed buffers are processed.
// Channel should be closed and removed even if there is a partial buffered request when `hasIncomplete=true`
val numRequests = if (responseRequiredIndex >= 0) responseRequiredIndex + 1 else numComplete
(0 until numRequests).foreach { i =>
val request = receiveRequest(testableServer.dataPlaneRequestChannel)
if (i == numComplete - 1 && hasIncomplete)
truncateBufferedRequest(channel)
if (responseRequiredIndex == i)
processRequest(testableServer.dataPlaneRequestChannel, request)
else
processRequestNoOpResponse(testableServer.dataPlaneRequestChannel, request)
}
testableServer.waitForChannelClose(channel.id, locallyClosed = false)
// Verify that SocketServer is healthy
val anotherSocket = sslConnect(testableServer)
assertProcessorHealthy(testableServer, Seq(anotherSocket))
} finally {
proxyServer.close()
}
})
}
/**
* Tests idle channel expiry for SSL channels with buffered data. Muted channels are expired
* immediately even if there is pending data to be processed. This is consistent with PLAINTEXT where
   * we expire muted channels even if there is data available on the socket. This scenario occurs if the broker
   * takes longer than the idle timeout to process a client request. In that case the client would typically have
   * expired its connection and would potentially reconnect to retry the request, so immediate expiry enables
   * the old connection and its associated resources to be freed sooner.
*/
@Test
def idleExpiryWithBufferedReceives(): Unit = {
val idleTimeMs = 60000
val time = new MockTime()
props.put(KafkaConfig.ConnectionsMaxIdleMsProp, idleTimeMs.toString)
props ++= sslServerProps
val testableServer = new TestableSocketServer(time = time)
testableServer.startup()
assertTrue(testableServer.controlPlaneRequestChannelOpt.isEmpty)
val proxyServer = new ProxyServer(testableServer)
try {
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
val sleepTimeMs = idleTimeMs / 2 + 1
val (socket, request) = makeSocketWithBufferedRequests(testableServer, testableSelector, proxyServer)
      // Advance mock time in increments to verify that muted sockets with buffered data don't have their idle time updated.
      // Additional calls to poll() should not update the channel's last idle time.
for (_ <- 0 to 3) {
time.sleep(sleepTimeMs)
testableSelector.operationCounts.clear()
testableSelector.waitForOperations(SelectorOperation.Poll, 1)
}
testableServer.waitForChannelClose(request.context.connectionId, locallyClosed = false)
val otherSocket = sslConnect(testableServer)
assertProcessorHealthy(testableServer, Seq(otherSocket))
socket.close()
} finally {
proxyServer.close()
shutdownServerAndMetrics(testableServer)
}
}
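  // Verifies that requests buffered in the SSL buffers are processed only after the channel is
  // unmuted by the response to the first request.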
@Test
def testUnmuteChannelWithBufferedReceives(): Unit = {
val time = new MockTime()
props ++= sslServerProps
val testableServer = new TestableSocketServer(time = time)
testableServer.startup()
val proxyServer = new ProxyServer(testableServer)
try {
val testableSelector = testableServer.testableSelector
val (socket, request) = makeSocketWithBufferedRequests(testableServer, testableSelector, proxyServer)
testableSelector.operationCounts.clear()
testableSelector.waitForOperations(SelectorOperation.Poll, 1)
val keysWithBufferedRead: util.Set[SelectionKey] = JTestUtils.fieldValue(testableSelector, classOf[Selector], "keysWithBufferedRead")
assertEquals(Set.empty, keysWithBufferedRead.asScala)
processRequest(testableServer.dataPlaneRequestChannel, request)
// buffered requests should be processed after channel is unmuted
receiveRequest(testableServer.dataPlaneRequestChannel)
socket.close()
} finally {
proxyServer.close()
shutdownServerAndMetrics(testableServer)
}
}
/**
* Tests exception handling in [[Processor.processCompletedReceives]]. Exception is
* injected into [[Selector.mute]] which is used to mute the channel when a receive is complete.
* Test creates two receives in a single iteration by caching completed receives until two receives
* are complete.
* Verifies that
* - first failed channel is closed
* - second receive is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processCompletedReceiveException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val sockets = (1 to 2).map(_ => connect(testableServer))
val testableSelector = testableServer.testableSelector
val requestChannel = testableServer.dataPlaneRequestChannel
testableSelector.cachedCompletedReceives.minPerPoll = 2
testableSelector.addFailure(SelectorOperation.Mute)
sockets.foreach(sendRequest(_, producerRequestBytes()))
val requests = sockets.map(_ => receiveRequest(requestChannel))
testableSelector.waitForOperations(SelectorOperation.Mute, 2)
testableServer.waitForChannelClose(testableSelector.allFailedChannels.head, locallyClosed = true)
requests.foreach(processRequest(requestChannel, _))
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processCompletedSends]]. Exception is
* injected into [[Selector.unmute]] which is used to unmute the channel after send is complete.
* Test creates two completed sends in a single iteration by caching completed sends until two
* sends are complete.
* Verifies that
* - first failed channel is closed
* - second send is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processCompletedSendException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
val sockets = (1 to 2).map(_ => connect(testableServer))
val requests = sockets.map(sendAndReceiveRequest(_, testableServer))
testableSelector.addFailure(SelectorOperation.Unmute)
requests.foreach(processRequest(testableServer.dataPlaneRequestChannel, _))
testableSelector.waitForOperations(SelectorOperation.Unmute, 2)
testableServer.waitForChannelClose(testableSelector.allFailedChannels.head, locallyClosed = true)
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processDisconnected]]. An invalid connectionId
* is inserted to the disconnected list just before the actual valid one.
* Verifies that
* - first invalid connectionId is ignored
* - second disconnected channel is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processDisconnectedException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val (socket, connectionId) = connectAndProcessRequest(testableServer)
val testableSelector = testableServer.testableSelector
// Add an invalid connectionId to `Selector.disconnected` list before the actual disconnected channel
// and check that the actual id is processed and the invalid one ignored.
testableSelector.cachedDisconnected.minPerPoll = 2
testableSelector.cachedDisconnected.deferredValues += "notAValidConnectionId" -> ChannelState.EXPIRED
socket.close()
testableSelector.operationCounts.clear()
testableSelector.waitForOperations(SelectorOperation.Poll, 1)
testableServer.waitForChannelClose(connectionId, locallyClosed = false)
assertProcessorHealthy(testableServer)
})
}
/**
* Tests that `Processor` continues to function correctly after a failed [[Selector.poll]].
*/
@Test
def pollException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val (socket, _) = connectAndProcessRequest(testableServer)
val testableSelector = testableServer.testableSelector
testableSelector.addFailure(SelectorOperation.Poll)
testableSelector.operationCounts.clear()
testableSelector.waitForOperations(SelectorOperation.Poll, 2)
assertProcessorHealthy(testableServer, Seq(socket))
})
}
/**
* Tests handling of `ControlThrowable`. Verifies that the selector is closed.
*/
@Test
def controlThrowable(): Unit = {
withTestableServer (testWithServer = { testableServer =>
connectAndProcessRequest(testableServer)
val testableSelector = testableServer.testableSelector
testableSelector.operationCounts.clear()
testableSelector.addFailure(SelectorOperation.Poll,
Some(new ControlThrowable() {}))
testableSelector.waitForOperations(SelectorOperation.Poll, 1)
testableSelector.waitForOperations(SelectorOperation.CloseSelector, 1)
assertEquals(1, testableServer.uncaughtExceptions)
testableServer.uncaughtExceptions = 0
})
}
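  /**
   * Verifies that the Acceptor blocks when the connection queue (size 1 here) is full, that all
   * connections are eventually registered, and that the AcceptorBlockedPercent metric records the
   * blocked time.
   */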
@Test
def testConnectionRateLimit(): Unit = {
shutdownServerAndMetrics(server)
val numConnections = 5
props.put("max.connections.per.ip", numConnections.toString)
val testableServer = new TestableSocketServer(KafkaConfig.fromProps(props), connectionQueueSize = 1)
testableServer.startup()
val testableSelector = testableServer.testableSelector
val errors = new mutable.HashSet[String]
def acceptorStackTraces: scala.collection.Map[Thread, String] = {
Thread.getAllStackTraces.asScala.collect {
case (thread, stacktraceElement) if thread.getName.contains("kafka-socket-acceptor") =>
          thread -> stacktraceElement.mkString("\n")
}
}
def acceptorBlocked: Boolean = {
val stackTraces = acceptorStackTraces
if (stackTraces.isEmpty)
errors.add(s"Acceptor thread not found, threads=${Thread.getAllStackTraces.keySet}")
stackTraces.exists { case (thread, stackTrace) =>
thread.getState == Thread.State.WAITING && stackTrace.contains("ArrayBlockingQueue")
}
}
def registeredConnectionCount: Int = testableSelector.operationCounts.getOrElse(SelectorOperation.Register, 0)
try {
// Block selector until Acceptor is blocked while connections are pending
testableSelector.pollCallback = () => {
try {
TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount >= numConnections - 1 || acceptorBlocked,
"Acceptor not blocked", waitTimeMs = 10000)
} catch {
case _: Throwable => errors.add(s"Acceptor not blocked: $acceptorStackTraces")
}
}
testableSelector.operationCounts.clear()
val sockets = (1 to numConnections).map(_ => connect(testableServer))
TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount == numConnections,
"Connections not registered", waitTimeMs = 15000)
assertEquals(Set.empty, errors)
testableSelector.waitForOperations(SelectorOperation.Register, numConnections)
// In each iteration, SocketServer processes at most connectionQueueSize (1 in this test)
// new connections and then does poll() to process data from existing connections. So for
// 5 connections, we expect 5 iterations. Since we stop when the 5th connection is processed,
// we can safely check that there were at least 4 polls prior to the 5th connection.
val pollCount = testableSelector.operationCounts(SelectorOperation.Poll)
assertTrue(pollCount >= numConnections - 1, s"Connections created too quickly: $pollCount")
verifyAcceptorBlockedPercent("PLAINTEXT", expectBlocked = true)
assertProcessorHealthy(testableServer, sockets)
} finally {
shutdownServerAndMetrics(testableServer)
}
}
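  // Requests arriving on the control plane listener should be flagged as coming from a privileged
  // listener, while requests on the data plane listener should not.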
@Test
def testControlPlaneAsPrivilegedListener(): Unit = {
val testProps = new Properties
testProps ++= props
testProps.put("listeners", "PLAINTEXT://localhost:0,CONTROLLER://localhost:0")
testProps.put("listener.security.protocol.map", "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT")
testProps.put("control.plane.listener.name", "CONTROLLER")
val config = KafkaConfig.fromProps(testProps)
withTestableServer(config, { testableServer =>
val controlPlaneSocket = connect(testableServer, config.controlPlaneListenerName.get,
localAddr = InetAddress.getLocalHost)
val sentRequest = sendAndReceiveControllerRequest(controlPlaneSocket, testableServer)
assertTrue(sentRequest.context.fromPrivilegedListener)
val plainSocket = connect(testableServer, localAddr = InetAddress.getLocalHost)
val plainRequest = sendAndReceiveRequest(plainSocket, testableServer)
assertFalse(plainRequest.context.fromPrivilegedListener)
})
}
@Test
def testInterBrokerListenerAsPrivilegedListener(): Unit = {
val testProps = new Properties
testProps ++= props
testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0")
testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT")
testProps.put("inter.broker.listener.name", "INTERNAL")
val config = KafkaConfig.fromProps(testProps)
withTestableServer(config, { testableServer =>
val interBrokerSocket = connect(testableServer, config.interBrokerListenerName,
localAddr = InetAddress.getLocalHost)
val sentRequest = sendAndReceiveRequest(interBrokerSocket, testableServer)
assertTrue(sentRequest.context.fromPrivilegedListener)
val externalSocket = connect(testableServer, new ListenerName("EXTERNAL"),
localAddr = InetAddress.getLocalHost)
val externalRequest = sendAndReceiveRequest(externalSocket, testableServer)
assertFalse(externalRequest.context.fromPrivilegedListener)
})
}
@Test
def testControlPlaneTakePrecedenceOverInterBrokerListenerAsPrivilegedListener(): Unit = {
val testProps = new Properties
testProps ++= props
testProps.put("listeners", "EXTERNAL://localhost:0,INTERNAL://localhost:0,CONTROLLER://localhost:0")
testProps.put("listener.security.protocol.map", "EXTERNAL:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT")
testProps.put("control.plane.listener.name", "CONTROLLER")
testProps.put("inter.broker.listener.name", "INTERNAL")
val config = KafkaConfig.fromProps(testProps)
withTestableServer(config, { testableServer =>
val controlPlaneSocket = connect(testableServer, config.controlPlaneListenerName.get,
localAddr = InetAddress.getLocalHost)
val controlPlaneRequest = sendAndReceiveControllerRequest(controlPlaneSocket, testableServer)
assertTrue(controlPlaneRequest.context.fromPrivilegedListener)
val interBrokerSocket = connect(testableServer, config.interBrokerListenerName,
localAddr = InetAddress.getLocalHost)
val interBrokerRequest = sendAndReceiveRequest(interBrokerSocket, testableServer)
assertFalse(interBrokerRequest.context.fromPrivilegedListener)
val externalSocket = connect(testableServer, new ListenerName("EXTERNAL"),
localAddr = InetAddress.getLocalHost)
val externalRequest = sendAndReceiveRequest(externalSocket, testableServer)
assertFalse(externalRequest.context.fromPrivilegedListener)
})
}
private def sslServerProps: Properties = {
val trustStoreFile = File.createTempFile("truststore", ".jks")
val sslProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, interBrokerSecurityProtocol = Some(SecurityProtocol.SSL),
trustStoreFile = Some(trustStoreFile))
sslProps.put(KafkaConfig.ListenersProp, "SSL://localhost:0")
sslProps
}
private def withTestableServer(config : KafkaConfig = KafkaConfig.fromProps(props),
testWithServer: TestableSocketServer => Unit): Unit = {
val testableServer = new TestableSocketServer(config)
testableServer.startup()
try {
testWithServer(testableServer)
} finally {
shutdownServerAndMetrics(testableServer)
assertEquals(0, testableServer.uncaughtExceptions)
}
}
def sendAndReceiveControllerRequest(socket: Socket, server: SocketServer): RequestChannel.Request = {
sendRequest(socket, producerRequestBytes())
receiveRequest(server.controlPlaneRequestChannelOpt.get)
}
private def assertProcessorHealthy(testableServer: TestableSocketServer, healthySockets: Seq[Socket] = Seq.empty): Unit = {
val selector = testableServer.testableSelector
selector.reset()
val requestChannel = testableServer.dataPlaneRequestChannel
// Check that existing channels behave as expected
healthySockets.foreach { socket =>
val request = sendAndReceiveRequest(socket, testableServer)
processRequest(requestChannel, request)
socket.close()
}
TestUtils.waitUntilTrue(() => testableServer.connectionCount(localAddress) == 0, "Channels not removed")
// Check new channel behaves as expected
val (socket, connectionId) = connectAndProcessRequest(testableServer)
assertArrayEquals(producerRequestBytes(), receiveResponse(socket))
assertNotNull(selector.channel(connectionId), "Channel should not have been closed")
assertNull(selector.closingChannel(connectionId), "Channel should not be closing")
socket.close()
TestUtils.waitUntilTrue(() => testableServer.connectionCount(localAddress) == 0, "Channels not removed")
}
// Since all sockets use the same local host, it is sufficient to check the local port
def isSocketConnectionId(connectionId: String, socket: Socket): Boolean =
connectionId.contains(s":${socket.getLocalPort}-")
private def verifyAcceptorBlockedPercent(listenerName: String, expectBlocked: Boolean): Unit = {
val blockedPercentMetricMBeanName = s"kafka.network:type=Acceptor,name=AcceptorBlockedPercent,listener=$listenerName"
val blockedPercentMetrics = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala.filter { case (k, _) =>
k.getMBeanName == blockedPercentMetricMBeanName
}.values
assertEquals(1, blockedPercentMetrics.size)
val blockedPercentMetric = blockedPercentMetrics.head.asInstanceOf[Meter]
val blockedPercent = blockedPercentMetric.meanRate
if (expectBlocked) {
assertTrue(blockedPercent > 0.0, s"Acceptor blocked percent not recorded: $blockedPercent")
assertTrue(blockedPercent <= 1.0, s"Unexpected blocked percent in acceptor: $blockedPercent")
} else {
assertEquals(0.0, blockedPercent, 0.001)
}
}
class TestableSocketServer(
config : KafkaConfig = KafkaConfig.fromProps(props),
connectionQueueSize: Int = 20,
time: Time = Time.SYSTEM
) extends SocketServer(
config, new Metrics, time, credentialProvider, apiVersionManager,
) {
@volatile var selector: Option[TestableSelector] = None
@volatile var uncaughtExceptions = 0
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool, isPrivilegedListener: Boolean = false): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, requestChannel, connectionQuotas, config.connectionsMaxIdleMs,
config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics, credentialProvider,
memoryPool, new LogContext(), connectionQueueSize, isPrivilegedListener, apiVersionManager) {
override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = {
val testableSelector = new TestableSelector(config, channelBuilder, time, metrics, metricTags.asScala)
selector = Some(testableSelector)
testableSelector
}
override private[network] def processException(errorMessage: String, throwable: Throwable): Unit = {
if (errorMessage.contains("uncaught exception"))
uncaughtExceptions += 1
super.processException(errorMessage, throwable)
}
}
}
def testableSelector: TestableSelector =
selector.getOrElse(throw new IllegalStateException("Selector not created"))
def waitForChannelClose(connectionId: String, locallyClosed: Boolean): Unit = {
val selector = testableSelector
if (locallyClosed) {
TestUtils.waitUntilTrue(() => selector.allLocallyClosedChannels.contains(connectionId),
s"Channel not closed: $connectionId")
assertTrue(testableSelector.allDisconnectedChannels.isEmpty, "Unexpected disconnect notification")
} else {
TestUtils.waitUntilTrue(() => selector.allDisconnectedChannels.contains(connectionId),
s"Disconnect notification not received: $connectionId")
assertTrue(testableSelector.allLocallyClosedChannels.isEmpty, "Channel closed locally")
}
val openCount = selector.allChannels.size - 1 // minus one for the channel just closed above
TestUtils.waitUntilTrue(() => connectionCount(localAddress) == openCount, "Connection count not decremented")
TestUtils.waitUntilTrue(() =>
dataPlaneProcessor(0).inflightResponseCount == 0, "Inflight responses not cleared")
assertNull(selector.channel(connectionId), "Channel not removed")
assertNull(selector.closingChannel(connectionId), "Closing channel not removed")
}
}
sealed trait SelectorOperation
object SelectorOperation {
case object Register extends SelectorOperation
case object Poll extends SelectorOperation
case object Send extends SelectorOperation
case object Mute extends SelectorOperation
case object Unmute extends SelectorOperation
case object Wakeup extends SelectorOperation
case object Close extends SelectorOperation
case object CloseSelector extends SelectorOperation
}
class TestableSelector(config: KafkaConfig, channelBuilder: ChannelBuilder, time: Time, metrics: Metrics, metricTags: mutable.Map[String, String] = mutable.Map.empty)
extends Selector(config.socketRequestMaxBytes, config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs,
metrics, time, "socket-server", metricTags.asJava, false, true, channelBuilder, MemoryPool.NONE, new LogContext()) {
val failures = mutable.Map[SelectorOperation, Throwable]()
val operationCounts = mutable.Map[SelectorOperation, Int]().withDefaultValue(0)
val allChannels = mutable.Set[String]()
val allLocallyClosedChannels = mutable.Set[String]()
val allDisconnectedChannels = mutable.Set[String]()
val allFailedChannels = mutable.Set[String]()
// Enable data from `Selector.poll()` to be deferred to a subsequent poll() until
// the number of elements of that type reaches `minPerPoll`. This enables tests to verify
// that failed processing doesn't impact subsequent processing within the same iteration.
abstract class PollData[T] {
var minPerPoll = 1
val deferredValues = mutable.Buffer[T]()
/**
* Process new results and return the results for the current poll if at least
* `minPerPoll` results are available including any deferred results. Otherwise
* add the provided values to the deferred set and return an empty buffer. This allows
* tests to process `minPerPoll` elements as the results of a single poll iteration.
*/
protected def update(newValues: mutable.Buffer[T]): mutable.Buffer[T] = {
val currentPollValues = mutable.Buffer[T]()
if (deferredValues.size + newValues.size >= minPerPoll) {
if (deferredValues.nonEmpty) {
currentPollValues ++= deferredValues
deferredValues.clear()
}
currentPollValues ++= newValues
} else
deferredValues ++= newValues
currentPollValues
}
/**
* Process results from the appropriate buffer in Selector and update the buffer to either
* defer and return nothing or return all results including previously deferred values.
*/
def updateResults(): Unit
}
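// Deferral sketch (added for illustration; `pollData`, `a` and `b` are hypothetical, and the
// calls are shown as they would appear inside a PollData subclass since `update` is protected):
// with `minPerPoll = 2`, the first update with a single element defers it and returns an empty
// buffer, and the next update returns the deferred element together with the new one.
//
//   pollData.minPerPoll = 2
//   pollData.update(mutable.Buffer(a))   // returns an empty buffer, `a` is deferred
//   pollData.update(mutable.Buffer(b))   // returns mutable.Buffer(a, b)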
class CompletedReceivesPollData(selector: TestableSelector) extends PollData[NetworkReceive] {
val completedReceivesMap: util.Map[String, NetworkReceive] = JTestUtils.fieldValue(selector, classOf[Selector], "completedReceives")
override def updateResults(): Unit = {
val currentReceives = update(selector.completedReceives.asScala.toBuffer)
completedReceivesMap.clear()
currentReceives.foreach { receive =>
val channelOpt = Option(selector.channel(receive.source)).orElse(Option(selector.closingChannel(receive.source)))
channelOpt.foreach { channel => completedReceivesMap.put(channel.id, receive) }
}
}
}
class CompletedSendsPollData(selector: TestableSelector) extends PollData[NetworkSend] {
override def updateResults(): Unit = {
val currentSends = update(selector.completedSends.asScala)
selector.completedSends.clear()
currentSends.foreach { selector.completedSends.add }
}
}
class DisconnectedPollData(selector: TestableSelector) extends PollData[(String, ChannelState)] {
override def updateResults(): Unit = {
val currentDisconnected = update(selector.disconnected.asScala.toBuffer)
selector.disconnected.clear()
currentDisconnected.foreach { case (channelId, state) => selector.disconnected.put(channelId, state) }
}
}
val cachedCompletedReceives = new CompletedReceivesPollData(this)
val cachedCompletedSends = new CompletedSendsPollData(this)
val cachedDisconnected = new DisconnectedPollData(this)
val allCachedPollData = Seq(cachedCompletedReceives, cachedCompletedSends, cachedDisconnected)
val pendingClosingChannels = new ConcurrentLinkedQueue[KafkaChannel]()
@volatile var minWakeupCount = 0
@volatile var pollTimeoutOverride: Option[Long] = None
@volatile var pollCallback: () => Unit = () => {}
def addFailure(operation: SelectorOperation, exception: Option[Throwable] = None): Unit = {
failures += operation ->
exception.getOrElse(new IllegalStateException(s"Test exception during $operation"))
}
private def onOperation(operation: SelectorOperation, connectionId: Option[String], onFailure: => Unit): Unit = {
operationCounts(operation) += 1
failures.remove(operation).foreach { e =>
connectionId.foreach(allFailedChannels.add)
onFailure
throw e
}
}
def waitForOperations(operation: SelectorOperation, minExpectedTotal: Int): Unit = {
TestUtils.waitUntilTrue(() =>
operationCounts.getOrElse(operation, 0) >= minExpectedTotal, "Operations not performed within timeout")
}
def runOp[T](operation: SelectorOperation, connectionId: Option[String],
onFailure: => Unit = {})(code: => T): T = {
// If a failure is set on `operation`, throw that exception even if `code` fails
try code
finally onOperation(operation, connectionId, onFailure)
}
override def register(id: String, socketChannel: SocketChannel): Unit = {
runOp(SelectorOperation.Register, Some(id), onFailure = close(id)) {
super.register(id, socketChannel)
}
}
override def send(s: NetworkSend): Unit = {
runOp(SelectorOperation.Send, Some(s.destinationId)) {
super.send(s)
}
}
override def poll(timeout: Long): Unit = {
try {
assertEquals(0, super.completedReceives().size)
assertEquals(0, super.completedSends().size)
pollCallback.apply()
while (!pendingClosingChannels.isEmpty) {
makeClosing(pendingClosingChannels.poll())
}
runOp(SelectorOperation.Poll, None) {
super.poll(pollTimeoutOverride.getOrElse(timeout))
}
} finally {
super.channels.forEach(allChannels += _.id)
allDisconnectedChannels ++= super.disconnected.asScala.keys
cachedCompletedReceives.updateResults()
cachedCompletedSends.updateResults()
cachedDisconnected.updateResults()
}
}
override def mute(id: String): Unit = {
runOp(SelectorOperation.Mute, Some(id)) {
super.mute(id)
}
}
override def unmute(id: String): Unit = {
runOp(SelectorOperation.Unmute, Some(id)) {
super.unmute(id)
}
}
override def wakeup(): Unit = {
runOp(SelectorOperation.Wakeup, None) {
if (minWakeupCount > 0)
minWakeupCount -= 1
if (minWakeupCount <= 0)
super.wakeup()
}
}
override def close(id: String): Unit = {
runOp(SelectorOperation.Close, Some(id)) {
super.close(id)
allLocallyClosedChannels += id
}
}
override def close(): Unit = {
runOp(SelectorOperation.CloseSelector, None) {
super.close()
}
}
def updateMinWakeup(count: Int): Unit = {
minWakeupCount = count
// For tests that ignore wakeup to process responses together, increase poll timeout
// to ensure that poll doesn't complete before the responses are ready
pollTimeoutOverride = Some(1000L)
// Wakeup current poll to force new poll timeout to take effect
super.wakeup()
}
def reset(): Unit = {
failures.clear()
allCachedPollData.foreach(_.minPerPoll = 1)
}
def notFailed(sockets: Seq[Socket]): Seq[Socket] = {
// Each test generates failure for exactly one failed channel
assertEquals(1, allFailedChannels.size)
val failedConnectionId = allFailedChannels.head
sockets.filterNot(socket => isSocketConnectionId(failedConnectionId, socket))
}
private def makeClosing(channel: KafkaChannel): Unit = {
val channels: util.Map[String, KafkaChannel] = JTestUtils.fieldValue(this, classOf[Selector], "channels")
val closingChannels: util.Map[String, KafkaChannel] = JTestUtils.fieldValue(this, classOf[Selector], "closingChannels")
closingChannels.put(channel.id, channel)
channels.remove(channel.id)
}
}
/**
* Proxy server used to intercept connections to SocketServer. This is used for testing SSL channels
* with buffered data. A single SSL client is expected to be created by the test using this ProxyServer.
* By default, data between the client and the server is simply transferred across to the destination by ProxyServer.
* Tests can enable buffering in ProxyServer to directly copy incoming data from the client to the server-side
* channel's `netReadBuffer` to simulate scenarios with SSL buffered data.
*/
private class ProxyServer(socketServer: SocketServer) {
val serverSocket = new ServerSocket(0)
val localPort = serverSocket.getLocalPort
val serverConnSocket = new Socket("localhost", socketServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.SSL)))
val executor = Executors.newFixedThreadPool(2)
@volatile var clientConnSocket: Socket = _
@volatile var buffer: Option[ByteBuffer] = None
executor.submit((() => {
try {
clientConnSocket = serverSocket.accept()
val serverOut = serverConnSocket.getOutputStream
val clientIn = clientConnSocket.getInputStream
var b: Int = -1
while ({b = clientIn.read(); b != -1}) {
buffer match {
case Some(buf) =>
buf.put(b.asInstanceOf[Byte])
case None =>
serverOut.write(b)
serverOut.flush()
}
}
} finally {
clientConnSocket.close()
}
}): Runnable)
executor.submit((() => {
var b: Int = -1
val serverIn = serverConnSocket.getInputStream
while ({b = serverIn.read(); b != -1}) {
clientConnSocket.getOutputStream.write(b)
}
}): Runnable)
def enableBuffering(buffer: ByteBuffer): Unit = this.buffer = Some(buffer)
def close(): Unit = {
serverSocket.close()
serverConnSocket.close()
clientConnSocket.close()
executor.shutdownNow()
assertTrue(executor.awaitTermination(10, TimeUnit.SECONDS))
}
}
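// Usage sketch (added for illustration; the helper names below are hypothetical, not part of
// this file): an SSL test starts the proxy in front of the SSL listener, points its client at
// `proxy.localPort` instead of the broker port, and can divert client bytes into the server-side
// channel's `netReadBuffer` via `enableBuffering` before closing the proxy.
//
//   val proxy = new ProxyServer(sslServer)
//   val socket = sslClientSocket(proxy.localPort)      // hypothetical SSL client helper
//   proxy.enableBuffering(serverNetReadBuffer(socket)) // hypothetical buffer accessor
//   // ... exercise the channel ...
//   proxy.close()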
}
|
Chasego/kafka
|
core/src/test/scala/unit/kafka/network/SocketServerTest.scala
|
Scala
|
apache-2.0
| 102,648
|
package com.uebercomputing.io
import java.io.FileFilter
import java.io.File
/**
* Accept directories, and files whose name ends with one of the given extensions.
*/
class FileExtensionFilter(extensions: String*) extends FileFilter {
def accept(path: File): Boolean = {
val result = if (path.isDirectory()) true
else {
val name = path.getName
extensions.foldLeft(false)((resultSoFar, currSuffix) => resultSoFar || name.endsWith(currSuffix))
}
result
}
}
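/**
 * Usage sketch added for illustration (the directory path and extensions are hypothetical):
 * keep subdirectories plus files ending in ".txt" or ".md", using the standard
 * `File.listFiles(FileFilter)` overload. Note that `listFiles` returns null if `dir` is not
 * a directory.
 */
object FileExtensionFilterExample {
  def textLikeFiles(dir: File): Array[File] =
    dir.listFiles(new FileExtensionFilter(".txt", ".md"))
}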
|
medale/extension-sorter
|
src/main/scala/com/uebercomputing/io/FileExtensionFilter.scala
|
Scala
|
apache-2.0
| 475
|
package org.brandonhaynes.bfs
import java.io.{DataInput, DataOutput}
import scala.language.implicitConversions
import org.apache.hadoop.io._
/** Implicit helpers useful for converting primitives into graph-compatible entities (ids and metadata) */
object Vertex {
/** Implicitly convert a long value into a vertex id */
implicit def toWritableId(id:Long) = new VertexId(id)
/** Implicitly convert a long value into a vertex with that distance and no adjacencies */
implicit def toWritableVertex(distance:Long) = new VertexMetadata(distance)
/** Implicitly convert a traversable sequence of adjacencies into a vertex with no distance */
implicit def toWritableVertex(adjacencies:TraversableOnce[Long]) = new VertexMetadata(adjacencies)
/** Implicitly convert a distance/adjacency pair into a vertex */
implicit def toWritableVertex(pair:(Option[Long], TraversableOnce[Long])) = new VertexMetadata(pair)
/** Implicitly convert a Hadoop Text instance into a vertex */
private[brandonhaynes] implicit def toWritableVertex(value:Text) = new VertexMetadata(value)
/** Implicitly convert a vertex into a Hadoop Text instance */
private[brandonhaynes] implicit def toText(metadata:VertexMetadata) = new Text(metadata.toString)
}
/** Utility object for parsing strings as vertices */
object VertexMetadata {
// Vertices with defined distances, e.g. "(999, [1,2,3])"
private val pairPattern = """^\((\d+),\W*\[((?:\d++(?:,\W*)?)*+)\]\)""".r
// Vertices without defined distances, e.g., "[1,2,3]"
private val adjacencyPattern = """^\[((?:\d++(?:,\W*+)?)*+)\]""".r
/** Parse a vertex as one of the following forms
* Empty string === (None, [])
* Without defined distance [1,2,3] === (None, [1,2,3])
* With defined distance (999, [1,2,3]) === (999, [1,2,3])
*
* @param text String to parse
* @return A valid vertex instance (empty if parse fails)
*/
protected def parse(text:String):(Option[Long], List[Long]) =
text match {
case "" => (None, Nil)
case pairPattern(distance, adjacencies) => parse(distance, adjacencies)
case adjacencyPattern(adjacencies) => parse("", adjacencies)
case _ => throw new IllegalArgumentException("Could not parse argument '%s'".format(text))
}
/** Parse a string containing a distance value and comma-separated list of adjacencies
*
* @param distanceText String containing a distance integer (or either "None" or "" if none)
* @param adjacencyText Comma-separated list of adjacency identifiers
* @return The pair (optional distance, list of adjacencies)
*/
private def parse(distanceText:String, adjacencyText:String) =
(if(!distanceText.isEmpty && distanceText != "None") Some(distanceText.toLong) else None,
adjacencyText.split(",").map(_.trim).filterNot(_.isEmpty).map(_.toLong).toList)
}
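/**
 * Worked sketch of the formats documented above (values are illustrative only):
 * "" parses to (None, Nil), "[1,2,3]" to (None, List(1, 2, 3)), and
 * "(999, [1,2,3])" to (Some(999), List(1, 2, 3)).
 */
object VertexMetadataExamples {
  def samples: Seq[VertexMetadata] = Seq(
    new VertexMetadata(""),
    new VertexMetadata("[1,2,3]"),
    new VertexMetadata("(999, [1,2,3])"))
}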
/** Class that represents a vertex identifier, suitable for map/reduce graph algorithms.
*
* (This class is essentially a wrapper over a single integer value with good utility/helper function support.)
*
* Note that the identifier is mutable only to support the Hadoop writable interface, and only mutated through
* the readFields function. Care has been taken to limit the scope with which post-construction mutation of
* the identifier may be performed.
*
* @param _id Vertex identifier
*/
class VertexId(private var _id:Long) extends WritableComparable[VertexId] {
/** Create a vertex with an undefined id; use of this constructor should be avoided wherever possible */
protected def this() = this(0)
def id = _id
override def write(output:DataOutput) = output.writeUTF(toString)
override def readFields(input:DataInput) = _id = input.readUTF.toLong
override def toString = id.toString
override def hashCode = id.hashCode
override def compareTo(other:VertexId) = id.compareTo(other.id)
override def equals(other:Any) = other match {
case that:VertexId => that.getClass == getClass && that.id == id
case _ => false
}
}
/** Class that represents the metadata associated with a vertex for use under SS-BFS.
*
* This class exists as a thin wrapper over a (Option[Long], Traversable[Long]) pair.
*
* Note that the metadata are mutable only to support the Hadoop writable interface, and only mutated through
* the readFields function. Care has been taken to limit the scope with which post-construction mutation of
* the identifier may be performed.
*
* @param _distance Distance associated with the given vertex
* @param _adjacencies Set of adjacencies for this vertex (multiple edges between vertices are allowed)
*/
class VertexMetadata(private var _distance:Option[Long], private var _adjacencies:List[Long]) extends Writable {
/** Create a vertex with no known distance and no adjacencies */
def this() = this(None:Option[Long], Nil)
/** Create a vertex with the given (optional) distance and traversable set of adjacencies */
def this(pair:(Option[Long], TraversableOnce[Long])) = this(pair._1, pair._2.toList)
/** Create a vertex with the given (optional) integer distance and traversable set of adjacencies */
def this(distance:Option[Int], adjacencies:TraversableOnce[Long]) = this(distance.map(_.toLong), adjacencies.toList)
/** Create a vertex with no known distance and the traversable set of adjacencies */
def this(adjacencies:TraversableOnce[Long]) = this(None:Option[Long], adjacencies.toList)
/** Create a vertex with the given (known) distance and no adjacencies */
def this(distance:Long) = this(Option(distance), Nil)
/** Create a vertex by parsing the given string for metadata; see VertexMetadata.parse for valid formats */
def this(text:String) = this(VertexMetadata.parse(text))
/** Create a vertex by parsing the given Text instance for metadata; see VertexMetadata.parse for valid formats */
def this(text:Text) = this(text.toString)
lazy val distance = _distance
lazy val adjacencies = _adjacencies
def write(output:DataOutput) = output.writeUTF(toString)
def readFields(input:DataInput) =
VertexMetadata.parse(input.readUTF()) match {
case (newDistance, newAdjacencies) =>
_distance = newDistance
_adjacencies = newAdjacencies
}
override def toString =
(distance, adjacencies) match {
case (None, Nil) => ""
case (None, _) => "[" + adjacencies.mkString(",") + "]"
case (Some(knownDistance), _) => "(%s, [%s])".format(knownDistance, adjacencies.mkString(","))
}
override def equals(other: Any) = other match {
case that:VertexMetadata => that.getClass == getClass &&
that.distance == distance &&
that.adjacencies == adjacencies
case _ => false
}
override def hashCode = 31 * (distance.hashCode ^ adjacencies.hashCode)
}
|
BrandonHaynes/timr
|
src/org/brandonhaynes/bfs/Vertex.scala
|
Scala
|
mit
| 6,866
|
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package common.build
//#assets-builder
package controllers.admin
import _root_.controllers.AssetsMetadata
import play.api.http.HttpErrorHandler
import javax.inject._
class Assets @Inject() (errorHandler: HttpErrorHandler, assetsMetadata: AssetsMetadata) extends controllers.AssetsBuilder(errorHandler, assetsMetadata)
//#assets-builder
import play.api.mvc._
class HomeController @Inject()(components: ControllerComponents) extends AbstractController(components) {
def index = Action(Ok)
}
|
wsargent/playframework
|
documentation/manual/working/commonGuide/build/code/SubProjectsAssetsBuilder.scala
|
Scala
|
apache-2.0
| 574
|
package lms
import com.oracle.truffle.api._
import com.oracle.truffle.api.frame._
import com.oracle.truffle.api.nodes._
import com.oracle.truffle.api.nodes.Node._
import scala.annotation.target.field
import org.scalatest._
trait BooleanType extends Base with Types {
case class BooleanAnd(@(Child @field) x: Exp[Boolean], @(Child @field) y: Exp[Boolean]) extends Def[Boolean] {
def execute(frame: VirtualFrame) = {
x.execute(frame) && y.execute(frame)
}
}
case class BooleanNot(@(Child @field) x: Exp[Boolean]) extends Def[Boolean] {
def execute(frame: VirtualFrame) = {
!x.execute(frame)
}
}
def boolean_and(x: Exp[Boolean], y: Exp[Boolean]): Exp[Boolean] = x match {
case Const(true) => y
case Const(false) => false
case _ => reflect(BooleanAnd(x, y))
}
def boolean_not(x: Exp[Boolean]): Exp[Boolean] = x match {
case Const(true) => false
case Const(false) => true
case _ => reflect(BooleanNot(x))
}
implicit class BooleanOps(x: Exp[Boolean]) {
def &&(y: Exp[Boolean]): Exp[Boolean] = boolean_and(x, y)
def unary_!(): Exp[Boolean] = boolean_not(x)
}
}
|
RomanTsegelskyi/lms-truffle
|
src/main/scala/lms/BooleanType.scala
|
Scala
|
gpl-2.0
| 1,144
|
package com.spotsinc.publisher.avro
import java.io.{ByteArrayOutputStream, File, FileOutputStream}
import java.util.UUID
import com.sksamuel.avro4s._
import com.spotify.google.cloud.pubsub.client.Message.encode
import com.spotify.google.cloud.pubsub.client.{Message, Publisher, Pubsub}
import org.apache.avro.Schema
import scala.collection.JavaConversions._
class AvroPublisher[T] {
private val KEY_VERSION = "version"
private val KEY_UUID = "uuid"
private val messageAttributes: (String, String) => Map[String, String] = (version: String, uuid: String) => {
Map(KEY_VERSION -> version, KEY_UUID -> uuid)
}
val pubsub: Pubsub = Pubsub.builder()
.build()
val publisher: Publisher = Publisher.builder()
.pubsub(pubsub)
.project(sys.env.getOrElse("ENV_GCC_PROJECT", ""))
.concurrency(128)
.build()
def createAvroSchema(message: T)(implicit schema: SchemaFor[T]): Schema = {
AvroSchema[T]
}
def serializeBinaryAvroMessage(message: Seq[T])(implicit schema: SchemaFor[T], toRecord: ToRecord[T]): Array[Byte] = {
val binarySerializer = new ByteArrayOutputStream()
val output = AvroOutputStream.data[T](binarySerializer)
output.write(message)
output.close()
binarySerializer.toByteArray
}
def deserializeBinaryAvroMessage(message: Array[Byte])(implicit schema: SchemaFor[T], fromRecord: FromRecord[T]): Seq[T] = {
val input = AvroInputStream.data[T](message)
val results = input.iterator.toSeq
input.close()
results
}
def publishBinary(message: Array[Byte], topic: String, version: String = "1.0.0", uuid: String = UUID.randomUUID().toString,
additionalAttributes: Map[String, String] = Map()): String = {
val attributes = messageAttributes(version, uuid) ++ additionalAttributes
val messageToPublish = Message.builder().attributes(attributes)
.data(encode(message))
.build()
publisher.publish(topic, messageToPublish).get()
}
}
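/**
 * Usage sketch added for illustration (the case class, topic name, and version are hypothetical):
 * serialize a batch of events with avro4s and publish the binary payload through the publisher
 * above, relying on avro4s' derived SchemaFor/ToRecord instances for case classes.
 */
object AvroPublisherExample {
  case class SpotEvent(id: String, price: Double)

  def publishSpots(events: Seq[SpotEvent]): String = {
    val publisher = new AvroPublisher[SpotEvent]
    val payload = publisher.serializeBinaryAvroMessage(events)
    publisher.publishBinary(payload, topic = "spot-events", version = "1.0.0")
  }
}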
|
LocalInc/local-library-publisher
|
publisher/src/main/scala/AvroPublisher.scala
|
Scala
|
apache-2.0
| 1,971
|
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.model
import com.waz.utils.crypto.AESUtils
import com.waz.utils.{JsonDecoder, JsonEncoder}
import org.json.JSONObject
import com.wire.cryptobox.PreKey
package object otr {
implicit lazy val PreKeyEncoder: JsonEncoder[PreKey] = new JsonEncoder[PreKey] {
override def apply(v: PreKey): JSONObject = JsonEncoder { o =>
o.put("id", v.id)
o.put("key", AESUtils.base64(v.data))
}
}
implicit lazy val PreKeyDecoder: JsonDecoder[PreKey] = new JsonDecoder[PreKey] {
override def apply(implicit js: JSONObject): PreKey = new PreKey(js.getInt("id"), AESUtils.base64(js.getString("key")))
}
}
|
wireapp/wire-android-sync-engine
|
zmessaging/src/main/scala/com/waz/model/otr/package.scala
|
Scala
|
gpl-3.0
| 1,333
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.tensor.{FloatType, Tensor}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* This file implements Batch Normalization as described in the paper:
* "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift"
* by Sergey Ioffe, Christian Szegedy
* This implementation is useful for inputs coming from convolution layers.
* For non-convolutional layers, see [[BatchNormalization]].
* The operation implemented is:
*
* ( x - mean(x) )
* y = -------------------- * gamma + beta
* standard-deviation(x)
*
* where gamma and beta are learnable parameters.
* The learning of gamma and beta is optional.
*/
@SerialVersionUID(- 9106336963903528047L)
class SpatialBatchNormalization[T: ClassTag](
nOutput: Int, eps: Double = 1e-5, momentum: Double = 0.1, affine: Boolean = true,
initWeight: Tensor[T] = null,
initBias: Tensor[T] = null,
initGradWeight: Tensor[T] = null,
initGradBias: Tensor[T] = null, dataFormat: DataFormat = DataFormat.NCHW)(
implicit ev: TensorNumeric[T])
extends BatchNormalization[T](nOutput, eps, momentum, affine,
initWeight, initBias, initGradWeight, initGradBias) {
override val nDim = 4
override def updateOutput(input: Tensor[T]): Tensor[T] = {
checkInputDim(input)
output.resizeAs(input)
_input.set(input)
makeBatch(_input)
val nInput = _input.size(channelDim)
if (runningMean.nElement == 0 || runningMean.nElement < nInput) {
initializeBuffer(nInput)
}
saveMean.resizeAs(runningMean).zero
saveStd.resizeAs(runningVar).fill(ev.zero)
if (dataFormat == DataFormat.NCHW) {
if (train) {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateOutputNCHWTrainFloat(
_input.asInstanceOf[Tensor[Float]], output.asInstanceOf[Tensor[Float]],
saveMean.asInstanceOf[Tensor[Float]], saveStd.asInstanceOf[Tensor[Float]],
runningMean.asInstanceOf[Tensor[Float]], runningVar.asInstanceOf[Tensor[Float]],
weight.asInstanceOf[Tensor[Float]], bias.asInstanceOf[Tensor[Float]],
eps.toFloat, momentum.toFloat, needFix = needFix)
} else {
SpatialBatchNormalization.updateOutputNCHWTrainDouble(
_input.asInstanceOf[Tensor[Double]], output.asInstanceOf[Tensor[Double]],
saveMean.asInstanceOf[Tensor[Double]], saveStd.asInstanceOf[Tensor[Double]],
runningMean.asInstanceOf[Tensor[Double]], runningVar.asInstanceOf[Tensor[Double]],
weight.asInstanceOf[Tensor[Double]], bias.asInstanceOf[Tensor[Double]],
eps, momentum, needFix = needFix)
}
} else {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateOutputNCHWInferFloat(
_input.asInstanceOf[Tensor[Float]], output.asInstanceOf[Tensor[Float]],
runningMean.asInstanceOf[Tensor[Float]], runningVar.asInstanceOf[Tensor[Float]],
weight.asInstanceOf[Tensor[Float]], bias.asInstanceOf[Tensor[Float]], eps.toFloat)
} else {
SpatialBatchNormalization.updateOutputNCHWInferDouble(
_input.asInstanceOf[Tensor[Double]], output.asInstanceOf[Tensor[Double]],
runningMean.asInstanceOf[Tensor[Double]], runningVar.asInstanceOf[Tensor[Double]],
weight.asInstanceOf[Tensor[Double]], bias.asInstanceOf[Tensor[Double]], eps)
}
}
} else {
if (train) {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateOutputNHWCTrainFloat(
_input.asInstanceOf[Tensor[Float]], output.asInstanceOf[Tensor[Float]],
saveMean.asInstanceOf[Tensor[Float]], saveStd.asInstanceOf[Tensor[Float]],
runningMean.asInstanceOf[Tensor[Float]], runningVar.asInstanceOf[Tensor[Float]],
weight.asInstanceOf[Tensor[Float]], bias.asInstanceOf[Tensor[Float]],
eps.toFloat, momentum.toFloat)
} else {
SpatialBatchNormalization.updateOutputNHWCTrainDouble(
_input.asInstanceOf[Tensor[Double]], output.asInstanceOf[Tensor[Double]],
saveMean.asInstanceOf[Tensor[Double]], saveStd.asInstanceOf[Tensor[Double]],
runningMean.asInstanceOf[Tensor[Double]], runningVar.asInstanceOf[Tensor[Double]],
weight.asInstanceOf[Tensor[Double]], bias.asInstanceOf[Tensor[Double]],
eps.toFloat, momentum.toFloat)
}
} else {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateOutputNHWCInferFloat(
_input.asInstanceOf[Tensor[Float]], output.asInstanceOf[Tensor[Float]],
runningMean.asInstanceOf[Tensor[Float]], runningVar.asInstanceOf[Tensor[Float]],
weight.asInstanceOf[Tensor[Float]], bias.asInstanceOf[Tensor[Float]], eps.toFloat)
} else {
SpatialBatchNormalization.updateOutputNHWCInferDouble(
_input.asInstanceOf[Tensor[Double]], output.asInstanceOf[Tensor[Double]],
runningMean.asInstanceOf[Tensor[Double]], runningVar.asInstanceOf[Tensor[Double]],
weight.asInstanceOf[Tensor[Double]], bias.asInstanceOf[Tensor[Double]], eps)
}
}
}
output
}
override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
_gradOutput.set(gradOutput)
makeBatch(_gradOutput)
gxMean.zero()
gMean.zero()
if (dataFormat == DataFormat.NCHW) {
if (train) {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateGradInputNCHWTrainFloat(
_input.asInstanceOf[Tensor[Float]], _gradOutput.asInstanceOf[Tensor[Float]],
gradInput.asInstanceOf[Tensor[Float]], weight.asInstanceOf[Tensor[Float]],
saveMean.asInstanceOf[Tensor[Float]], saveStd.asInstanceOf[Tensor[Float]],
gMean.asInstanceOf[Tensor[Float]], gxMean.asInstanceOf[Tensor[Float]])
} else {
SpatialBatchNormalization.updateGradInputNCHWTrainDouble(
_input.asInstanceOf[Tensor[Double]], _gradOutput.asInstanceOf[Tensor[Double]],
gradInput.asInstanceOf[Tensor[Double]], weight.asInstanceOf[Tensor[Double]],
saveMean.asInstanceOf[Tensor[Double]], saveStd.asInstanceOf[Tensor[Double]],
gMean.asInstanceOf[Tensor[Double]], gxMean.asInstanceOf[Tensor[Double]])
}
} else {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateGradInputNCHWInferFloat(
_gradOutput.asInstanceOf[Tensor[Float]],
gradInput.asInstanceOf[Tensor[Float]], weight.asInstanceOf[Tensor[Float]],
bias.asInstanceOf[Tensor[Float]])
} else {
SpatialBatchNormalization.updateGradInputNCHWInferDouble(
_gradOutput.asInstanceOf[Tensor[Double]],
gradInput.asInstanceOf[Tensor[Double]], weight.asInstanceOf[Tensor[Double]],
bias.asInstanceOf[Tensor[Double]])
}
}
} else {
if (train) {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateGradInputNHWCTrainFloat(
_input.asInstanceOf[Tensor[Float]], _gradOutput.asInstanceOf[Tensor[Float]],
gradInput.asInstanceOf[Tensor[Float]], weight.asInstanceOf[Tensor[Float]],
saveMean.asInstanceOf[Tensor[Float]], saveStd.asInstanceOf[Tensor[Float]],
gMean.asInstanceOf[Tensor[Float]], gxMean.asInstanceOf[Tensor[Float]])
} else {
SpatialBatchNormalization.updateGradInputNHWCTrainDouble(
_input.asInstanceOf[Tensor[Double]], _gradOutput.asInstanceOf[Tensor[Double]],
gradInput.asInstanceOf[Tensor[Double]], weight.asInstanceOf[Tensor[Double]],
saveMean.asInstanceOf[Tensor[Double]], saveStd.asInstanceOf[Tensor[Double]],
gMean.asInstanceOf[Tensor[Double]], gxMean.asInstanceOf[Tensor[Double]])
}
} else {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.updateGradInputNHWCInferFloat(
_gradOutput.asInstanceOf[Tensor[Float]],
gradInput.asInstanceOf[Tensor[Float]], weight.asInstanceOf[Tensor[Float]],
bias.asInstanceOf[Tensor[Float]])
} else {
SpatialBatchNormalization.updateGradInputNHWCInferDouble(
_gradOutput.asInstanceOf[Tensor[Double]],
gradInput.asInstanceOf[Tensor[Double]], weight.asInstanceOf[Tensor[Double]],
bias.asInstanceOf[Tensor[Double]])
}
}
}
gradInput
}
override def accGradParameters(input: Tensor[T], gradOutput: Tensor[T]): Unit = {
if (weight == null || scaleW == 0) {
return
}
if (dataFormat == DataFormat.NCHW) {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.accGradientNCHWFloat(_gradOutput.asInstanceOf[Tensor[Float]],
gradWeight.asInstanceOf[Tensor[Float]], gradBias.asInstanceOf[Tensor[Float]],
_input.asInstanceOf[Tensor[Float]], saveMean.asInstanceOf[Tensor[Float]],
saveStd.asInstanceOf[Tensor[Float]], scaleW.toFloat, scaleB.toFloat)
} else {
SpatialBatchNormalization.accGradientNCHWDouble(_gradOutput.asInstanceOf[Tensor[Double]],
gradWeight.asInstanceOf[Tensor[Double]], gradBias.asInstanceOf[Tensor[Double]],
_input.asInstanceOf[Tensor[Double]], saveMean.asInstanceOf[Tensor[Double]],
saveStd.asInstanceOf[Tensor[Double]], scaleW, scaleB)
}
} else {
if (ev.getType() == FloatType) {
SpatialBatchNormalization.accGradientNHWCFloat(_gradOutput.asInstanceOf[Tensor[Float]],
gradWeight.asInstanceOf[Tensor[Float]], gradBias.asInstanceOf[Tensor[Float]],
_input.asInstanceOf[Tensor[Float]], saveMean.asInstanceOf[Tensor[Float]],
saveStd.asInstanceOf[Tensor[Float]], scaleW.toFloat, scaleB.toFloat)
} else {
SpatialBatchNormalization.accGradientNHWCDouble(_gradOutput.asInstanceOf[Tensor[Double]],
gradWeight.asInstanceOf[Tensor[Double]], gradBias.asInstanceOf[Tensor[Double]],
_input.asInstanceOf[Tensor[Double]], saveMean.asInstanceOf[Tensor[Double]],
saveStd.asInstanceOf[Tensor[Double]], scaleW, scaleB)
}
}
}
override def toString(): String = {
s"${getPrintName}[${ev.getType()}]($nOutput, $eps, $momentum, $affine)"
}
}
object SpatialBatchNormalization {
def apply[@specialized(Float, Double) T: ClassTag](
nOutput: Int,
eps: Double = 1e-5,
momentum: Double = 0.1,
affine: Boolean = true,
initWeight: Tensor[T] = null,
initBias: Tensor[T] = null,
initGradWeight: Tensor[T] = null,
initGradBias: Tensor[T] = null,
dataFormat: DataFormat = DataFormat.NCHW
)(implicit ev: TensorNumeric[T])
: SpatialBatchNormalization[T] = {
new SpatialBatchNormalization[T](nOutput, eps, momentum, affine,
initWeight, initBias, initGradWeight, initGradBias, dataFormat)
}
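// Construction sketch (added for illustration; the sizes are hypothetical): a 64-channel layer
// normalizing NHWC convolution outputs as y = (x - mean(x)) / sqrt(var(x) + eps) * gamma + beta,
// using the default eps and momentum.
//
//   val bn = SpatialBatchNormalization[Float](nOutput = 64, dataFormat = DataFormat.NHWC)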
private[bigdl] def updateOutputNHWCInferFloat(input: Tensor[Float], output: Tensor[Float],
mean: Tensor[Float], variance: Tensor[Float], scale: Tensor[Float], offset: Tensor[Float],
eps: Float): Unit = {
require(input.isContiguous(), "BatchNorm NHWC require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val nChannels = input.size(4)
val n = input.nElement()
val meanData = mean.storage().array()
val meanOffset = mean.storageOffset() - 1
val varData = variance.storage().array()
val varOffset = variance.storageOffset() - 1
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
var i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps).toFloat
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(c + meanOffset)) * invStd * scaleData(scaleOffset + c) +
offsetData(offsetOffset + c)
c += 1
}
i += nChannels
}
} else {
var i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps).toFloat
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(c + meanOffset)) * invStd
c += 1
}
i += nChannels
}
}
}
private[bigdl] def updateOutputNHWCInferDouble(input: Tensor[Double], output: Tensor[Double],
mean: Tensor[Double], variance: Tensor[Double], scale: Tensor[Double], offset: Tensor[Double],
eps: Double): Unit = {
require(input.isContiguous(), "BatchNorm NHWC require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val nChannels = input.size(4)
val n = input.nElement()
val meanData = mean.storage().array()
val meanOffset = mean.storageOffset() - 1
val varData = variance.storage().array()
val varOffset = variance.storageOffset() - 1
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
var i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps)
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(meanOffset + c)) * invStd * scaleData(scaleOffset + c) +
offsetData(offsetOffset + c)
c += 1
}
i += nChannels
}
} else {
var i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps)
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(meanOffset + c)) * invStd
c += 1
}
i += nChannels
}
}
}
private[bigdl] def updateOutputNHWCTrainFloat(input: Tensor[Float], output: Tensor[Float],
saveMean: Tensor[Float], saveStd: Tensor[Float], runningMean: Tensor[Float],
runningVar: Tensor[Float], scale: Tensor[Float], offset: Tensor[Float],
eps: Float, momentum: Float,
batchVar: Tensor[Float] = null, saveVar: Tensor[Float] = null): Unit = {
require(input.isContiguous(), "BatchNorm NHWC require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val nChannels = input.size(4)
if(saveMean.size(1) != nChannels) {
saveMean.resize(nChannels)
saveStd.resize(nChannels)
runningMean.resize(nChannels)
runningVar.resize(nChannels)
}
val meanData = saveMean.storage().array()
val meanOffset = saveMean.storageOffset() - 1
var i = 0
val n = input.nElement()
val frameSize = n / nChannels
while(i < n) {
var c = 0
while(c < nChannels) {
meanData(meanOffset + c) += inputData(inputOffset + i + c)
c += 1
}
i += nChannels
}
var c = 0
val runningMeanData = runningMean.storage().array()
val runningMeanDataOffset = runningMean.storageOffset() - 1
while(c < nChannels) {
meanData(meanOffset + c) /= frameSize
runningMeanData(runningMeanDataOffset + c) = meanData(meanOffset + c) * momentum +
(1 - momentum) * runningMeanData(c + runningMeanDataOffset)
c += 1
}
val stdData = saveStd.storage().array()
val stdOffset = saveStd.storageOffset() - 1
i = 0
while(i < n) {
var c = 0
while(c < nChannels) {
val diff = (inputData(inputOffset + i + c) - meanData(meanOffset + c))
stdData(stdOffset + c) += diff * diff
c += 1
}
i += nChannels
}
c = 0
val runningVarData = runningVar.storage().array()
val runningVarOffset = runningVar.storageOffset() - 1
while(c < nChannels) {
if (stdData(c + stdOffset) == 0 && eps == 0) {
stdData(c + stdOffset) = 0
if (saveVar != null) {
saveVar.setValue(c + 1, 0f)
}
if (batchVar != null) {
batchVar.setValue(c + 1, 0f)
}
} else {
val s = stdData(c + stdOffset)
val unbiasedVar = s / (frameSize - 1)
if (saveVar != null) {
saveVar.setValue(c + 1, s / frameSize)
}
if (batchVar != null) {
batchVar.setValue(c + 1, unbiasedVar)
}
stdData(c + stdOffset) = 1.0f / Math.sqrt(s / frameSize + eps).toFloat
runningVarData(c + runningVarOffset) = momentum * unbiasedVar +
(1 - momentum) * runningVarData(c + runningVarOffset)
}
c += 1
}
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(meanOffset + c)) * stdData(c + stdOffset) *
scaleData(scaleOffset + c) + offsetData(offsetOffset + c)
c += 1
}
i += nChannels
}
} else {
i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(meanOffset + c)) * stdData(c + stdOffset)
c += 1
}
i += nChannels
}
}
}
private[bigdl] def updateOutputNHWCTrainDouble(input: Tensor[Double], output: Tensor[Double],
saveMean: Tensor[Double], saveStd: Tensor[Double], runningMean: Tensor[Double],
runningVar: Tensor[Double], scale: Tensor[Double], offset: Tensor[Double],
eps: Double, momentum: Double,
batchVar: Tensor[Double] = null, saveVar: Tensor[Double] = null): Unit = {
require(input.isContiguous(), "BatchNorm NHWC require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val nChannels = input.size(4)
if(saveMean.size(1) != nChannels) {
saveMean.resize(nChannels)
saveStd.resize(nChannels)
runningMean.resize(nChannels)
runningVar.resize(nChannels)
}
val meanData = saveMean.storage().array()
val meanOffset = saveMean.storageOffset() - 1
var i = 0
val n = input.nElement()
val frameSize = n / nChannels
while(i < n) {
var c = 0
while(c < nChannels) {
meanData(c + meanOffset) += inputData(inputOffset + i + c)
c += 1
}
i += nChannels
}
var c = 0
val runningMeanData = runningMean.storage().array()
val runningMeanOffset = runningMean.storageOffset() - 1
while(c < nChannels) {
meanData(c + meanOffset) /= frameSize
runningMeanData(c + runningMeanOffset) = meanData(c + meanOffset) * momentum +
(1 - momentum) * runningMeanData(c + runningMeanOffset)
c += 1
}
val stdData = saveStd.storage().array()
val stdOffset = saveStd.storageOffset() - 1
i = 0
while(i < n) {
var c = 0
while(c < nChannels) {
val diff = (inputData(inputOffset + i + c) - meanData(c + meanOffset))
stdData(c + stdOffset) += diff * diff
c += 1
}
i += nChannels
}
c = 0
val runningVarData = runningVar.storage().array()
val runningVarOffset = runningVar.storageOffset() - 1
while(c < nChannels) {
if (stdData(c + stdOffset) == 0 && eps == 0) {
stdData(c + stdOffset) = 0
if (saveVar != null) {
saveVar.setValue(c + 1, 0f)
}
if (batchVar != null) {
batchVar.setValue(c + 1, 0f)
}
} else {
val s = stdData(c + stdOffset)
val unbiasedVar = s / (frameSize - 1)
if (saveVar != null) {
saveVar.setValue(c + 1, s / frameSize)
}
if (batchVar != null) {
batchVar.setValue(c + 1, unbiasedVar)
}
stdData(c + stdOffset) = 1.0 / Math.sqrt(s / frameSize + eps)
runningVarData(c + runningVarOffset) = momentum * unbiasedVar +
(1 - momentum) * runningVarData(c + runningVarOffset)
}
c += 1
}
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(c + meanOffset)) * stdData(c + stdOffset) *
scaleData(c + scaleOffset) + offsetData(c + offsetOffset)
c += 1
}
i += nChannels
}
} else {
i = 0
while (i < n) {
var c = 0
while (c < nChannels) {
outputData(i + outputOffset + c) = (inputData(i + inputOffset + c) -
meanData(c + meanOffset)) * stdData(c + stdOffset)
c += 1
}
i += nChannels
}
}
}
private[bigdl] def updateOutputNCHWInferFloat(input: Tensor[Float], output: Tensor[Float],
mean: Tensor[Float], variance: Tensor[Float], scale: Tensor[Float],
offset: Tensor[Float], eps: Float): Unit = {
require(input.isContiguous(), "BatchNorm NCHW require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val meanData = mean.storage().array()
val meanOffset = mean.storageOffset() - 1
val varData = variance.storage().array()
val varOffset = variance.storageOffset() - 1
val nChannels = input.size(2)
val nBatch = input.size(1)
val nFrame = input.size(3) * input.size(4)
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
var i = 0
var b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps).toFloat
outputData(i + outputOffset) = (inputData(i + inputOffset) - meanData(c + meanOffset)) *
invStd * scaleData(c + scaleOffset) + offsetData(c + offsetOffset)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
var i = 0
var b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps).toFloat
outputData(i + outputOffset) = (inputData(i + inputOffset) - meanData(c + meanOffset)) *
invStd
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
private[bigdl] def updateOutputNCHWInferDouble(input: Tensor[Double], output: Tensor[Double],
mean: Tensor[Double], variance: Tensor[Double], scale: Tensor[Double], offset: Tensor[Double],
eps: Double)
: Unit = {
require(input.isContiguous(), "BatchNorm NCHW require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val meanData = mean.storage().array()
val meanOffset = mean.storageOffset() - 1
val varData = variance.storage().array()
val varOffset = variance.storageOffset() - 1
val nChannels = input.size(2)
val nBatch = input.size(1)
val nFrame = input.size(3) * input.size(4)
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
var i = 0
var b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps)
outputData(i + outputOffset) = (inputData(i + inputOffset) - meanData(c + meanOffset)) *
invStd * scaleData(c + scaleOffset) + offsetData(c + offsetOffset)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
var i = 0
var b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
val invStd = 1 / Math.sqrt(varData(varOffset + c) + eps)
outputData(i + outputOffset) = (inputData(i + inputOffset) - meanData(c + meanOffset)) *
invStd
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
private[bigdl] def updateGradInputNHWCTrainFloat(
input: Tensor[Float],
gradOutput: Tensor[Float],
gradInput: Tensor[Float],
scale: Tensor[Float],
saveMean: Tensor[Float],
saveStd: Tensor[Float],
gMean: Tensor[Float],
gxMean: Tensor[Float]
): Unit = {
require(input.nDimension() == 4, "BN require a 4D input")
require(input.isContiguous(), "input is not contiguous")
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(4)
require(saveMean.size(1) == nChannel, "saveMean length is not consistent with channel number")
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
if (gMean.isEmpty) {
gMean.resize(nChannel)
gxMean.resize(nChannel)
}
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val gMeanData = gMean.storage().array()
val gxMeanData = gxMean.storage().array()
val n = gradOutput.nElement()
var i = 0
while(i < n) {
var c = 0
while(c < nChannel) {
gMeanData(c) += gradOutputData(i + gradOutputOffset)
gxMeanData(c) += gradOutputData(i + gradOutputOffset) *
(inputData(i + inputOffset) - saveMeanData(c + saveMeanOffset))
c += 1
i += 1
}
}
var c = 0
val size = n / nChannel
while(c < nChannel) {
gMeanData(c) /= size
gxMeanData(c) /= size
c += 1
}
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
i = 0
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * (gradOutputData(gradOutputOffset + i) - gMeanData(c) -
gxMeanData(c) * invStd * invStd * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)))
c += 1
i += 1
}
}
} else {
i = 0
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * (gradOutputData(gradOutputOffset + i) - gMeanData(c) -
gxMeanData(c) * invStd * invStd * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)))
c += 1
i += 1
}
}
}
}
private[bigdl] def updateGradInputNHWCTrainDouble(
input: Tensor[Double],
gradOutput: Tensor[Double],
gradInput: Tensor[Double],
scale: Tensor[Double],
saveMean: Tensor[Double],
saveStd: Tensor[Double],
gMean: Tensor[Double],
gxMean: Tensor[Double]
): Unit = {
require(input.nDimension() == 4, "BN require a 4D input")
require(input.isContiguous(), "input is not contiguous")
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(4)
require(saveMean.size(1) == nChannel, "saveMean length is not consistent with channel number")
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
if (gMean.isEmpty) {
gMean.resize(nChannel)
gxMean.resize(nChannel)
}
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val gMeanData = gMean.storage().array()
val gxMeanData = gxMean.storage().array()
val n = gradOutput.nElement()
var i = 0
while(i < n) {
var c = 0
while(c < nChannel) {
gMeanData(c) += gradOutputData(i + gradOutputOffset)
gxMeanData(c) += gradOutputData(i + gradOutputOffset) *
(inputData(i + inputOffset) - saveMeanData(c + saveMeanOffset))
c += 1
i += 1
}
}
var c = 0
val size = n / nChannel
while(c < nChannel) {
gMeanData(c) /= size
gxMeanData(c) /= size
c += 1
}
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
i = 0
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * (gradOutputData(gradOutputOffset + i) - gMeanData(c) -
gxMeanData(c) * invStd * invStd * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)))
c += 1
i += 1
}
}
} else {
i = 0
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * (gradOutputData(gradOutputOffset + i) - gMeanData(c) -
gxMeanData(c) * invStd * invStd * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)))
c += 1
i += 1
}
}
}
}
private[bigdl] def updateGradInputNHWCInferFloat(
gradOutput: Tensor[Float],
gradInput: Tensor[Float],
scale: Tensor[Float],
saveStd: Tensor[Float]
): Unit = {
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(4)
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val n = gradOutput.nElement()
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
var i = 0
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * gradOutputData(gradOutputOffset + i)
c += 1
i += 1
}
}
} else {
var i = 0
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * gradOutputData(gradOutputOffset + i)
c += 1
i += 1
}
}
}
}
private[bigdl] def updateGradInputNHWCInferDouble(
gradOutput: Tensor[Double],
gradInput: Tensor[Double],
scale: Tensor[Double],
saveStd: Tensor[Double]
): Unit = {
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(4)
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val n = gradOutput.nElement()
var i = 0
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * gradOutputData(gradOutputOffset + i)
c += 1
i += 1
}
}
} else {
while (i < n) {
var c = 0
while (c < nChannel) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * gradOutputData(gradOutputOffset + i)
c += 1
i += 1
}
}
}
}
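// Training-mode NCHW backward pass (single precision). gMean and gxMean hold the per-channel means
// of gradOutput and of gradOutput * (input - saveMean); the gradient is then
// gradInput = scale * invStd * (gradOutput - gMean - gxMean * invStd^2 * (input - saveMean)).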
private[bigdl] def updateGradInputNCHWTrainFloat(
input: Tensor[Float],
gradOutput: Tensor[Float],
gradInput: Tensor[Float],
scale: Tensor[Float],
saveMean: Tensor[Float],
saveStd: Tensor[Float],
gMean: Tensor[Float],
gxMean: Tensor[Float]
): Unit = {
require(input.nDimension() == 4, "BN require a 4D input")
require(input.isContiguous(), "input is not contiguous")
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(2)
require(saveMean.size(1) == nChannel, "saveMean length is not consistent with channel number")
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
if (gMean.isEmpty) {
gMean.resize(nChannel)
gxMean.resize(nChannel)
}
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val gMeanData = gMean.storage().array()
val gxMeanData = gxMean.storage().array()
val nBatch = gradOutput.size(1)
val frameSize = gradOutput.size(3) * gradOutput.size(4)
val n = gradOutput.nElement()
var b = 0
var i = 0
while(b < nBatch) {
var c = 0
while(c < nChannel) {
var k = 0
while(k < frameSize) {
gMeanData(c) += gradOutputData(i + gradOutputOffset)
gxMeanData(c) += gradOutputData(i + gradOutputOffset) *
(inputData(i + inputOffset) - saveMeanData(c + saveMeanOffset))
k += 1
i += 1
}
c += 1
}
b += 1
}
var c = 0
val size = n / nChannel
while(c < nChannel) {
gMeanData(c) /= size
gxMeanData(c) /= size
c += 1
}
i = 0
b = 0
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * (gradOutputData(gradOutputOffset + i) - gMeanData(c) -
gxMeanData(c) * invStd * invStd * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)))
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * (gradOutputData(gradOutputOffset + i) - gMeanData(c) -
gxMeanData(c) * invStd * invStd * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)))
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
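// Training-mode NCHW forward pass (single precision): accumulates the per-channel mean and squared
// deviation, updates runningMean/runningVar with the given momentum, stores 1 / sqrt(var + eps)
// in saveStd, and finally normalizes the input (applying scale and offset when scale is non-null).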
private[bigdl] def updateOutputNCHWTrainFloat(input: Tensor[Float], output: Tensor[Float],
saveMean: Tensor[Float], saveStd: Tensor[Float], runningMean: Tensor[Float],
runningVar: Tensor[Float], scale: Tensor[Float], offset: Tensor[Float],
eps: Float, momentum: Float,
batchVar: Tensor[Float] = null, saveVar: Tensor[Float] = null, needFix: Boolean = false)
: Unit = {
require(input.isContiguous(), "BatchNorm NCHW require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val nChannels = input.size(2)
val nBatch = input.size(1)
val nFrame = input.size(3) * input.size(4)
if(saveMean.size(1) != nChannels) {
saveMean.resize(nChannels)
saveStd.resize(nChannels)
runningMean.resize(nChannels)
runningVar.resize(nChannels)
}
val meanData = saveMean.storage().array()
val meanOffset = saveMean.storageOffset() - 1
var i = 0
var b = 0
while(b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
var meanSum = 0f
while(k < nFrame) {
meanSum += inputData(i + inputOffset)
k += 1
i += 1
}
meanData(c + meanOffset) += meanSum
c += 1
}
b += 1
}
val n = input.nElement()
val frameSize = n / nChannels
var c = 0
val runningMeanData = runningMean.storage().array()
val runningMeanOffset = runningMean.storageOffset() - 1
while(c < nChannels) {
meanData(c + meanOffset) /= frameSize
runningMeanData(c + runningMeanOffset) = meanData(c + meanOffset) * momentum +
(1 - momentum) * runningMeanData(c + runningMeanOffset)
c += 1
}
val stdData = saveStd.storage().array()
val stdOffset = saveStd.storageOffset() - 1
i = 0
b = 0
while(b < nBatch) {
var c = 0
while(c < nChannels) {
var k = 0
var stdSum = 0f
while(k < nFrame) {
val diff = (inputData(i + inputOffset) - meanData(c + meanOffset))
stdSum += diff * diff
k += 1
i += 1
}
stdData(c + stdOffset) += stdSum
c += 1
}
b += 1
}
c = 0
val runningVarData = runningVar.storage().array()
val runningVarOffset = runningVar.storageOffset() - 1
while(c < nChannels) {
if (stdData(c + stdOffset) == 0 && eps == 0) {
stdData(c + stdOffset) = 0
if (saveVar != null) {
saveVar.setValue(c + 1, 0f)
}
if (batchVar != null) {
batchVar.setValue(c + 1, 0f)
}
} else {
val s = stdData(c + stdOffset)
val unbiasedVar = s / (frameSize - 1)
if (saveVar != null) {
saveVar.setValue(c + 1, s / frameSize)
}
if (batchVar != null) {
batchVar.setValue(c + 1, unbiasedVar)
}
stdData(c + stdOffset) = 1.0f / Math.sqrt(s / frameSize + eps).toFloat
runningVarData(c + runningVarOffset) = momentum * unbiasedVar +
(1 - momentum) * runningVarData(c + runningVarOffset)
}
c += 1
}
if (needFix) {
c = 0
while(c < nChannels) {
meanData(c + meanOffset) = 0
stdData(c + stdOffset) = 0.0001f
c += 1
}
}
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
i = 0
b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
outputData(i + outputOffset) = (inputData(i + inputOffset) -
meanData(c + meanOffset)) * stdData(c + stdOffset) *
scaleData(c + scaleOffset) + offsetData(c + offsetOffset)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
i = 0
b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
outputData(i + outputOffset) = (inputData(i + inputOffset) -
meanData(c + meanOffset)) * stdData(c + stdOffset)
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
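// Double-precision counterpart of updateOutputNCHWTrainFloat; same algorithm with Double accumulators.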
private[bigdl] def updateOutputNCHWTrainDouble(input: Tensor[Double], output: Tensor[Double],
saveMean: Tensor[Double], saveStd: Tensor[Double], runningMean: Tensor[Double],
runningVar: Tensor[Double], scale: Tensor[Double], offset: Tensor[Double],
eps: Double, momentum: Double,
batchVar: Tensor[Double] = null, saveVar: Tensor[Double] = null, needFix: Boolean = false)
: Unit = {
require(input.isContiguous(), "BatchNorm NCHW require a contiguous input")
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val outputData = output.storage().array()
val outputOffset = output.storageOffset() - 1
val nChannels = input.size(2)
val nBatch = input.size(1)
val nFrame = input.size(3) * input.size(4)
if(saveMean.size(1) != nChannels) {
saveMean.resize(nChannels)
saveStd.resize(nChannels)
runningMean.resize(nChannels)
runningVar.resize(nChannels)
}
val meanData = saveMean.storage().array()
val meanOffset = saveMean.storageOffset() - 1
var i = 0
var b = 0
while(b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
var meanSum = 0d
while(k < nFrame) {
meanSum += inputData(i + inputOffset)
k += 1
i += 1
}
meanData(c + meanOffset) += meanSum
c += 1
}
b += 1
}
val n = input.nElement()
val frameSize = n / nChannels
var c = 0
val runningMeanData = runningMean.storage().array()
val runningMeanOffset = runningMean.storageOffset() - 1
while(c < nChannels) {
meanData(c + meanOffset) /= frameSize
runningMeanData(c + runningMeanOffset) = meanData(c + meanOffset) * momentum +
(1 - momentum) * runningMeanData(c + runningMeanOffset)
c += 1
}
val stdData = saveStd.storage().array()
val stdOffset = saveStd.storageOffset() - 1
i = 0
b = 0
while(b < nBatch) {
var c = 0
while(c < nChannels) {
var k = 0
while(k < nFrame) {
val diff = (inputData(i + inputOffset) - meanData(c + meanOffset))
stdData(c + stdOffset) += diff * diff
k += 1
i += 1
}
c += 1
}
b += 1
}
c = 0
val runningVarData = runningVar.storage().array()
val runningVarOffset = runningVar.storageOffset() - 1
while(c < nChannels) {
if (stdData(c + stdOffset) == 0 && eps == 0) {
stdData(c + stdOffset) = 0
if (saveVar != null) {
saveVar.setValue(c + 1, 0.0)
}
if (batchVar != null) {
batchVar.setValue(c + 1, 0.0)
}
} else {
val s = stdData(c + stdOffset)
val unbiasedVar = s / (frameSize - 1)
if (saveVar != null) {
saveVar.setValue(c + 1, s / frameSize)
}
if (batchVar != null) {
batchVar.setValue(c + 1, unbiasedVar)
}
stdData(c + stdOffset) = 1.0 / Math.sqrt(s / frameSize + eps)
runningVarData(c + runningVarOffset) = momentum * unbiasedVar + (1 - momentum) *
runningVarData(c + runningVarOffset)
}
c += 1
}
if (needFix) {
c = 0
while(c < nChannels) {
meanData(c + meanOffset) = 0
stdData(c + stdOffset) = 0.0001
c += 1
}
}
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
val offsetData = offset.storage().array()
val offsetOffset = offset.storageOffset() - 1
i = 0
b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
outputData(i + outputOffset) = (inputData(i + inputOffset) -
meanData(c + meanOffset)) * stdData(c + stdOffset) *
scaleData(c + scaleOffset) + offsetData(c + offsetOffset)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
i = 0
b = 0
while (b < nBatch) {
var c = 0
while (c < nChannels) {
var k = 0
while (k < nFrame) {
outputData(i + outputOffset) = (inputData(i + inputOffset) -
meanData(c + meanOffset)) * stdData(c + stdOffset)
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
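// Double-precision NCHW training backward pass. Unlike the Float variant above, gxMean is
// pre-multiplied by invStd * invStd during the per-channel reduction, so the inner loop applies it
// to (input - saveMean) directly.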
private[bigdl] def updateGradInputNCHWTrainDouble(
input: Tensor[Double],
gradOutput: Tensor[Double],
gradInput: Tensor[Double],
scale: Tensor[Double],
saveMean: Tensor[Double],
saveStd: Tensor[Double],
gMean: Tensor[Double],
gxMean: Tensor[Double]
): Unit = {
require(input.nDimension() == 4, "BN require a 4D input")
require(input.isContiguous(), "input is not contiguous")
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(2)
require(saveMean.size(1) == nChannel, "saveMean length is not consistent with channel number")
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
if (gMean.isEmpty) {
gMean.resize(saveMean.size(1))
gxMean.resize(saveMean.size(1))
}
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val gMeanData = gMean.storage().array()
val gxMeanData = gxMean.storage().array()
val nBatch = gradOutput.size(1)
val frameSize = gradOutput.size(3) * gradOutput.size(4)
val n = gradOutput.nElement()
var b = 0
var i = 0
while(b < nBatch) {
var c = 0
while(c < nChannel) {
var k = 0
while(k < frameSize) {
gMeanData(c) += gradOutputData(i + gradOutputOffset)
gxMeanData(c) += gradOutputData(i + gradOutputOffset) *
(inputData(i + inputOffset) - saveMeanData(c + saveMeanOffset))
k += 1
i += 1
}
c += 1
}
b += 1
}
var c = 0
val size = n / nChannel
while(c < nChannel) {
gMeanData(c) /= size
val invStd = saveStdData(saveStdOffset + c)
gxMeanData(c) = gxMeanData(c) * invStd * invStd / size
c += 1
}
i = 0
b = 0
if (scale != null) {
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = (gradOutputData(gradOutputOffset + i) -
gMeanData(c) - (inputData(inputOffset + i) - saveMeanData(saveMeanOffset + c)) *
gxMeanData(c)) * invStd * scaleData(scaleOffset + c)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = (gradOutputData(gradOutputOffset + i) -
gMeanData(c) - (inputData(inputOffset + i) - saveMeanData(saveMeanOffset + c)) *
gxMeanData(c)) * invStd
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
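// Inference-mode NCHW backward pass: with frozen statistics the gradient is simply gradOutput
// scaled by saveStd (and by scale when an affine scale tensor is supplied), per channel and frame.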
private[bigdl] def updateGradInputNCHWInferFloat(
gradOutput: Tensor[Float],
gradInput: Tensor[Float],
scale: Tensor[Float],
saveStd: Tensor[Float]
): Unit = {
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(2)
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val nBatch = gradOutput.size(1)
val frameSize = gradOutput.size(3) * gradOutput.size(4)
var b = 0
var i = 0
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * gradOutputData(gradOutputOffset + i)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * gradOutputData(gradOutputOffset + i)
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
private[bigdl] def updateGradInputNCHWInferDouble(
gradOutput: Tensor[Double],
gradInput: Tensor[Double],
scale: Tensor[Double],
saveStd: Tensor[Double]
): Unit = {
require(gradOutput.nDimension() == 4, "BN require a 4D gradient")
require(gradOutput.isContiguous(), "gradient is not contiguous")
val nChannel = gradOutput.size(2)
require(saveStd.size(1) == nChannel, "saveStd length is not consistent with channel number")
gradInput.resizeAs(gradOutput)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradInputData = gradInput.storage().array()
val gradInputOffset = gradInput.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val nBatch = gradOutput.size(1)
val frameSize = gradOutput.size(3) * gradOutput.size(4)
var b = 0
var i = 0
if (scale != null) {
require(scale.size(1) == nChannel, "scale length is not consistent with channel number")
val scaleData = scale.storage().array()
val scaleOffset = scale.storageOffset() - 1
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) = scaleData(scaleOffset + c) *
invStd * gradOutputData(gradOutputOffset + i)
k += 1
i += 1
}
c += 1
}
b += 1
}
} else {
while (b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while (k < frameSize) {
val invStd = saveStdData(saveStdOffset + c)
gradInputData(gradInputOffset + i) =
invStd * gradOutputData(gradOutputOffset + i)
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
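// Parameter-gradient accumulation (the four variants below cover NHWC/NCHW in Float/Double):
// gradWeight += scaleW * gradOutput * (input - saveMean) * saveStd and
// gradBias += scaleB * gradOutput, reduced over every position of each channel.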
private[bigdl] def accGradientNHWCFloat(gradOutput: Tensor[Float],
gradWeight: Tensor[Float], gradBias: Tensor[Float],
input: Tensor[Float], saveMean: Tensor[Float],
saveStd: Tensor[Float], scaleW: Float, scaleB: Float): Unit = {
require(gradOutput.isContiguous(), "gradOutput must be contiguous")
require(gradWeight.isContiguous(), "gradWeight must be contiguous")
require(gradBias.isContiguous(), "gradBias must be contiguous")
require(input.isContiguous(), "gradWeight must be contiguous")
require(saveMean.nDimension() == 1, "saveMean must be 1D")
require(saveStd.nDimension() == 1, "saveStd must be 1D")
val nChannel = saveMean.size(1)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradWeightData = gradWeight.storage().array()
val gradWeightOffset = gradWeight.storageOffset() - 1
val gradBiasData = gradBias.storage().array()
val gradBiasOffset = gradBias.storageOffset() - 1
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
var i = 0
val n = input.nElement()
while(i < n) {
var c = 0
while(c < nChannel) {
val g = gradOutputData(gradOutputOffset + i)
gradWeightData(c + gradWeightOffset) += g *
(inputData(inputOffset + i) - saveMeanData(saveMeanOffset + c)) *
saveStdData(saveStdOffset + c) * scaleW
gradBiasData(c + gradBiasOffset) += g * scaleB
i += 1
c += 1
}
}
}
private[bigdl] def accGradientNHWCDouble(gradOutput: Tensor[Double],
gradWeight: Tensor[Double], gradBias: Tensor[Double],
input: Tensor[Double], saveMean: Tensor[Double],
saveStd: Tensor[Double], scaleW: Double, scaleB: Double): Unit = {
require(gradOutput.isContiguous(), "gradOutput must be contiguous")
require(gradWeight.isContiguous(), "gradWeight must be contiguous")
require(gradBias.isContiguous(), "gradBias must be contiguous")
require(input.isContiguous(), "gradWeight must be contiguous")
require(saveMean.nDimension() == 1, "saveMean must be 1D")
require(saveStd.nDimension() == 1, "saveStd must be 1D")
val nChannel = saveMean.size(1)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradWeightData = gradWeight.storage().array()
val gradWeightOffset = gradWeight.storageOffset() - 1
val gradBiasData = gradBias.storage().array()
val gradBiasOffset = gradBias.storageOffset() - 1
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
var i = 0
val n = input.nElement()
while(i < n) {
var c = 0
while(c < nChannel) {
val g = gradOutputData(gradOutputOffset + i)
gradWeightData(c + gradWeightOffset) += g *
(inputData(inputOffset + i) - saveMeanData(saveMeanOffset + c)) *
saveStdData(saveStdOffset + c) * scaleW
gradBiasData(c + gradBiasOffset) += g * scaleB
i += 1
c += 1
}
}
}
private[bigdl] def accGradientNCHWFloat(gradOutput: Tensor[Float],
gradWeight: Tensor[Float], gradBias: Tensor[Float],
input: Tensor[Float], saveMean: Tensor[Float],
saveStd: Tensor[Float], scaleW: Float, scaleB: Float): Unit = {
require(gradOutput.isContiguous(), "gradOutput must be contiguous")
require(gradWeight.isContiguous(), "gradWeight must be contiguous")
require(gradBias.isContiguous(), "gradBias must be contiguous")
require(input.isContiguous(), "gradWeight must be contiguous")
require(saveMean.nDimension() == 1, "saveMean must be 1D")
require(saveStd.nDimension() == 1, "saveStd must be 1D")
val nChannel = saveMean.size(1)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradWeightData = gradWeight.storage().array()
val gradWeightOffset = gradWeight.storageOffset() - 1
val gradBiasData = gradBias.storage().array()
val gradBiasOffset = gradBias.storageOffset() - 1
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val nBatch = gradOutput.size(1)
val frameSize = gradOutput.size(3) * gradOutput.size(4)
var i = 0
var b = 0
while(b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while(k < frameSize) {
val g = gradOutputData(gradOutputOffset + i)
gradWeightData(c + gradWeightOffset) += g *
(inputData(inputOffset + i) - saveMeanData(saveMeanOffset + c)) *
saveStdData(saveStdOffset + c) * scaleW
gradBiasData(c + gradBiasOffset) += g * scaleB
k += 1
i += 1
}
c += 1
}
b += 1
}
}
private[bigdl] def accGradientNCHWDouble(gradOutput: Tensor[Double],
gradWeight: Tensor[Double], gradBias: Tensor[Double],
input: Tensor[Double], saveMean: Tensor[Double],
saveStd: Tensor[Double], scaleW: Double, scaleB: Double): Unit = {
require(gradOutput.isContiguous(), "gradOutput must be contiguous")
require(gradWeight.isContiguous(), "gradWeight must be contiguous")
require(gradBias.isContiguous(), "gradBias must be contiguous")
require(input.isContiguous(), "gradWeight must be contiguous")
require(saveMean.nDimension() == 1, "saveMean must be 1D")
require(saveStd.nDimension() == 1, "saveStd must be 1D")
val nChannel = saveMean.size(1)
val gradOutputData = gradOutput.storage().array()
val gradOutputOffset = gradOutput.storageOffset() - 1
val gradWeightData = gradWeight.storage().array()
val gradWeightOffset = gradWeight.storageOffset() - 1
val gradBiasData = gradBias.storage().array()
val gradBiasOffset = gradBias.storageOffset() - 1
val inputData = input.storage().array()
val inputOffset = input.storageOffset() - 1
val saveMeanData = saveMean.storage().array()
val saveMeanOffset = saveMean.storageOffset() - 1
val saveStdData = saveStd.storage().array()
val saveStdOffset = saveStd.storageOffset() - 1
val nBatch = gradOutput.size(1)
val frameSize = gradOutput.size(3) * gradOutput.size(4)
var i = 0
var b = 0
while(b < nBatch) {
var c = 0
while (c < nChannel) {
var k = 0
while(k < frameSize) {
val g = gradOutputData(gradOutputOffset + i)
gradWeightData(c + gradWeightOffset) += scaleW * (inputData(inputOffset + i) -
saveMeanData(saveMeanOffset + c)) * g * saveStdData(saveStdOffset + c)
gradBiasData(c + gradBiasOffset) += g * scaleB
k += 1
i += 1
}
c += 1
}
b += 1
}
}
}
|
qiuxin2012/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/SpatialBatchNormalization.scala
|
Scala
|
apache-2.0
| 63,943
|
package org.jetbrains.plugins.scala.lang.resolve2
/**
* Pavel.Fatin, 02.02.2010
*/
class ImportPathTest extends ResolveTestBase {
override def folderPath: String = {
super.folderPath + "import/path/"
}
protected override def rootPath(): String = folderPath
def testDir = doTest
//TODO ok
// def testDirAndLocal = doTest
def testDirThenLocal = doTest
//TODO ok
// def testTwoLocal = doTest
}
|
triggerNZ/intellij-scala
|
test/org/jetbrains/plugins/scala/lang/resolve2/ImportPathTest.scala
|
Scala
|
apache-2.0
| 415
|
package algorithms.recursion
import algorithms.recursion.PasswordCracker.solution
import org.scalatest.WordSpec
class PasswordCrackerTest extends WordSpec {
"because can do must we what" in {
val result = solution(
"because can do must we what".split(" ").toList,
"wedowhatwemustbecausewecan"
)
assert(result.get.mkString(" ") === "we do what we must because we can")
}
}
|
1178615156/hackerrank
|
src/test/scala/algorithms/recursion/PasswordCrackerTest.scala
|
Scala
|
apache-2.0
| 417
|
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import java.io.Reader
import java.util
import monix.execution.Ack.{Continue, Stop}
import monix.execution.atomic.Atomic
import monix.execution.cancelables.BooleanCancelable
import monix.execution._
import monix.execution.exceptions.APIContractViolationException
import monix.execution.internal.Platform
import scala.util.control.NonFatal
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.annotation.tailrec
import scala.concurrent.{blocking, Future}
import scala.util.{Failure, Success}
private[reactive] final class CharsReaderObservable(in: Reader, chunkSize: Int) extends Observable[Array[Char]] {
require(chunkSize > 0, "chunkSize > 0")
private[this] val wasSubscribed = Atomic(false)
def unsafeSubscribeFn(out: Subscriber[Array[Char]]): Cancelable = {
if (!wasSubscribed.compareAndSet(false, true)) {
out.onError(APIContractViolationException("ReaderObservable does not support multiple subscribers"))
Cancelable.empty
} else {
val buffer = new Array[Char](chunkSize)
// A token that will be checked for cancellation
val cancelable = BooleanCancelable()
val em = out.scheduler.executionModel
// Schedule first cycle
reschedule(Continue, buffer, out, cancelable, em)(out.scheduler)
cancelable
}
}
private def reschedule(
ack: Future[Ack],
b: Array[Char],
out: Subscriber[Array[Char]],
c: BooleanCancelable,
em: ExecutionModel)(implicit s: Scheduler): Unit = {
ack.onComplete {
case Success(next) =>
// Should we continue, or should we close the stream?
if (next == Continue && !c.isCanceled) {
// Using Scala's BlockContext, since this is potentially a blocking call
blocking(fastLoop(b, out, c, em, 0))
}
// else stop
case Failure(ex) =>
reportFailure(ex)
}
}
@tailrec
private def fastLoop(
buffer: Array[Char],
out: Subscriber[Array[Char]],
c: BooleanCancelable,
em: ExecutionModel,
syncIndex: Int)(implicit s: Scheduler): Unit = {
// Dealing with mutable status in order to keep the
// loop tail-recursive :-(
var errorThrown: Throwable = null
var ack: Future[Ack] = Continue
var streamErrors = true
try {
val length = fillBuffer(in, buffer)
// From this point on, whatever happens is a protocol violation
streamErrors = false
ack = if (length >= 0) {
// As long as the returned length is positive, it means
// we haven't reached EOF. Making a copy of the array, because
// we cannot send our mutable buffer downstream.
val next = util.Arrays.copyOf(buffer, length)
out.onNext(next)
} else {
out.onComplete()
Stop
}
} catch {
case ex if NonFatal(ex) =>
errorThrown = ex
}
if (errorThrown == null) {
// Logic for collapsing execution loops
val nextIndex =
if (ack == Continue) em.nextFrameIndex(syncIndex)
else if (ack == Stop) -1
else 0
if (!c.isCanceled) {
if (nextIndex > 0)
fastLoop(buffer, out, c, em, nextIndex)
else if (nextIndex == 0)
reschedule(ack, buffer, out, c, em)
else
() // Stop!
}
} else {
// Dealing with unexpected errors
if (streamErrors)
sendError(out, errorThrown)
else
reportFailure(errorThrown)
}
}
@tailrec
private def fillBuffer(in: Reader, buffer: Array[Char], nTotalCharsRead: Int = 0): Int = {
if (nTotalCharsRead >= buffer.length) {
nTotalCharsRead
} else {
val nCharsRead = in.read(buffer, nTotalCharsRead, buffer.length - nTotalCharsRead)
if (nCharsRead >= 0) {
fillBuffer(in, buffer, nTotalCharsRead + nCharsRead)
} else { // stream has ended
if (nTotalCharsRead <= 0)
nCharsRead // no more chars (-1 via Reader.read contract) available, end the observable
else
nTotalCharsRead // we read the last chars available
}
}
}
private def sendError(out: Subscriber[Nothing], e: Throwable)(implicit s: UncaughtExceptionReporter): Unit = {
try {
out.onError(e)
} catch {
case NonFatal(e2) =>
reportFailure(Platform.composeErrors(e, e2))
}
}
private def reportFailure(e: Throwable)(implicit s: UncaughtExceptionReporter): Unit = {
s.reportFailure(e)
// Forcefully close in case of protocol violations, because we are
// not signaling the error downstream, which could lead to leaks
try in.close()
catch { case NonFatal(_) => () }
}
}
|
alexandru/monifu
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/builders/CharsReaderObservable.scala
|
Scala
|
apache-2.0
| 5,361
|
/*
* Happy Melly Teller
* Copyright (C) 2013 - 2014, Happy Melly http://www.happymelly.com
*
* This file is part of the Happy Melly Teller.
*
* Happy Melly Teller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Happy Melly Teller is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>.
*
* If you have questions concerning this license or the applicable additional terms, you may contact
* by email Sergey Kotlov, sergey.kotlov@happymelly.com or
* in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR
*/
package models
import java.math.RoundingMode
import org.joda.money.{CurrencyUnit, Money}
import org.joda.time.DateTime
import scala.math.BigDecimal.int2bigDecimal
/**
* Represents an exchange rate between two currencies at a certain time.
*
* The exchange rate means that a one `base` is worth `rate` in the `counter` currency.
* @see http://en.wikipedia.org/wiki/Currency_pair
* @see https://openexchangerates.org/documentation#how-to-use
* @param base The base currency (a.k.a. ‘unit’)
* @param counter The counter currency (a.k.a. ‘quote’)
* @param rate The conversion rate
* @param timestamp The time at which the exchange rate was determined
*/
case class ExchangeRate(id: Option[Long], base: CurrencyUnit, counter: CurrencyUnit, rate: BigDecimal, timestamp: DateTime) {
if (rate.signum != 1) throw new IllegalArgumentException(s"Illegal rate: $rate")
assert(!base.equals(counter) || (base.equals(counter) && rate.compare(1.bigDecimal) == 0))
lazy val inverseRate = 1.bigDecimal.setScale(rate.scale).divide(rate.bigDecimal, RoundingMode.DOWN)
/**
* @see `convert`
*/
def apply(amount: Money) = convert(amount)
/**
* Converts an amount from `base` or `counter` or vice versa, using `rate`.
* @param amount The amount to convert
* @return The converted amount.
*/
def convert(amount: Money): Money = amount.getCurrencyUnit match {
case currency if base.equals(counter) && currency.equals(base) ⇒ amount
case currency if currency.equals(base) ⇒ amount.convertedTo(counter, rate.bigDecimal, RoundingMode.DOWN)
case currency if currency.equals(counter) ⇒ amount.convertedTo(base, inverseRate, RoundingMode.DOWN)
case currency ⇒ throw new IllegalArgumentException(s"Exchange rate is for $base $counter, tried to convert $currency")
}
/**
* Checks if this ExchangeRate can convert the given amount.
* @param amount
* @return
*/
def canConvert(amount: Money): Boolean = base.equals(amount.getCurrencyUnit) || counter.equals(amount.getCurrencyUnit)
def inverse = ExchangeRate(None, counter, base, inverseRate, timestamp)
override def toString = s"$base/$counter $rate"
}
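// Illustrative usage sketch (not part of the original model; the 1.10 EUR/USD rate and the amounts
// are made-up values): converting an amount from the base currency to the counter currency.
object ExchangeRateUsageSketch {
  def example(): Money = {
    val eurUsd = ExchangeRate(None, CurrencyUnit.EUR, CurrencyUnit.USD, BigDecimal("1.10"), DateTime.now)
    // 100.00 EUR * 1.10 = 110.00 USD (rounded down to the USD scale)
    eurUsd.convert(Money.of(CurrencyUnit.EUR, new java.math.BigDecimal("100.00")))
  }
}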
|
HappyMelly/teller
|
app/models/ExchangeRate.scala
|
Scala
|
gpl-3.0
| 3,236
|
package tifmo.document
import tifmo.dcstree.{ Executor, Relation }
import tifmo.inference.{ RuleDo, TermIndex, IEngineCore }
trait RelRightUpEntailing extends Relation {
override def execute[T](ex: Executor, a: T, b: T) {
super.execute(ex, a, b)
(ex, a, b) match {
case (ie: IEngineCore, xa: TermIndex, xb: TermIndex) =>
val a = xa.holder
ie.foreachSuperset(xb, Seq.empty, RuleDo((ie, pred, _) => {
val xbSuper = pred.superset
ie.claimRL(a.index, this, xbSuper)
}))
case _ => // Nothing
}
}
}
|
tomtung/tifmo
|
src/main/scala/tifmo/document/RelRightUpEntailing.scala
|
Scala
|
bsd-2-clause
| 568
|
/*
* Copyright (c) 2012, 2013, 2014, 2015, 2016 SURFnet BV
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the SURFnet BV nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package nl.surfnet.nsiv2
package messages
import com.google.common.collect.ImmutableRangeSet
import com.google.common.collect.{ Range => GRange }
import com.google.common.collect.TreeRangeSet
import java.net.URLDecoder
import java.net.URLEncoder
import java.util.regex.Pattern
import scala.collection.JavaConverters._
import scala.collection.immutable.SortedMap
import scala.util.Try
import play.utils.UriEncoding
import com.google.common.collect.DiscreteDomain
import com.google.common.collect.BoundType
final class VlanRange(private val ranges: ImmutableRangeSet[Integer]) {
require(!ranges.isEmpty, "VLAN ranges cannot be empty")
require(ranges.asRanges().asScala.forall { r =>
(r.hasLowerBound() && r.lowerBoundType() == BoundType.CLOSED
&& r.hasUpperBound() && r.upperBoundType() == BoundType.CLOSED)
}, "all ranges must be closed")
private def span = ranges.span
def isSingleton: Boolean = lowerBound == upperBound
def isSubsetOf(that: VlanRange): Boolean = that.ranges enclosesAll this.ranges
def lowerBound: Int = span.lowerEndpoint()
def upperBound: Int = span.upperEndpoint()
def intersect(that: VlanRange): Option[VlanRange] = {
val intersection = TreeRangeSet.create[Integer]
this.ranges.asSet(DiscreteDomain.integers()).asScala.foreach { (vlan: Integer) =>
if (that.ranges.contains(vlan)) {
intersection.add(GRange.closedOpen(vlan, vlan + 1))
}
}
val closedRanges = TreeRangeSet.create[Integer]
intersection.asRanges().asScala.foreach { range =>
closedRanges.add(GRange.closed(range.lowerEndpoint, range.upperEndpoint - 1))
}
if (closedRanges.isEmpty) None else Some(new VlanRange(ImmutableRangeSet.copyOf(closedRanges)))
}
override def equals(o: Any) = o match {
case that: VlanRange => this.ranges == that.ranges
case _ => false
}
override def hashCode = ranges.hashCode
override def toString = ranges.asRanges().asScala.map { range =>
(range.lowerEndpoint(), range.upperEndpoint()) match {
case (lower, upper) if lower == upper => f"$lower%d"
case (lower, upper) => f"$lower%d-$upper%d"
}
}.mkString(",")
}
object VlanRange {
private final val ALLOWED_SYNTAX = Pattern.compile("\\s*\\d+(\\s*-\\s*\\d+)?(\\s*,\\s*\\d+(\\s*-\\s*\\d+)?)*\\s*")
private final val RANGE_PATTERN = "(\\d+)-(\\d+)".r
private final val SINGLETON_PATTERN = "(\\d+)".r
private[messages] def apply(ranges: Seq[GRange[Integer]]): VlanRange = {
val set = TreeRangeSet.create[Integer]
ranges.foreach(set.add)
new VlanRange(ImmutableRangeSet.copyOf(set))
}
val all: VlanRange = apply(Seq(GRange.closed(1, 4094)))
def singleton(v: Int): VlanRange = apply(Seq(GRange.singleton(v)))
def range(range: Range): Option[VlanRange] = {
val end = if (range.isInclusive) range.end else range.end - 1
if (range.start <= end && range.step == 1) Some(VlanRange(Seq(GRange.closed(range.start, end)))) else None
}
def fromString(s: String): Option[VlanRange] = if (!ALLOWED_SYNTAX.matcher(s).matches()) None else Try {
val ranges = s.replaceAll("\\s+", "").split(",").map {
case RANGE_PATTERN(lower, upper) => GRange.closed(Integer.valueOf(lower), Integer.valueOf(upper))
case SINGLETON_PATTERN(value) => GRange.singleton(Integer.valueOf(value))
}
VlanRange(ranges)
}.toOption
}
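// Illustrative sketch (not part of the original source) of how the VlanRange helpers compose:
//   VlanRange.fromString("100-200,300")                                             // Some(100-200,300)
//   VlanRange.fromString("100-200").flatMap(_.intersect(VlanRange.singleton(150)))  // Some(150)
//   VlanRange.singleton(5).isSubsetOf(VlanRange.all)                                // true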
case class Stp(identifier: String, labels: SortedMap[String, Option[String]] = SortedMap.empty) {
require(identifier.nonEmpty, "identifier must be non-empty")
require(labels.forall(_._1.nonEmpty), "label types must be non-empty")
def withoutLabels = copy(labels = SortedMap.empty)
def withoutLabel(labelType: String) = copy(labels = labels - labelType)
def withLabel(labelType: String, labelValue: String) = copy(labels = labels + (labelType -> Some(labelValue)))
def vlan: Option[VlanRange] = labels.getOrElse("vlan", None).flatMap(VlanRange.fromString)
def serverVlan: Option[VlanRange] = labels.getOrElse("s-vlan", None).flatMap(VlanRange.fromString)
def isClientVlanCompatibleWith(target: Stp): Boolean = (this.vlan, target.vlan) match {
case (None, _) => true
case (Some(specified), Some(allowed)) => specified isSubsetOf allowed
case _ => false
}
def isServerVlanCompatibleWith(target: Stp): Boolean = (this.serverVlan, target.serverVlan) match {
case (None, None) => true
case (Some(specified), Some(allowed)) => specified isSubsetOf allowed
case _ => false
}
def isCompatibleWith(that: Stp) = this.identifier == that.identifier && this.isClientVlanCompatibleWith(that) && this.isServerVlanCompatibleWith(that)
override def toString = UriEncoding.encodePathSegment(identifier, "UTF-8") ++ queryString
private def queryString = if (labels.isEmpty) "" else labels.iterator.map {
case (label, None) => URLEncoder.encode(label, "UTF-8")
case (label, Some(value)) => s"${URLEncoder.encode(label, "UTF-8")}=${URLEncoder.encode(value, "UTF-8")}"
}.mkString("?", "&", "")
}
object Stp {
type Label = (String, Option[String])
import scala.math.Ordering.Implicits._
implicit val StpOrdering: Ordering[Stp] = Ordering.by(stp => (stp.identifier, stp.labels.toStream))
private val LabelPattern = "([^=]*)(?:=([^=]*))?".r
def fromString(s: String): Option[Stp] = {
def parseLabel(label: String): Option[Label] = label match {
case LabelPattern(labelType, labelValue) if labelType.nonEmpty =>
Some((URLDecoder.decode(labelType, "UTF-8"), Option(labelValue).map(URLDecoder.decode(_, "UTF-8"))))
case _ =>
None
}
s.split(Pattern.quote("?")) match {
case Array(identifier) if identifier.nonEmpty =>
Some(Stp(UriEncoding.decodePath(identifier, "UTF-8")))
case Array(identifier, queryString) if identifier.nonEmpty =>
val parsedLabels = queryString.split(Pattern.quote("&")).map(parseLabel)
val labels = if (parsedLabels contains None) None else Some(parsedLabels.map(_.get))
labels.map { l =>
Stp(UriEncoding.decodePath(identifier, "UTF-8"), SortedMap(l: _*))
}
case _ =>
None
}
}
}
|
BandwidthOnDemand/play-nsi-support
|
src/main/scala/nl/surfnet/nsiv2/messages/Stp.scala
|
Scala
|
bsd-3-clause
| 7,855
|
/*
* Copyright 2009-2015 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* @author: sishah
* @date: 07/07/15
* @version: 1.0
*/
package com.linkedin.norbert
package network
package server
import com.linkedin.norbert.cluster.Node
import com.linkedin.norbert.network.garbagecollection.GcParamWrapper
class GcAwareMessageExecutorSpec extends MessageExecutorSpec {
object nodeTypes extends Enumeration {
val noNode, goodNode, badNode = Value
}
val cycleTime = 6000
val slotTime = 2000
val slaTime = 1000
// Buffer time, in milliseconds, to prevent test cases failing at slot transition boundaries.
// i.e. we wait this amount of time into a particular slot before testing
val slackTime = 10
val goodGcParams = new GcParamWrapper(slaTime, cycleTime, slotTime)
val badNode = Node(1, "localhost:31313", true)
def noNode: Node = throw new NetworkServerNotBoundException
val goodNode = Node(1, "localhost:31313", true, Set.empty, None, None, Some(0))
var nodeFlag = nodeTypes.badNode
def getNode = {
Some (
nodeFlag match {
case nodeTypes.badNode => badNode
case nodeTypes.noNode => noNode
case nodeTypes.goodNode => goodNode
}
)
}
// MessageExecutorSpec by default runs all tests with no GcParams and no defined node.
// This spec overrides the message executor to have valid GcParams, and a node based on a flag.
override val messageExecutor = new ThreadPoolMessageExecutor(None, "service",
messageHandlerRegistry,
filters,
1000L,
1,
1,
1,
100,
1000L,
-1,
goodGcParams,
getNode
)
"GcAwareMessageExecutor" should {
doAfter {
messageExecutor.shutdown
}
//No node is bound
"successfully respond (with no bound node) in" in {
nodeFlag = nodeTypes.noNode
generalExecutorTests
}
//These tests occur outside the GC period
"successfully respond (with a not-currently-GCing node) in" in {
nodeFlag = nodeTypes.goodNode
waitTillStartOfNewCycle
waitFor((slotTime + slackTime).ms)
generalExecutorTests
}
}
def waitTillStartOfNewCycle: Unit = {
println("Waiting till start of new cycle")
while (System.currentTimeMillis() % cycleTime != 0) {}
}
}
|
thesiddharth/norbert
|
network/src/test/scala/com/linkedin/norbert/network/server/GcAwareMessageExecutorSpec.scala
|
Scala
|
apache-2.0
| 2,804
|
package org.gg.play.authentication.misc
import play.api.Logger
/**
* User: luigi
* Date: 20/04/13
* Time: 16:51
*/
trait Loggable {
lazy val log = Logger("application." + this.getClass.getName)
}
|
gigiigig/play2-authentication
|
app/misc/Loggable.scala
|
Scala
|
gpl-3.0
| 204
|
package dbtarzan.gui.config.connections
import scalafx.scene.control.{ComboBox, ListCell}
import scalafx.scene.Parent
import scalafx.collections.ObservableBuffer
import scalafx.Includes._
import dbtarzan.db.{Schema, Schemas}
import dbtarzan.gui.TControlBuilder
import scalafx.event.ActionEvent
import scalafx.util.StringConverter
import scala.collection.immutable
/* A combo box from which to select the database schema that gets stored in the configuration file */
class ComboSchemas() extends TControlBuilder with TCombo {
private val schemas = ObservableBuffer.empty[Option[Schema]]
private val cmbSchemas = new ComboBox[Option[Schema]] {
items = schemas
editable = true
value = None
cellFactory = { _ => buildCell() }
buttonCell = buildCell()
converter = new StringConverter[Option[Schema]] {
override def fromString(v: String): Option[Schema] =
Some(v.trim()).filter(_.nonEmpty).map(t => Schema(t))
override def toString(t: Option[Schema]): String =
t.map(_.name).getOrElse("")
}
}
private def buildCell() = new ListCell[Option[Schema]] {
item.onChange { (value, oldValue, newValue) => {
val optValue = Option(newValue).flatten
// the orElse is to avoid problems when removing items
val valueOrEmpty = optValue.map(value => value.name).orElse(Some(""))
valueOrEmpty.foreach({ text.value = _ })
}}}
def schemasToChooseFrom(newSchemas: Schemas): Unit = {
schemas.clear()
val schemasToAdd: immutable.List[Some[Schema]] = newSchemas.schemas.map(Some(_))
schemas.addAll(schemasToAdd)
}
def clearSchemasToChooseFrom() : Unit =
schemas.clear()
def show(schema : Option[Schema]) : Unit =
cmbSchemas.value = schema
def toSchema(): Option[Schema] = cmbSchemas.getSelectionModel.selectedItem()
def control : Parent = cmbSchemas
def onChanged(useSchemas : () => Unit) : Unit = {
cmbSchemas.onAction = (ev: ActionEvent) => useSchemas()
}
}
|
aferrandi/dbtarzan
|
src/main/scala/dbtarzan/gui/config/connections/ComboSchemas.scala
|
Scala
|
apache-2.0
| 1,986
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.types.StringType
/**
* Unit tests for regular expression (regexp) related SQL expressions.
*/
class RegexpExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper {
/**
* Check if a given expression evaluates to an expected output, in case the input is
* a literal and in case the input is in the form of a row.
* @tparam A type of input
* @param mkExpr the expression to test for a given input
* @param input value that will be used to create the expression, as literal and in the form
* of a row
* @param expected the expected output of the expression
* @param inputToExpression an implicit conversion from the input type to its corresponding
* sql expression
*/
def checkLiteralRow[A](mkExpr: Expression => Expression, input: A, expected: Any)
(implicit inputToExpression: A => Expression): Unit = {
checkEvaluation(mkExpr(input), expected) // check literal input
val regex = 'a.string.at(0)
checkEvaluation(mkExpr(regex), expected, create_row(input)) // check row input
}
test("LIKE Pattern") {
// null handling
checkLiteralRow(Literal.create(null, StringType).like(_), "a", null)
checkEvaluation(Literal.create("a", StringType).like(Literal.create(null, StringType)), null)
checkEvaluation(Literal.create(null, StringType).like(Literal.create(null, StringType)), null)
checkEvaluation(
Literal.create("a", StringType).like(NonFoldableLiteral.create("a", StringType)), true)
checkEvaluation(
Literal.create("a", StringType).like(NonFoldableLiteral.create(null, StringType)), null)
checkEvaluation(
Literal.create(null, StringType).like(NonFoldableLiteral.create("a", StringType)), null)
checkEvaluation(
Literal.create(null, StringType).like(NonFoldableLiteral.create(null, StringType)), null)
// simple patterns
checkLiteralRow("abdef" like _, "abdef", true)
checkLiteralRow("a_%b" like _, "a\\__b", true)
checkLiteralRow("addb" like _, "a_%b", true)
checkLiteralRow("addb" like _, "a\\__b", false)
checkLiteralRow("addb" like _, "a%\\%b", false)
checkLiteralRow("a_%b" like _, "a%\\%b", true)
checkLiteralRow("addb" like _, "a%", true)
checkLiteralRow("addb" like _, "**", false)
checkLiteralRow("abc" like _, "a%", true)
checkLiteralRow("abc" like _, "b%", false)
checkLiteralRow("abc" like _, "bc%", false)
checkLiteralRow("a\nb" like _, "a_b", true)
checkLiteralRow("ab" like _, "a%b", true)
checkLiteralRow("a\nb" like _, "a%b", true)
// empty input
checkLiteralRow("" like _, "", true)
checkLiteralRow("a" like _, "", false)
checkLiteralRow("" like _, "a", false)
// SPARK-17647 double-escaping backslash
checkLiteralRow("""\\\\""" like _, """%\\%""", true)
checkLiteralRow("""%%""" like _, """%%""", true)
checkLiteralRow("""\__""" like _, """\\\__""", true)
checkLiteralRow("""\\\__""" like _, """%\\%\%""", false)
checkLiteralRow("""_\\\%""" like _, """%\\""", false)
// unicode
// scalastyle:off nonascii
checkLiteralRow("a\u20ACa" like _, "_\u20AC_", true)
checkLiteralRow("a€a" like _, "_€_", true)
checkLiteralRow("a€a" like _, "_\u20AC_", true)
checkLiteralRow("a\u20ACa" like _, "_€_", true)
// scalastyle:on nonascii
// invalid escaping
val invalidEscape = intercept[AnalysisException] {
evaluateWithoutCodegen("""a""" like """\a""")
}
assert(invalidEscape.getMessage.contains("pattern"))
val endEscape = intercept[AnalysisException] {
evaluateWithoutCodegen("""a""" like """a\""")
}
assert(endEscape.getMessage.contains("pattern"))
// case
checkLiteralRow("A" like _, "a%", false)
checkLiteralRow("a" like _, "A%", false)
checkLiteralRow("AaA" like _, "_a_", true)
// example
checkLiteralRow("""%SystemDrive%\Users\John""" like _, """\%SystemDrive\%\\Users%""", true)
}
Seq('/', '#', '\"').foreach { escapeChar =>
test(s"LIKE Pattern ESCAPE '$escapeChar'") {
// null handling
checkLiteralRow(Literal.create(null, StringType).like(_, escapeChar), "a", null)
checkEvaluation(
Literal.create("a", StringType).like(Literal.create(null, StringType), escapeChar), null)
checkEvaluation(
Literal.create(null, StringType).like(Literal.create(null, StringType), escapeChar), null)
checkEvaluation(Literal.create("a", StringType).like(
NonFoldableLiteral.create("a", StringType), escapeChar), true)
checkEvaluation(Literal.create("a", StringType).like(
NonFoldableLiteral.create(null, StringType), escapeChar), null)
checkEvaluation(Literal.create(null, StringType).like(
NonFoldableLiteral.create("a", StringType), escapeChar), null)
checkEvaluation(Literal.create(null, StringType).like(
NonFoldableLiteral.create(null, StringType), escapeChar), null)
// simple patterns
checkLiteralRow("abdef" like(_, escapeChar), "abdef", true)
checkLiteralRow("a_%b" like(_, escapeChar), s"a${escapeChar}__b", true)
checkLiteralRow("addb" like(_, escapeChar), "a_%b", true)
checkLiteralRow("addb" like(_, escapeChar), s"a${escapeChar}__b", false)
checkLiteralRow("addb" like(_, escapeChar), s"a%$escapeChar%b", false)
checkLiteralRow("a_%b" like(_, escapeChar), s"a%$escapeChar%b", true)
checkLiteralRow("addb" like(_, escapeChar), "a%", true)
checkLiteralRow("addb" like(_, escapeChar), "**", false)
checkLiteralRow("abc" like(_, escapeChar), "a%", true)
checkLiteralRow("abc" like(_, escapeChar), "b%", false)
checkLiteralRow("abc" like(_, escapeChar), "bc%", false)
checkLiteralRow("a\nb" like(_, escapeChar), "a_b", true)
checkLiteralRow("ab" like(_, escapeChar), "a%b", true)
checkLiteralRow("a\nb" like(_, escapeChar), "a%b", true)
// empty input
checkLiteralRow("" like(_, escapeChar), "", true)
checkLiteralRow("a" like(_, escapeChar), "", false)
checkLiteralRow("" like(_, escapeChar), "a", false)
// SPARK-17647 double-escaping backslash
checkLiteralRow(s"""$escapeChar$escapeChar$escapeChar$escapeChar""" like(_, escapeChar),
s"""%$escapeChar$escapeChar%""", true)
checkLiteralRow("""%%""" like(_, escapeChar), """%%""", true)
checkLiteralRow(s"""${escapeChar}__""" like(_, escapeChar),
s"""$escapeChar$escapeChar${escapeChar}__""", true)
checkLiteralRow(s"""$escapeChar$escapeChar${escapeChar}__""" like(_, escapeChar),
s"""%$escapeChar$escapeChar%$escapeChar%""", false)
checkLiteralRow(s"""_$escapeChar$escapeChar$escapeChar%""" like(_, escapeChar),
s"""%$escapeChar${escapeChar}""", false)
// unicode
// scalastyle:off nonascii
checkLiteralRow("a\u20ACa" like(_, escapeChar), "_\u20AC_", true)
checkLiteralRow("a€a" like(_, escapeChar), "_€_", true)
checkLiteralRow("a€a" like(_, escapeChar), "_\u20AC_", true)
checkLiteralRow("a\u20ACa" like(_, escapeChar), "_€_", true)
// scalastyle:on nonascii
// invalid escaping
val invalidEscape = intercept[AnalysisException] {
evaluateWithoutCodegen("""a""" like(s"""${escapeChar}a""", escapeChar))
}
assert(invalidEscape.getMessage.contains("pattern"))
val endEscape = intercept[AnalysisException] {
evaluateWithoutCodegen("""a""" like(s"""a$escapeChar""", escapeChar))
}
assert(endEscape.getMessage.contains("pattern"))
// case
checkLiteralRow("A" like(_, escapeChar), "a%", false)
checkLiteralRow("a" like(_, escapeChar), "A%", false)
checkLiteralRow("AaA" like(_, escapeChar), "_a_", true)
// example
checkLiteralRow(s"""%SystemDrive%${escapeChar}Users${escapeChar}John""" like(_, escapeChar),
s"""$escapeChar%SystemDrive$escapeChar%$escapeChar${escapeChar}Users%""", true)
}
}
test("RLIKE Regular Expression") {
checkLiteralRow(Literal.create(null, StringType) rlike _, "abdef", null)
checkEvaluation("abdef" rlike Literal.create(null, StringType), null)
checkEvaluation(Literal.create(null, StringType) rlike Literal.create(null, StringType), null)
checkEvaluation("abdef" rlike NonFoldableLiteral.create("abdef", StringType), true)
checkEvaluation("abdef" rlike NonFoldableLiteral.create(null, StringType), null)
checkEvaluation(
Literal.create(null, StringType) rlike NonFoldableLiteral.create("abdef", StringType), null)
checkEvaluation(
Literal.create(null, StringType) rlike NonFoldableLiteral.create(null, StringType), null)
checkLiteralRow("abdef" rlike _, "abdef", true)
checkLiteralRow("abbbbc" rlike _, "a.*c", true)
checkLiteralRow("fofo" rlike _, "^fo", true)
checkLiteralRow("fo\no" rlike _, "^fo\no$", true)
checkLiteralRow("Bn" rlike _, "^Ba*n", true)
checkLiteralRow("afofo" rlike _, "fo", true)
checkLiteralRow("afofo" rlike _, "^fo", false)
checkLiteralRow("Baan" rlike _, "^Ba?n", false)
checkLiteralRow("axe" rlike _, "pi|apa", false)
checkLiteralRow("pip" rlike _, "^(pi)*$", false)
checkLiteralRow("abc" rlike _, "^ab", true)
checkLiteralRow("abc" rlike _, "^bc", false)
checkLiteralRow("abc" rlike _, "^ab", true)
checkLiteralRow("abc" rlike _, "^bc", false)
intercept[java.util.regex.PatternSyntaxException] {
evaluateWithoutCodegen("abbbbc" rlike "**")
}
intercept[java.util.regex.PatternSyntaxException] {
val regex = 'a.string.at(0)
evaluateWithoutCodegen("abbbbc" rlike regex, create_row("**"))
}
}
test("RegexReplace") {
val row1 = create_row("100-200", "(\\d+)", "num")
val row2 = create_row("100-200", "(\\d+)", "###")
val row3 = create_row("100-200", "(-)", "###")
val row4 = create_row(null, "(\\d+)", "###")
val row5 = create_row("100-200", null, "###")
val row6 = create_row("100-200", "(-)", null)
val s = 's.string.at(0)
val p = 'p.string.at(1)
val r = 'r.string.at(2)
val expr = RegExpReplace(s, p, r)
checkEvaluation(expr, "num-num", row1)
checkEvaluation(expr, "###-###", row2)
checkEvaluation(expr, "100###200", row3)
checkEvaluation(expr, null, row4)
checkEvaluation(expr, null, row5)
checkEvaluation(expr, null, row6)
val nonNullExpr = RegExpReplace(Literal("100-200"), Literal("(\\d+)"), Literal("num"))
checkEvaluation(nonNullExpr, "num-num", row1)
// Test escaping of arguments
GenerateUnsafeProjection.generate(
RegExpReplace(Literal("\"quote"), Literal("\"quote"), Literal("\"quote")) :: Nil)
}
test("SPARK-22570: RegExpReplace should not create a lot of global variables") {
val ctx = new CodegenContext
RegExpReplace(Literal("100"), Literal("(\\d+)"), Literal("num")).genCode(ctx)
// four global variables (lastRegex, pattern, lastReplacement, and lastReplacementInUTF8)
// are always required; they are allocated in the type-based global array
assert(ctx.inlinedMutableStates.length == 0)
assert(ctx.mutableStateInitCode.length == 4)
}
test("RegexExtract") {
val row1 = create_row("100-200", "(\\d+)-(\\d+)", 1)
val row2 = create_row("100-200", "(\\d+)-(\\d+)", 2)
val row3 = create_row("100-200", "(\\d+).*", 1)
val row4 = create_row("100-200", "([a-z])", 1)
val row5 = create_row(null, "([a-z])", 1)
val row6 = create_row("100-200", null, 1)
val row7 = create_row("100-200", "([a-z])", null)
val s = 's.string.at(0)
val p = 'p.string.at(1)
val r = 'r.int.at(2)
val expr = RegExpExtract(s, p, r)
checkEvaluation(expr, "100", row1)
checkEvaluation(expr, "200", row2)
checkEvaluation(expr, "100", row3)
checkEvaluation(expr, "", row4) // will not match anything, empty string get
checkEvaluation(expr, null, row5)
checkEvaluation(expr, null, row6)
checkEvaluation(expr, null, row7)
val expr1 = new RegExpExtract(s, p)
checkEvaluation(expr1, "100", row1)
val nonNullExpr = RegExpExtract(Literal("100-200"), Literal("(\\d+)-(\\d+)"), Literal(1))
checkEvaluation(nonNullExpr, "100", row1)
// invalid group index
val row8 = create_row("100-200", "(\\d+)-(\\d+)", 3)
val row9 = create_row("100-200", "(\\d+).*", 2)
val row10 = create_row("100-200", "\\d+", 1)
checkExceptionInExpression[IllegalArgumentException](
expr, row8, "Regex group count is 2, but the specified group index is 3")
checkExceptionInExpression[IllegalArgumentException](
expr, row9, "Regex group count is 1, but the specified group index is 2")
checkExceptionInExpression[IllegalArgumentException](
expr, row10, "Regex group count is 0, but the specified group index is 1")
// Test escaping of arguments
GenerateUnsafeProjection.generate(
RegExpExtract(Literal("\"quote"), Literal("\"quote"), Literal(1)) :: Nil)
}
test("SPLIT") {
val s1 = 'a.string.at(0)
val s2 = 'b.string.at(1)
val row1 = create_row("aa2bb3cc", "[1-9]+")
val row2 = create_row(null, "[1-9]+")
val row3 = create_row("aa2bb3cc", null)
checkEvaluation(
StringSplit(Literal("aa2bb3cc"), Literal("[1-9]+"), -1), Seq("aa", "bb", "cc"), row1)
checkEvaluation(
StringSplit(Literal("aa2bb3cc"), Literal("[1-9]+"), 2), Seq("aa", "bb3cc"), row1)
// limit = 0 should behave just like limit = -1
checkEvaluation(
StringSplit(Literal("aacbbcddc"), Literal("c"), 0), Seq("aa", "bb", "dd", ""), row1)
checkEvaluation(
StringSplit(Literal("aacbbcddc"), Literal("c"), -1), Seq("aa", "bb", "dd", ""), row1)
checkEvaluation(
StringSplit(s1, s2, -1), Seq("aa", "bb", "cc"), row1)
checkEvaluation(StringSplit(s1, s2, -1), null, row2)
checkEvaluation(StringSplit(s1, s2, -1), null, row3)
// Test escaping of arguments
GenerateUnsafeProjection.generate(
StringSplit(Literal("\"quote"), Literal("\"quote"), Literal(-1)) :: Nil)
}
test("SPARK-30759: cache initialization for literal patterns") {
val expr = "A" like Literal.create("a", StringType)
expr.eval()
val cache = expr.getClass.getSuperclass
.getDeclaredFields.filter(_.getName.endsWith("cache")).head
cache.setAccessible(true)
assert(cache.get(expr).asInstanceOf[java.util.regex.Pattern].pattern().contains("a"))
}
}
|
spark-test/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/RegexpExpressionsSuite.scala
|
Scala
|
apache-2.0
| 15,529
|
package models
import models.dao._
@SuppressWarnings(Array("org.wartremover.warts.FinalCaseClass"))
case class ItemBids(higherBid: Option[Bid], bidsList: List[Bid] = Nil)(implicit val item: Item) {
@SuppressWarnings(Array("org.wartremover.warts.TraversableOps"))
def this(bidsList: List[Bid], item: Item) =
this(if (bidsList.nonEmpty) Some(bidsList.max) else None, bidsList)(item)
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
def withBid(bid: Bid): ItemBids = {
require(bid.item == item)
bid.value match {
case v if v <= 0 => this
case _ if higherBid.isEmpty => ItemBids(Some(bid), bid :: bidsList)
case v if v > higherBid.get.value => ItemBids(Some(bid), bid :: bidsList)
case _ => ItemBids(higherBid, bid :: bidsList)
}
}
}
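/*
 * Usage sketch (not part of the original file; Bid/Item construction depends on
 * models.dao and is only hinted at here):
 *
 *   val empty   = new ItemBids(Nil, item)   // no bids yet
 *   val updated = empty.withBid(someBid)    // someBid.item must equal item
 *   updated.higherBid                       // Some(someBid) if someBid.value > 0
 *
 * Non-positive bids are ignored, and a bid only replaces `higherBid` when its
 * value strictly exceeds the current highest one.
 */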
|
jcranky/lojinha
|
app/models/ItemBids.scala
|
Scala
|
gpl-3.0
| 850
|
package battle
import battle.Ability.{Strength, Constitution, Dexterity, AbilityType}
import battle.Equippable.Attack
import battle.classes.{NoClass, GladiatorClass}
import battle.races.{Race, Human}
class Gladiator(val name: String,
abilities: Map[AbilityType, Ability],
val gladiatorClass: GladiatorClass,
val race: Race) {
private var _hitpoints = 5 + ability(Constitution).modifier
def armorClass = 10 + ability(Dexterity).modifier
def applyDamage(damage: Int): Unit = _hitpoints -= damage
def hitpoints = _hitpoints
def alive = hitpoints > 0
def ability(abilityType: AbilityType): Ability = abilities.getOrElse(abilityType, Ability())
def attackBonus(defender: Gladiator): Int = {
List(gladiatorClass, race).map(x => x.adjustment(Attack, this, defender)).sum +
ability(Strength).modifier
}
}
object Gladiator {
def apply(name: String,
abilities: Map[AbilityType, Ability] = Map(),
gladiatorClass: GladiatorClass = NoClass,
race: Race = Human) = new Gladiator(name, abilities, gladiatorClass, race)
}
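// Usage sketch (not part of the original file): exercises only the public API
// defined above, with the default NoClass / Human / empty-abilities values.
object GladiatorUsageSketch {
  def demo(): Unit = {
    val attacker = Gladiator("Maximus")
    val defender = Gladiator("Spartacus")
    println(attacker.armorClass)            // 10 + Dexterity modifier
    println(attacker.attackBonus(defender)) // class/race adjustments + Strength modifier
    defender.applyDamage(2)
    println(defender.alive)                 // true as long as hitpoints stay above 0
  }
}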
|
bbalser/gladiator-actors
|
src/main/scala/battle/Gladiator.scala
|
Scala
|
cc0-1.0
| 1,128
|
package com.tribbloids.spookystuff.uav.planning
import com.tribbloids.spookystuff.row.SpookySchema
import org.apache.spark.ml.uav.{DVec, Vec}
object Constraints {
object AltitudeOnly extends Constraint {
override def rewrite(v: Vec, schema: SpookySchema): Vec = {
val alt = v(2)
new DVec(Array(0, 0, alt))
}
}
}
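// Sketch (not part of the original file): AltitudeOnly zeroes the horizontal
// components and keeps only index 2, e.g. for some SpookySchema `schema` already
// in scope, rewrite(new DVec(Array(1.0, 2.0, 30.0)), schema) keeps only the 30.0.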
|
tribbloid/spookystuff
|
uav/src/main/scala/com/tribbloids/spookystuff/uav/planning/Constraints.scala
|
Scala
|
apache-2.0
| 338
|
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze
package object http {
type Headers = collection.Seq[(String, String)]
type Url = String
type Method = String
}
|
http4s/blaze
|
http/src/main/scala/org/http4s/blaze/http/package.scala
|
Scala
|
apache-2.0
| 738
|
import leon.lang.StaticChecks._
object StaticChecks2 {
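// Note (sketch, not in the original file): `add` computes n + m by peeling one
// unit off m per recursive call, e.g. add(2, 3) == 5; the assert and the
// ensuring clause only state non-negativity of the result.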
def add(n: BigInt, m: BigInt): BigInt = {
require(n >= 0 && m >= 0)
var res = if(m == 0) n else add(n, m-1) + 1
assert(res >= 0)
res
} ensuring((res: BigInt) => res >= 0)
}
|
epfl-lara/leon
|
src/test/resources/regression/verification/purescala/valid/StaticChecks2.scala
|
Scala
|
gpl-3.0
| 252
|
package controllers
import javax.inject._
import actions.GeekAction
import daos.StripDAO
import org.bson.types.ObjectId
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}
/**
* This controller exposes comic strip endpoints, loading strips asynchronously
* through the injected `StripDAO`.
*
* @param exec We need an `ExecutionContext` to execute our
* asynchronous code.
*/
@Singleton
class StripController @Inject()(GeekAction: GeekAction, stripDAO: StripDAO)(implicit exec: ExecutionContext) extends Controller {
/**
* Returns a single strip by id. Not implemented yet.
*/
def getOne(stripId: ObjectId): Action[AnyContent] = Action.async {Future.successful(NotImplemented)}
/**
* Returns all strips of the given comic for the authenticated geek.
*/
def getAll(comicId: ObjectId): Action[AnyContent] = GeekAction.async { request =>
stripDAO.select(comicId, request.geekId).map(Ok(_))
}
}
|
comicgator/comicgator
|
maestro/app/controllers/StripController.scala
|
Scala
|
mit
| 906
|
package breeze.text.transform
import breeze.data._
import breeze.linalg.Counter
/**
* Filter that removes rare words that occur in fewer than threshold documents
* Syntax: new RemoveRareWords(10) apply (data)
*
* @author dlwh
*/
class RemoveRareWords(threshold: Int = 10) {
def apply[T, Obs <: Observation[Seq[T]]](data: Seq[Obs]) = {
val c = Counter[T, Int]()
for {
d <- data
w <- d.features.toSet[T]
} {
c(w) += 1
}
for (d <- data)
yield for (seq <- d)
yield for (w <- seq if c(w) >= threshold) yield w
}
}
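/*
 * Usage sketch (not part of the original file): following the "Syntax" note above,
 *
 *   val filtered = new RemoveRareWords(5) apply data
 *
 * where `data: Seq[Observation[Seq[String]]]`; how an Observation is constructed
 * depends on breeze.data and is not shown here. Words appearing in fewer than 5
 * documents are dropped from every observation.
 */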
|
tjhunter/scalanlp-core
|
process/src/main/scala/breeze/text/transform/RemoveRareWords.scala
|
Scala
|
apache-2.0
| 566
|
/*
* TimelineViewState.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite.impl.state
import de.sciss.audiowidgets.{TimelineModel, TransportCatch}
import de.sciss.lucre.swing.LucreSwing.requireEDT
import de.sciss.lucre.{BooleanObj, DoubleObj, LongObj, Obj, SpanLikeObj, SpanObj, Txn}
import de.sciss.mellite.{GUI, ViewState}
import de.sciss.numbers.Implicits._
import de.sciss.span.Span
import scala.math.sqrt
import scala.swing.Slider
object TimelineViewState {
final val Key_Position = "tl-pos"
final val Key_Visible = "tl-vis"
final val Key_Selection = "tl-sel"
final val Key_Catch = "catch"
final val Key_VisualBoost = "vis-boost"
// final val Default_VisualBoostMin = 1.0
// final val Default_VisualBoostMax = 512.0
}
/** Tracks view state for a Timeline-based object with transport.
* Tracks timeline position, visible and selected span, as well as visual boost and transport catch.
*/
class TimelineViewState[T <: Txn[T]](
keyPosition : String = TimelineViewState.Key_Position,
keyVisible : String = TimelineViewState.Key_Visible,
keySelection : String = TimelineViewState.Key_Selection,
keyCatch : String = TimelineViewState.Key_Catch,
keyVisualBoost: String = TimelineViewState.Key_VisualBoost,
visBoostMin : Double = GUI.Default_VisualBoostMin,
visBoostMax : Double = GUI.Default_VisualBoostMax,
) {
@volatile
private var statePosition = 0L
private var dirtyPosition = false
@volatile
private var stateVisible = Span(0L, 0L)
private var dirtyVisible = false
@volatile
private var stateSelection = Span.Void: Span.SpanOrVoid
private var dirtySelection = false
@volatile
private var stateCatch = true
private var dirtyCatch = false
@volatile
private var stateVisualBoost = sqrt(visBoostMin * visBoostMax)
private var dirtyVisualBoost = false
def entries(set0: Set[ViewState] = Set.empty): Set[ViewState] = {
requireEDT()
var res = set0
if (dirtyPosition ) res += ViewState(keyPosition , LongObj , statePosition )
if (dirtyVisible ) res += ViewState(keyVisible , SpanObj , stateVisible )
if (dirtySelection ) res += ViewState(keySelection , SpanLikeObj , stateSelection )
if (dirtyCatch ) res += ViewState(keyCatch , BooleanObj , stateCatch )
if (dirtyVisualBoost) res += ViewState(keyVisualBoost , DoubleObj , stateVisualBoost)
res
}
def init(tAttr: Obj.AttrMap[T])(implicit tx: T): Unit = {
tAttr.$[LongObj](keyPosition).foreach { v =>
statePosition = v.value
}
tAttr.$[SpanObj](keyVisible).foreach { v =>
stateVisible = v.value
}
tAttr.$[SpanLikeObj](keySelection).foreach { v =>
v.value match {
case sp: Span => stateSelection = sp
case _ =>
}
}
tAttr.$[BooleanObj](keyCatch).foreach { v =>
stateCatch = v.value
}
tAttr.$[DoubleObj](keyVisualBoost).foreach { v =>
stateVisualBoost = v.value
}
}
/**
* @param visBoost can be `null` if it does not apply
*/
def initGUI(tlm: TimelineModel.Modifiable, cch: TransportCatch, visBoost: Slider): Unit = {
tlm.position = statePosition
if (stateVisible.nonEmpty) {
tlm.visible = stateVisible
}
tlm.selection = stateSelection
tlm.addListener {
case TimelineModel.Position (_, p) =>
statePosition = p.now
dirtyPosition = true
case TimelineModel.Visible(_, sp) =>
stateVisible = sp.now
dirtyVisible = true
case TimelineModel.Selection(_, sp) =>
stateSelection = sp.now
dirtySelection = true
}
cch.catchEnabled = stateCatch
cch.addListener {
case b =>
stateCatch = b
dirtyCatch = true
}
if (visBoost != null) {
if (stateVisualBoost >= visBoostMin && stateVisualBoost <= visBoostMax) {
val visBoostView0 = (stateVisualBoost.expLin(
visBoostMin, visBoostMax, visBoost.min, visBoost.max) + 0.5).toInt
.clip(visBoost.min, visBoost.max)
visBoost.value = visBoostView0
}
visBoost.peer.addChangeListener { _ =>
val visBoostModel = visBoost.value.linExp(visBoost.min, visBoost.max, visBoostMin, visBoostMax)
if (stateVisualBoost != visBoostModel) {
stateVisualBoost = visBoostModel
dirtyVisualBoost = true
}
}
}
}
}
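// Note (sketch, not in the original file): the visual-boost slider mapping above is
// exponential <-> linear. Assuming the usual semantics of expLin/linExp from
// de.sciss.numbers, a state value v in [visBoostMin, visBoostMax] maps to the slider
// position log(v/min)/log(max/min) scaled to the slider range, so the default value
// sqrt(visBoostMin * visBoostMax) lands at the middle of the slider.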
|
Sciss/Mellite
|
app/src/main/scala/de/sciss/mellite/impl/state/TimelineViewState.scala
|
Scala
|
agpl-3.0
| 4,993
|
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.core.elements
import org.argus.jawa.core.java_signatures.FieldSignature
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
case class FieldFQN(field_signature: FieldSignature) extends JavaKnowledge {
def this(owner: JawaType, fieldName: String, typ: JawaType) =
this(FieldSignature(owner = Some(owner.javaType), name = fieldName, fieldType = Some(typ.javaType)))
def this(fqn: String, typ: JawaType) =
this(JavaKnowledge.getClassTypeFromFieldFQN(fqn), JavaKnowledge.getFieldNameFromFieldFQN(fqn), typ)
val owner: JawaType = JawaType(field_signature.getOwner)
val fieldName: String = field_signature.name
val typ: JawaType = JawaType(field_signature.getFieldType)
def fqn: String = (owner.jawaName + "." + fieldName).intern()
override def toString: String = (owner.jawaName + "." + fieldName + ":" + typ.jawaName).intern()
}
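/*
 * Usage sketch (not part of the original file): given some JawaType `typ` for the
 * field's type (construction not shown here), the auxiliary constructor splits the
 * owner class and field name out of the FQN via the JavaKnowledge helpers, so
 *
 *   new FieldFQN("com.example.Foo.bar", typ).fqn   // "com.example.Foo.bar"
 *
 * and toString additionally appends ":" plus the type's jawaName.
 */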
|
arguslab/Argus-SAF
|
jawa/src/main/scala/org/argus/jawa/core/elements/FieldFQN.scala
|
Scala
|
apache-2.0
| 1,247
|
class Usuario(nom : String) {
// Attributes
val nombre:String = nom
// Methods
def saludar:String = s"Saludo de $nombre"
override def toString = s"Usuario($nombre)"
}
// Constructor parameter promoted to an attribute
class UsuarioMaligno(val nombre: String) {
def saludar:String = s"Saludo tenebroso de $nombre"
override def toString = s"Usuario maligno($nombre)"
}
val us1 = new Usuario("pepe")
val us2 = new UsuarioMaligno("saruman")
println(us1)
println(us2)
val usuarios = List(us1, new Usuario("alvaro"))
println(usuarios)
// Maps each user in the list to the length of their name
val tam = usuarios map (_.nombre.size)
println(tam)
val ordenados = usuarios sortBy (_.nombre)
println(ordenados)
val cong = usuarios find (_.nombre contains "g")
// cong and cong2 are equivalent
val cong2 = usuarios find(usuario => usuario.nombre contains "g")
println(cong)
println(cong2)
// If cong holds no user, print "no usuario"
val saludo = cong map (_.saludar) getOrElse "no usuario"
println(saludo)
|
romanarranz/NTP
|
S3/clase_usuario_parametros.scala
|
Scala
|
mit
| 1,035
|
package cn.gridx.scala.lang.classes.constructors.fields.visibility
/**
* Created by tao on 8/20/15.
*/
object Test extends App {
val cat = new Cat(100, "200", true, "300")
// println(cat.x + " | " + cat.y)
}
|
TaoXiao/Scala
|
lang/src/main/scala/cn/gridx/scala/lang/classes/constructors/fields/visibility/Test.scala
|
Scala
|
apache-2.0
| 219
|
package gwi.mawex.worker
import akka.actor.{ActorRef, ActorSystem, Address, AddressFromURIString, Props, RootActorPath}
import akka.cluster.client.{ClusterClient, ClusterClientSettings}
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import gwi.mawex.RemoteService.HostAddress
import gwi.mawex._
import gwi.mawex.executor._
import org.backuity.clist.{Cli, Command, arg, opt}
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
object WorkerCmd extends Command(name = "workers", description = "launches workers") with ClusterService with MountingService with LazyLogging {
var configMapName = opt[Option[String]](useEnv = true, name="config-map-name", description = "Name of the config map that holds files to mount")
var consumerGroups = opt[List[String]](useEnv = true, default = List("default"), description = "sum,add,divide - 3 workers in 3 consumer groups")
var pod = opt[String](useEnv = true, default = "default", description = "Workers within the same pod are executing sequentially")
var masterId = opt[String](useEnv = true, default = "master", name="master-id")
var executorType = opt[String](useEnv = true, default = "forked", name = "executor-type", description = "local / forked / k8s")
var sandboxJvmOpts = opt[Option[String]](useEnv = true, name = "sandbox-jvm-opts", description = "Whether to execute task in a forked process and with what JVM options")
var sandboxCheckInterval = opt[Int](useEnv = true, default = 2*60, name = "sandbox-check-interval", description = "Interval in seconds of checking for jobs spawned in sandbox")
var sandboxCheckLimit = opt[Int](useEnv = true, default = 15, name = "sandbox-check-limit", description = "How many times to check whether job spawned in sandbox is alive")
var forkedJvmClassPath = opt[String](useEnv = true, default = "lib/*", name = "forked-jvm-class-path", description = "Class path for the fork jvm executor")
var k8sNamespace = opt[String](useEnv = true, default = "default", name = "k8s-namespace", description = "What namespace to execute k8s jobs at")
var k8sDockerImage = opt[Option[String]](useEnv = true, name = "k8s-docker-image", description = "What docker image to run job with")
var k8sResourcesLimitsCpu = opt[String](useEnv = true, default = "150m", name = "k8s-resources-limits-cpu", description = "k8s resource limits")
var k8sResourcesLimitsMem = opt[String](useEnv = true, default = "100Mi", name = "k8s-resources-limits-memory", description = "k8s resource limits")
var k8sResourcesRequestsCpu = opt[String](useEnv = true, default = "50m", name = "k8s-resources-requests-cpu", description = "k8s resource limits")
var k8sResourcesRequestsMem = opt[String](useEnv = true, default = "100Mi", name = "k8s-resources-requests-memory", description = "k8s resource limits")
var k8sClientDebugMode = opt[Boolean](useEnv = true, default = false, name = "k8s-client-debug-mode", description = "k8s client debug mode")
var executorClass = arg[String](name="executor-class", description = "Full class name of executor Actor")
var commandBuilderClass = arg[Option[String]](required = false, name="command-builder-class", description = "Full class name of MawexCommandBuilder")
var commandBuilderArgs = arg[Option[String]](required = false, name="command-args", description = "Arguments to be passed to MawexCommandBuilder")
private def getExecutorResources =
K8Resources(
k8sResourcesLimitsCpu,
k8sResourcesLimitsMem,
k8sResourcesRequestsCpu,
k8sResourcesRequestsMem
)
private def workerActorRef(masterId: String, clusterClient: ActorRef, workerId: WorkerId, taskTimeout: FiniteDuration, sandBoxProps: Props, system: ActorSystem): ActorRef =
system.actorOf(Worker.props(masterId, clusterClient, workerId, sandBoxProps, taskTimeout), s"worker-${workerId.id}")
private def workerClusterClient(seedNodes: List[HostAddress], system: ActorSystem): ActorRef = {
val initialContacts =
seedNodes
.map { case HostAddress(host, port) => s"akka.tcp://ClusterSystem@$host:$port" }
.map { case AddressFromURIString(addr) => RootActorPath(addr) / "system" / "receptionist" }
.toSet
system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), "clusterClient")
}
private def buildCommand(clazz: Class[_], args: Seq[String], config: Config) = {
Cli.parse(("command" +: args).toArray)
.withCommands(clazz.newInstance().asInstanceOf[MawexCommandBuilder[MawexCommand]])
.map(_.build(config))
.getOrElse(throw new IllegalArgumentException(s"Invalid arguments : " + args.mkString("\n", "\n", "\n")))
}
private def getSandBoxProps(executorProps: Props, consumerGroup: String) = executorType match {
case "local" =>
logger.info(s"Local mode enabled on worker")
SandBox.localJvmProps(executorProps)
case "forked" =>
logger.info(s"Forked mode enabled on worker")
SandBox.forkingProps(
executorProps,
ForkedJvmConf(forkedJvmClassPath, sandboxCheckInterval.seconds, sandboxCheckLimit),
ExecutorCmd.forkedCmd(sandboxJvmOpts, getMountPath)
)
case "k8s" =>
logger.info(s"K8s mode enabled on worker")
val k8Image = k8sDockerImage.getOrElse(throw new IllegalArgumentException("k8sDockerImage not specified !!!"))
SandBox.k8JobProps(
executorProps,
K8JobConf(k8Image, k8sNamespace, getExecutorResources, k8sClientDebugMode, sandboxCheckInterval.seconds, sandboxCheckLimit),
ExecutorCmd.k8sCmd(sandboxJvmOpts, getMountPath, configMapName)
)
case x =>
throw new IllegalArgumentException(s"Executor type $x is not valid, please choose between local / forked / k8s")
}
def run(): Unit = {
val system = RemoteService.buildRemoteSystem(Address("akka.tcp", Worker.SystemName, Some(hostAddress.host), Some(hostAddress.port)), getAppConf)
val commandArgSeq = commandBuilderArgs.map(_.split(" ").filter(_.nonEmpty).toSeq).getOrElse(Seq.empty)
val commandOpt = commandBuilderClass.map( className => buildCommand(Class.forName(className), commandArgSeq, system.settings.config))
val executorClazz = Class.forName(executorClass)
val executorProps = commandOpt.fold(Props(executorClazz))(cmd => Props(executorClazz, cmd))
val clusterClient = workerClusterClient(seedNodes, system)
consumerGroups.foreach { consumerGroup =>
workerActorRef(
masterId,
clusterClient,
WorkerId(consumerGroup, pod),
((sandboxCheckLimit * sandboxCheckInterval) + 10).seconds,
getSandBoxProps(executorProps, consumerGroup),
system
)
}
system.whenTerminated.onComplete(_ => System.exit(0))(ExecutionContext.Implicits.global)
sys.addShutdownHook(Await.result(system.terminate(), 10.seconds))
}
}
|
GlobalWebIndex/mawex
|
src/core/src/main/scala/gwi/mawex/worker/WorkerCmd.scala
|
Scala
|
mit
| 7,143
|
package akka.persistence.jdbc.integration
import akka.persistence.jdbc.query.{ EventAdapterTest, MysqlCleaner, OracleCleaner, PostgresCleaner, SqlServerCleaner }
class PostgresScalaEventAdapterTest extends EventAdapterTest("postgres-application.conf") with PostgresCleaner
class MySQLScalaEventAdapterTest extends EventAdapterTest("mysql-application.conf") with MysqlCleaner
class OracleScalaEventAdapterTest extends EventAdapterTest("oracle-application.conf") with OracleCleaner
class SqlServerScalaEventAdapterTest extends EventAdapterTest("sqlserver-application.conf") with SqlServerCleaner
|
dnvriend/akka-persistence-jdbc
|
core/src/it/scala/akka/persistence/jdbc/integration/EventAdapterTest.scala
|
Scala
|
apache-2.0
| 599
|
package co.rc.smserviceclient.infrastructure.acl.dtos.responses
import argonaut._, Argonaut._
/**
* Class that represents an error response
* @param response Error response message
* @param statusCode Error response status code
*/
case class ErrorResponseDTO( response: String,
statusCode: Option[ Int ] = None ) extends HandledResponse
/**
* Companion object for ErrorResponseDTO
*/
object ErrorResponseDTO {
/**
* Implicit marshaller for ErrorResponseDTO
* @return ErrorResponseDTO CodecJson
*/
implicit def ErrorResponseDTOCodec: CodecJson[ ErrorResponseDTO ] =
casecodec2( ErrorResponseDTO.apply, ErrorResponseDTO.unapply )( "response", "statusCode" )
}
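/**
 * Usage sketch (not part of the original file): round-trips a DTO through JSON
 * using the codec above; the literal values are purely illustrative.
 */
object ErrorResponseDTOUsageSketch {
  def demo(): Option[ ErrorResponseDTO ] = {
    // Encoded roughly as {"response":"boom","statusCode":404}
    val json = ErrorResponseDTO( "boom", Some( 404 ) ).asJson.nospaces
    Parse.decodeOption[ ErrorResponseDTO ]( json ) // Some(ErrorResponseDTO("boom", Some(404)))
  }
}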
|
rodricifuentes1/session-manager-service-client
|
src/main/scala/co/rc/smserviceclient/infrastructure/acl/dtos/responses/ErrorResponseDTO.scala
|
Scala
|
mit
| 686
|
package com.jayway.textmining
import com.weiglewilczek.slf4s.Logging
import java.io.File
import scalaz.{Failure, Success}
/**
* Copyright 2012 Amir Moulavi (amir.moulavi@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Amir Moulavi
*/
class Buckshot(files:List[(String, String)], k:Int)
extends RandomSelector
with Logging { this:FeatureSelection =>
require(k < files.size, "K must be less than the number of documents")
require(k > 0, "K must be a positive non-zero integer")
val fileContents:List[String] = files.map( _._2 )
val documents:List[Document] = selectFeatures(files)
val vectorSpace = VectorSpace()
documents.foreach( d => vectorSpace.addDimension(d.uniqueNouns))
val mathUtils = MathUtils(vectorSpace)
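/**
 * Buckshot clustering (sketch of what the code below does): draw roughly
 * sqrt(k * n) documents at random, cluster that sample hierarchically into k
 * clusters, compute each cluster's centroid, then assign every remaining
 * document to the cluster whose centroid is closest by Euclidean distance.
 */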
def clusterDocument:List[Cluster] = {
val selectedFiles = `select kd random document`
val remainingFiles = files diff selectedFiles
val remainingDocs = convertFilesToDocuments(remainingFiles)
val clusters = (new HierarchicalAgglomerativeCluster(selectedFiles, k) with NLPFeatureSelection).clusterDocuments()
clusters.foreach( c => c.calculateNewCentroid() )
remainingDocs.foreach { d =>
val distances = clusters.map( c => (c, mathUtils.euclideanDistance(d, c.centroid)) )
val closestCluster = distances.sortWith( (c1, c2) => c1._2.compareTo(c2._2) < 0).head._1
closestCluster.addDocument(d)
}
clusters
}
private def convertFilesToDocuments(list:List[(String, String)]):List[Document] = {
selectFeatures(list)
}
private def `select kd random document`:List[(String, String)] =
selectRandom[(String, String)](math.sqrt((k * documents.size).toDouble).toInt, files)
}
|
amir343/grape
|
src/main/scala/com/jayway/textmining/Buckshot.scala
|
Scala
|
apache-2.0
| 2,233
|
package com.seanshubin.detangler.report
import org.scalatest.FunSuite
class DependencyTemplateRulesTest extends FunSuite {
test("standalone detail") {
//given
val standaloneTemplateText =
"""<div class="standalone-dependency">
| <p class="caption">replace-me</p>
| <ul class="standalone-append-dependency-row">
| <li class="standalone-dependency-row">
| <p><a class="name" href="replace-me">replace-me</a></p>
| <p><a class="cycle-link" href="">↻</a></p>
| <p class="depth">replace-me</p>
| <p class="breadth">replace-me</p>
| <p class="transitive">replace-me</p>
| <p><a class="reason" href="replace-me">replace-me</a></p>
| <p><a class="composed-of" href="replace-me">replace-me</a></p>
| </li>
| </ul>
|</div>
""".stripMargin
val standaloneDetailTemplate = HtmlElement.fragmentFromString(standaloneTemplateText)
val standaloneDetailTemplateRules = new DependencyTemplateRulesImpl(
SampleData.detangled, DependencyDirection.TowardDependsOn)
//when
val Some(actual) = standaloneDetailTemplateRules.generate(standaloneDetailTemplate, SampleData.root, SampleData.groupA)
//then
assert(actual.select(".caption").text() === "depends on (1)")
assert(actual.select(".name").attr("href") === "#group-b")
assert(actual.select(".name").text() === "group/b")
assert(actual.select(".depth").text() === "0")
assert(actual.select(".breadth").text() === "0")
assert(actual.select(".transitive").text() === "0")
assert(actual.select(".reason").attr("href") === "#group-a---group-b")
assert(actual.select(".reason").text() === "reason")
}
}
|
SeanShubin/detangler
|
report/src/test/scala/com/seanshubin/detangler/report/DependencyTemplateRulesTest.scala
|
Scala
|
unlicense
| 1,773
|
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.spark.wrappers.estimators
import org.apache.spark.ml.feature.{IDF => SparkIDF, IDFModel => SparkIDFModel}
import io.deepsense.deeplang.doperables.SparkSingleColumnEstimatorWrapper
import io.deepsense.deeplang.doperables.spark.wrappers.models.IDFModel
import io.deepsense.deeplang.params.Param
import io.deepsense.deeplang.params.validators.RangeValidator
import io.deepsense.deeplang.params.wrappers.spark.IntParamWrapper
class IDFEstimator extends SparkSingleColumnEstimatorWrapper[SparkIDFModel, SparkIDF, IDFModel] {
val minDocFreq = new IntParamWrapper[SparkIDF](
name = "min documents frequency",
description = Some("The minimum number of documents in which a term should appear."),
sparkParamGetter = _.minDocFreq,
validator = RangeValidator(begin = 0.0, end = Int.MaxValue, step = Some(1.0)))
setDefault(minDocFreq, 0.0)
override protected def getSpecificParams: Array[Param[_]] = Array(minDocFreq)
}
|
deepsense-io/seahorse-workflow-executor
|
deeplang/src/main/scala/io/deepsense/deeplang/doperables/spark/wrappers/estimators/IDFEstimator.scala
|
Scala
|
apache-2.0
| 1,576
|
package data
import java.util.concurrent.TimeUnit
import java.util.function.Supplier
import akka.actor.ActorSystem
import play.libs.F.Promise
import play.libs.HttpExecution
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import akka.pattern.after
class FutureUtil(actorSystem: ActorSystem) {
/**
* Return a Scala Future that will be redeemed with the given message after the specified delay.
*
* @param message the value to complete the future with (evaluated lazily)
* @param delay how long to wait before redeeming the future
* @param unit the time unit of `delay`
* @param ec the execution context used to evaluate `message`
* @tparam A the type of the message
* @return a future redeemed with `message` after the delay
*/
def timeout[A](message: => A, delay: Long, unit: TimeUnit = TimeUnit.MILLISECONDS)(implicit ec: ExecutionContext): Future[A] = {
after(FiniteDuration(delay, unit), actorSystem.scheduler)(Future(message))
}
/**
* Return a Java Promise that will be redeemed with the given message after the specified delay.
*
* @param message supplies the value to complete the promise with
* @param delay how long to wait before redeeming the promise
* @param unit the time unit of `delay`
* @tparam A the type of the message
* @return a promise redeemed with the supplied message after the delay
*/
def timeout[A](message: Supplier[A], delay: Long, unit: TimeUnit): Promise[A] = {
timeout(message, delay, unit, HttpExecution.defaultContext())
}
/**
* Return a Java Promise that will be redeemed with the given message after the specified delay.
*
* @param message supplies the value to complete the promise with
* @param delay how long to wait before redeeming the promise
* @param unit the time unit of `delay`
* @param ec the execution context used to evaluate the supplier
* @tparam A the type of the message
* @return a promise redeemed with the supplied message after the delay
*/
def timeout[A](message: Supplier[A], delay: Long, unit: TimeUnit, ec: ExecutionContext): Promise[A] = {
Promise.wrap(timeout(message.get(), delay, unit)(ec))
}
}
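/*
 * Usage sketch (not part of the original file; the actor system name and values
 * are illustrative):
 *
 *   import scala.concurrent.ExecutionContext.Implicits.global
 *   val futures = new FutureUtil(ActorSystem("demo"))
 *   futures.timeout("done", delay = 500L).foreach(println) // prints "done" after ~500 ms
 */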
|
brikis98/ping-play
|
sample-app-common/src/main/scala/data/FutureUtil.scala
|
Scala
|
mit
| 1,526
|
package com.github.holothuroid.covella.examples
import com.github.holothuroid.covella._
/**
* The calendar of the Maya people, as best as I understand it.
* Includes Tzolkin, Haab, Long Count and Lords of Night.
* Timestamps are set relative to 1970-01-01 CE for synchronisation with other calendars.
* @param days The precision of timekeeping. By standard, counting seconds.
*/
case class MayaCalendar(days: TimeUnit = CommonDays.days) {
/** Long count
* Twenty of these k'ins [days] are known as a winal or uinal.
* Eighteen winals make one tun.
* Twenty tuns are known as a k'atun.
* Twenty k'atuns make a b'ak'tun. */
lazy val longCountCalendar = Calendar(baktuns) setTimestampZero
Datum.of(
'baktun -> 12,
'katun -> 17,
'tun -> 16,
'winal -> 7,
'kin ->5) // This corresponds to 1970-01-01, i.e. unix epoch
lazy val kins = 'kin isAliasFor days
lazy val winals = 'winal of (kins,20)
lazy val tuns = 'tun of (winals,18)
lazy val katuns = 'katun of (tuns,20)
lazy val baktuns = 'baktun of (katuns,20)
/** Haab
18 months of 20 days and 5 empty days */
lazy val haabCalendar = Calendar(haabYears) setTimestampZero
Datum.of('haabYear -> 0, 'haabMonth-> 14, 'haabMonthDay -> 3) // 3 K'ank'in
// Alias is required to prevent variable shadowing
private lazy val haabMonthDays = 'haabMonthDay isAliasFor days
lazy val haabMonth = 'haabMonth of (haabMonthDays,20)
lazy val wayebs = 'haabMonth of (haabMonthDays,5)
lazy val haabYears = 'haabYear isCycleOf ( haabMonth*18 :+ wayebs :_*) withNames
("Pop","Wo'","Sip","Sotz'","Sek",
"Xul","Yaxk'in'","Mol","Ch'en","Yax",
"Sak'","Keh","Mak","K'ank'in","Muwan'",
"Pax","K'ayab", "Kumk'u","Wayeb'") withOffset 1 // months are counted from 1
/** Tzolkin
* Year consisting of two cycles:
* - A cycle of numbers ranging from 1 to 13
* - A cycle of 20 names.
* Thus forming a year of 260 days.
* Modeled here by synchronizing two SimpleCalendars. */
lazy val tzolkinCalendar =
Calendar(tzolkinNumbers) setTimestampZero
Datum.of('tzolkinNumberCycle-> 0, 'tzolkinNumberDay->13) synchronise
Calendar(tzolkinNames).setTimestampZero(
Datum.of('tzolkinNameCycle->0, 'tzolkinNameDay ->5 )) // = Chickchan
// Aliases are required to prevent variable shadowing
private lazy val tzolkinNumberDays = 'tzolkinNumberDay isAliasFor days
private lazy val tzolkinNameDays = 'tzolkinNameDay isAliasFor days
lazy val tzolkinNumbers =
'tzolkinNumberCycle of (tzolkinNumberDays,13) withOffset 1
lazy val tzolkinNames = 'tzolkinNameCycle of tzolkinNameDays withNames
("Imix'", "Ik'", "Ak'b'al", "K'an", "Chickchan",
"Kimi", "Manik'", "Lamat", "Muluk", "Ok",
"Chuwen", "Eb'", "B'en", "Ix", "Men",
"K'ib'", "Kab'an", "Etz'nab'", "Kwak", "Ajaw"
) withOffset 1
/**
* The Haab and Tzolkin Calendar round of approximately 52 years.
*/
lazy val calendarRound = calendarRoundPre add ('yearBearer -> calculateYearBearer )
private[covella] lazy val calendarRoundPre = haabCalendar synchronise tzolkinCalendar
private def calculateYearBearer(datum: Datum) : (Int,String) =
datum.get('haabYear)
.map(x => Datum.of('haabYear -> x))
.map(_.completeWithCalendar(calendarRoundPre))
.flatMap(_.begins)
.map(_.inCalendar(calendarRoundPre))
.map(x => (x.get('tzolkinNumberDay).getOrElse(BigInt(-1)).toInt,
x.getName('tzolkinNameDay).getOrElse("UNKNOWN_TZOLKIN")))
.getOrElse((0,"UNKNOWN_YEARBEARER"))
/**
* Lords of Night are a cycle of 9 days, each dedicated to a supernatural entity.
* Since the Maya names are unknown they are usually given as G1 - G9.
* 1970-01-01 CE happens to be G1.
*/
lazy val lordsOfNightCalendar = Calendar(lordsOfNight)
private lazy val nights = 'lordOfNight isAliasFor days
lazy val lordsOfNight = 'lordOfNightCycle of (nights,9) withOffset 1
/**
* The Long Count combined with the Haab and Tzolkin Calendar Round as well as the Lords of Night.
*/
lazy val mayaCalendar = longCountCalendar synchronise calendarRound synchronise lordsOfNightCalendar
}
object MayaCalendar{
/**
* Includes DateFormats for the single places of the Long Count.
*/
object LongCountParts {
lazy val k = num('kin)
lazy val w = num('winal)
lazy val t = num('tun)
lazy val kat = num('katun)
lazy val b = num('baktun)
}
import LongCountParts._
/**
* Long Count date format giving numbers for baktun, katun, tun, winal and kin.
*/
lazy val longCountFormat : DateFormat = df"$b.$kat.$t.$w.$k"
/**
* DateFormat for Haab, giving number of day in month and month name.
*/
lazy val haabFormat = df"${num('haabMonthDay)} ${nam('haabMonth)}"
/** DateFormat for Tzolkin, giving number and name.
*/
lazy val tzolkinFormat = df"${num('tzolkinNumberDay)} ${nam('tzolkinNameDay)}"
/**
* DateFormat combining Tzolkin and Haab formats.
*/
lazy val calendarRoundFormat : DateFormat = df"$tzolkinFormat $haabFormat"
/**
* DateFormat giving the Lord of Night for a date as G1 - G9.
*/
lazy val lordsOfNightFormat = df"G${num('lordOfNight,1)}"
/**
* DateFormat extracting the Year Bearer from with number and name from a Datum.
*/
lazy val yearBearerFormat = df"${num('yearBearer)} ${nam('yearBearer)}"
/**
* DateFormat with Long Count, Tzolkin, Haab, Year Bearer and Lord of Night
*/
lazy val mayaFormat = df"$longCountFormat $tzolkinFormat $haabFormat, $lordsOfNightFormat, YB: $yearBearerFormat"
}
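/*
 * Usage sketch (not part of the original file): MayaCalendar().mayaCalendar is the
 * fully combined calendar. From the timestamp-zero datums declared above,
 * 1970-01-01 CE corresponds roughly to
 *
 *   12.17.16.7.5  13 Chickchan  3 K'ank'in, G1
 *
 * when rendered with longCountFormat, tzolkinFormat, haabFormat and
 * lordsOfNightFormat; the exact formatting call depends on the covella Datum API,
 * which is not shown in this file.
 */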
|
Holothuroid/Covella
|
covella/examples/MayaCalendar.scala
|
Scala
|
mit
| 5,811
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.time.Instant
import java.util.Locale
import java.util.concurrent.TimeUnit
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.unsafe.types.CalendarInterval
class DateFunctionsSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("function current_date") {
val df1 = Seq((1, 2), (3, 1)).toDF("a", "b")
val d0 = DateTimeUtils.millisToDays(System.currentTimeMillis())
val d1 = DateTimeUtils.fromJavaDate(df1.select(current_date()).collect().head.getDate(0))
val d2 = DateTimeUtils.fromJavaDate(
sql("""SELECT CURRENT_DATE()""").collect().head.getDate(0))
val d3 = DateTimeUtils.millisToDays(System.currentTimeMillis())
assert(d0 <= d1 && d1 <= d2 && d2 <= d3 && d3 - d0 <= 1)
}
test("function current_timestamp and now") {
val df1 = Seq((1, 2), (3, 1)).toDF("a", "b")
checkAnswer(df1.select(countDistinct(current_timestamp())), Row(1))
// Execution in one query should return the same value
checkAnswer(sql("""SELECT CURRENT_TIMESTAMP() = CURRENT_TIMESTAMP()"""), Row(true))
// Current timestamp should return the current timestamp ...
val before = System.currentTimeMillis
val got = sql("SELECT CURRENT_TIMESTAMP()").collect().head.getTimestamp(0).getTime
val after = System.currentTimeMillis
assert(got >= before && got <= after)
// Now alias
checkAnswer(sql("""SELECT CURRENT_TIMESTAMP() = NOW()"""), Row(true))
}
val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val sdfDate = new SimpleDateFormat("yyyy-MM-dd", Locale.US)
val d = new Date(sdf.parse("2015-04-08 13:10:15").getTime)
val ts = new Timestamp(sdf.parse("2013-04-08 13:10:15").getTime)
test("timestamp comparison with date strings") {
val df = Seq(
(1, Timestamp.valueOf("2015-01-01 00:00:00")),
(2, Timestamp.valueOf("2014-01-01 00:00:00"))).toDF("i", "t")
checkAnswer(
df.select("t").filter($"t" <= "2014-06-01"),
Row(Timestamp.valueOf("2014-01-01 00:00:00")) :: Nil)
checkAnswer(
df.select("t").filter($"t" >= "2014-06-01"),
Row(Timestamp.valueOf("2015-01-01 00:00:00")) :: Nil)
}
test("date comparison with date strings") {
val df = Seq(
(1, Date.valueOf("2015-01-01")),
(2, Date.valueOf("2014-01-01"))).toDF("i", "t")
checkAnswer(
df.select("t").filter($"t" <= "2014-06-01"),
Row(Date.valueOf("2014-01-01")) :: Nil)
checkAnswer(
df.select("t").filter($"t" >= "2015"),
Row(Date.valueOf("2015-01-01")) :: Nil)
}
test("date format") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(date_format($"a", "y"), date_format($"b", "y"), date_format($"c", "y")),
Row("2015", "2015", "2013"))
checkAnswer(
df.selectExpr("date_format(a, 'y')", "date_format(b, 'y')", "date_format(c, 'y')"),
Row("2015", "2015", "2013"))
}
test("year") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(year($"a"), year($"b"), year($"c")),
Row(2015, 2015, 2013))
checkAnswer(
df.selectExpr("year(a)", "year(b)", "year(c)"),
Row(2015, 2015, 2013))
}
test("quarter") {
val ts = new Timestamp(sdf.parse("2013-11-08 13:10:15").getTime)
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(quarter($"a"), quarter($"b"), quarter($"c")),
Row(2, 2, 4))
checkAnswer(
df.selectExpr("quarter(a)", "quarter(b)", "quarter(c)"),
Row(2, 2, 4))
}
test("month") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(month($"a"), month($"b"), month($"c")),
Row(4, 4, 4))
checkAnswer(
df.selectExpr("month(a)", "month(b)", "month(c)"),
Row(4, 4, 4))
}
test("dayofmonth") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(dayofmonth($"a"), dayofmonth($"b"), dayofmonth($"c")),
Row(8, 8, 8))
checkAnswer(
df.selectExpr("day(a)", "day(b)", "dayofmonth(c)"),
Row(8, 8, 8))
}
test("dayofyear") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(dayofyear($"a"), dayofyear($"b"), dayofyear($"c")),
Row(98, 98, 98))
checkAnswer(
df.selectExpr("dayofyear(a)", "dayofyear(b)", "dayofyear(c)"),
Row(98, 98, 98))
}
test("hour") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(hour($"a"), hour($"b"), hour($"c")),
Row(0, 13, 13))
checkAnswer(
df.selectExpr("hour(a)", "hour(b)", "hour(c)"),
Row(0, 13, 13))
}
test("minute") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(minute($"a"), minute($"b"), minute($"c")),
Row(0, 10, 10))
checkAnswer(
df.selectExpr("minute(a)", "minute(b)", "minute(c)"),
Row(0, 10, 10))
}
test("second") {
val df = Seq((d, sdf.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(second($"a"), second($"b"), second($"c")),
Row(0, 15, 15))
checkAnswer(
df.selectExpr("second(a)", "second(b)", "second(c)"),
Row(0, 15, 15))
}
test("weekofyear") {
val df = Seq((d, sdfDate.format(d), ts)).toDF("a", "b", "c")
checkAnswer(
df.select(weekofyear($"a"), weekofyear($"b"), weekofyear($"c")),
Row(15, 15, 15))
checkAnswer(
df.selectExpr("weekofyear(a)", "weekofyear(b)", "weekofyear(c)"),
Row(15, 15, 15))
}
test("function date_add") {
val st1 = "2015-06-01 12:34:56"
val st2 = "2015-06-02 12:34:56"
val t1 = Timestamp.valueOf(st1)
val t2 = Timestamp.valueOf(st2)
val s1 = "2015-06-01"
val s2 = "2015-06-02"
val d1 = Date.valueOf(s1)
val d2 = Date.valueOf(s2)
val df = Seq((t1, d1, s1, st1), (t2, d2, s2, st2)).toDF("t", "d", "s", "ss")
checkAnswer(
df.select(date_add(col("d"), 1)),
Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03"))))
checkAnswer(
df.select(date_add(col("t"), 3)),
Seq(Row(Date.valueOf("2015-06-04")), Row(Date.valueOf("2015-06-05"))))
checkAnswer(
df.select(date_add(col("s"), 5)),
Seq(Row(Date.valueOf("2015-06-06")), Row(Date.valueOf("2015-06-07"))))
checkAnswer(
df.select(date_add(col("ss"), 7)),
Seq(Row(Date.valueOf("2015-06-08")), Row(Date.valueOf("2015-06-09"))))
checkAnswer(df.selectExpr("DATE_ADD(null, 1)"), Seq(Row(null), Row(null)))
checkAnswer(
df.selectExpr("""DATE_ADD(d, 1)"""),
Seq(Row(Date.valueOf("2015-06-02")), Row(Date.valueOf("2015-06-03"))))
}
test("function date_sub") {
val st1 = "2015-06-01 12:34:56"
val st2 = "2015-06-02 12:34:56"
val t1 = Timestamp.valueOf(st1)
val t2 = Timestamp.valueOf(st2)
val s1 = "2015-06-01"
val s2 = "2015-06-02"
val d1 = Date.valueOf(s1)
val d2 = Date.valueOf(s2)
val df = Seq((t1, d1, s1, st1), (t2, d2, s2, st2)).toDF("t", "d", "s", "ss")
checkAnswer(
df.select(date_sub(col("d"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(col("t"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(col("s"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(col("ss"), 1)),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
checkAnswer(
df.select(date_sub(lit(null), 1)).limit(1), Row(null))
checkAnswer(df.selectExpr("""DATE_SUB(d, null)"""), Seq(Row(null), Row(null)))
checkAnswer(
df.selectExpr("""DATE_SUB(d, 1)"""),
Seq(Row(Date.valueOf("2015-05-31")), Row(Date.valueOf("2015-06-01"))))
}
test("time_add") {
val t1 = Timestamp.valueOf("2015-07-31 23:59:59")
val t2 = Timestamp.valueOf("2015-12-31 00:00:00")
val d1 = Date.valueOf("2015-07-31")
val d2 = Date.valueOf("2015-12-31")
val i = new CalendarInterval(2, 2000000L)
val df = Seq((1, t1, d1), (3, t2, d2)).toDF("n", "t", "d")
checkAnswer(
df.selectExpr(s"d + $i"),
Seq(Row(Date.valueOf("2015-09-30")), Row(Date.valueOf("2016-02-29"))))
checkAnswer(
df.selectExpr(s"t + $i"),
Seq(Row(Timestamp.valueOf("2015-10-01 00:00:01")),
Row(Timestamp.valueOf("2016-02-29 00:00:02"))))
}
test("time_sub") {
val t1 = Timestamp.valueOf("2015-10-01 00:00:01")
val t2 = Timestamp.valueOf("2016-02-29 00:00:02")
val d1 = Date.valueOf("2015-09-30")
val d2 = Date.valueOf("2016-02-29")
val i = new CalendarInterval(2, 2000000L)
val df = Seq((1, t1, d1), (3, t2, d2)).toDF("n", "t", "d")
checkAnswer(
df.selectExpr(s"d - $i"),
Seq(Row(Date.valueOf("2015-07-30")), Row(Date.valueOf("2015-12-30"))))
checkAnswer(
df.selectExpr(s"t - $i"),
Seq(Row(Timestamp.valueOf("2015-07-31 23:59:59")),
Row(Timestamp.valueOf("2015-12-31 00:00:00"))))
}
test("function add_months") {
val d1 = Date.valueOf("2015-08-31")
val d2 = Date.valueOf("2015-02-28")
val df = Seq((1, d1), (2, d2)).toDF("n", "d")
checkAnswer(
df.select(add_months(col("d"), 1)),
Seq(Row(Date.valueOf("2015-09-30")), Row(Date.valueOf("2015-03-31"))))
checkAnswer(
df.selectExpr("add_months(d, -1)"),
Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-01-31"))))
}
test("function months_between") {
val d1 = Date.valueOf("2015-07-31")
val d2 = Date.valueOf("2015-02-16")
val t1 = Timestamp.valueOf("2014-09-30 23:30:00")
val t2 = Timestamp.valueOf("2015-09-16 12:00:00")
val s1 = "2014-09-15 11:30:00"
val s2 = "2015-10-01 00:00:00"
val df = Seq((t1, d1, s1), (t2, d2, s2)).toDF("t", "d", "s")
checkAnswer(df.select(months_between(col("t"), col("d"))), Seq(Row(-10.0), Row(7.0)))
checkAnswer(df.selectExpr("months_between(t, s)"), Seq(Row(0.5), Row(-0.5)))
checkAnswer(df.selectExpr("months_between(t, s, true)"), Seq(Row(0.5), Row(-0.5)))
Seq(true, false).foreach { roundOff =>
checkAnswer(df.select(months_between(col("t"), col("d"), roundOff)),
Seq(Row(-10.0), Row(7.0)))
checkAnswer(df.withColumn("r", lit(false)).selectExpr("months_between(t, s, r)"),
Seq(Row(0.5), Row(-0.5)))
}
}
test("function last_day") {
val df1 = Seq((1, "2015-07-23"), (2, "2015-07-24")).toDF("i", "d")
val df2 = Seq((1, "2015-07-23 00:11:22"), (2, "2015-07-24 11:22:33")).toDF("i", "t")
checkAnswer(
df1.select(last_day(col("d"))),
Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-07-31"))))
checkAnswer(
df2.select(last_day(col("t"))),
Seq(Row(Date.valueOf("2015-07-31")), Row(Date.valueOf("2015-07-31"))))
}
test("function next_day") {
val df1 = Seq(("mon", "2015-07-23"), ("tuesday", "2015-07-20")).toDF("dow", "d")
val df2 = Seq(("th", "2015-07-23 00:11:22"), ("xx", "2015-07-24 11:22:33")).toDF("dow", "t")
checkAnswer(
df1.select(next_day(col("d"), "MONDAY")),
Seq(Row(Date.valueOf("2015-07-27")), Row(Date.valueOf("2015-07-27"))))
checkAnswer(
df2.select(next_day(col("t"), "th")),
Seq(Row(Date.valueOf("2015-07-30")), Row(Date.valueOf("2015-07-30"))))
}
test("function to_date") {
val d1 = Date.valueOf("2015-07-22")
val d2 = Date.valueOf("2015-07-01")
val d3 = Date.valueOf("2014-12-31")
val t1 = Timestamp.valueOf("2015-07-22 10:00:00")
val t2 = Timestamp.valueOf("2014-12-31 23:59:59")
val t3 = Timestamp.valueOf("2014-12-31 23:59:59")
val s1 = "2015-07-22 10:00:00"
val s2 = "2014-12-31"
val s3 = "2014-31-12"
val df = Seq((d1, t1, s1), (d2, t2, s2), (d3, t3, s3)).toDF("d", "t", "s")
checkAnswer(
df.select(to_date(col("t"))),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("d"))),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("s"))),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null)))
checkAnswer(
df.selectExpr("to_date(t)"),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.selectExpr("to_date(d)"),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.selectExpr("to_date(s)"),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")), Row(null)))
// now with format
checkAnswer(
df.select(to_date(col("t"), "yyyy-MM-dd")),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2014-12-31")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("d"), "yyyy-MM-dd")),
Seq(Row(Date.valueOf("2015-07-22")), Row(Date.valueOf("2015-07-01")),
Row(Date.valueOf("2014-12-31"))))
checkAnswer(
df.select(to_date(col("s"), "yyyy-MM-dd")),
Seq(Row(null), Row(Date.valueOf("2014-12-31")), Row(null)))
// now switch format
checkAnswer(
df.select(to_date(col("s"), "yyyy-dd-MM")),
Seq(Row(null), Row(null), Row(Date.valueOf("2014-12-31"))))
// invalid format
checkAnswer(
df.select(to_date(col("s"), "yyyy-hh-MM")),
Seq(Row(null), Row(null), Row(null)))
checkAnswer(
df.select(to_date(col("s"), "yyyy-dd-aa")),
Seq(Row(null), Row(null), Row(null)))
// february
val x1 = "2016-02-29"
val x2 = "2017-02-29"
val df1 = Seq(x1, x2).toDF("x")
checkAnswer(
df1.select(to_date(col("x"))), Row(Date.valueOf("2016-02-29")) :: Row(null) :: Nil)
}
test("function trunc") {
val df = Seq(
(1, Timestamp.valueOf("2015-07-22 10:00:00")),
(2, Timestamp.valueOf("2014-12-31 00:00:00"))).toDF("i", "t")
checkAnswer(
df.select(trunc(col("t"), "YY")),
Seq(Row(Date.valueOf("2015-01-01")), Row(Date.valueOf("2014-01-01"))))
checkAnswer(
df.selectExpr("trunc(t, 'Month')"),
Seq(Row(Date.valueOf("2015-07-01")), Row(Date.valueOf("2014-12-01"))))
}
test("function date_trunc") {
val df = Seq(
(1, Timestamp.valueOf("2015-07-22 10:01:40.523")),
(2, Timestamp.valueOf("2014-12-31 05:29:06.876"))).toDF("i", "t")
checkAnswer(
df.select(date_trunc("YY", col("t"))),
Seq(Row(Timestamp.valueOf("2015-01-01 00:00:00")),
Row(Timestamp.valueOf("2014-01-01 00:00:00"))))
checkAnswer(
df.selectExpr("date_trunc('MONTH', t)"),
Seq(Row(Timestamp.valueOf("2015-07-01 00:00:00")),
Row(Timestamp.valueOf("2014-12-01 00:00:00"))))
checkAnswer(
df.selectExpr("date_trunc('DAY', t)"),
Seq(Row(Timestamp.valueOf("2015-07-22 00:00:00")),
Row(Timestamp.valueOf("2014-12-31 00:00:00"))))
checkAnswer(
df.selectExpr("date_trunc('HOUR', t)"),
Seq(Row(Timestamp.valueOf("2015-07-22 10:00:00")),
Row(Timestamp.valueOf("2014-12-31 05:00:00"))))
checkAnswer(
df.selectExpr("date_trunc('MINUTE', t)"),
Seq(Row(Timestamp.valueOf("2015-07-22 10:01:00")),
Row(Timestamp.valueOf("2014-12-31 05:29:00"))))
checkAnswer(
df.selectExpr("date_trunc('SECOND', t)"),
Seq(Row(Timestamp.valueOf("2015-07-22 10:01:40")),
Row(Timestamp.valueOf("2014-12-31 05:29:06"))))
checkAnswer(
df.selectExpr("date_trunc('WEEK', t)"),
Seq(Row(Timestamp.valueOf("2015-07-20 00:00:00")),
Row(Timestamp.valueOf("2014-12-29 00:00:00"))))
checkAnswer(
df.selectExpr("date_trunc('QUARTER', t)"),
Seq(Row(Timestamp.valueOf("2015-07-01 00:00:00")),
Row(Timestamp.valueOf("2014-10-01 00:00:00"))))
}
test("from_unixtime") {
val sdf1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS"
val sdf2 = new SimpleDateFormat(fmt2, Locale.US)
val fmt3 = "yy-MM-dd HH-mm-ss"
val sdf3 = new SimpleDateFormat(fmt3, Locale.US)
val df = Seq((1000, "yyyy-MM-dd HH:mm:ss.SSS"), (-1000, "yy-MM-dd HH-mm-ss")).toDF("a", "b")
checkAnswer(
df.select(from_unixtime(col("a"))),
Seq(Row(sdf1.format(new Timestamp(1000000))), Row(sdf1.format(new Timestamp(-1000000)))))
checkAnswer(
df.select(from_unixtime(col("a"), fmt2)),
Seq(Row(sdf2.format(new Timestamp(1000000))), Row(sdf2.format(new Timestamp(-1000000)))))
checkAnswer(
df.select(from_unixtime(col("a"), fmt3)),
Seq(Row(sdf3.format(new Timestamp(1000000))), Row(sdf3.format(new Timestamp(-1000000)))))
checkAnswer(
df.selectExpr("from_unixtime(a)"),
Seq(Row(sdf1.format(new Timestamp(1000000))), Row(sdf1.format(new Timestamp(-1000000)))))
checkAnswer(
df.selectExpr(s"from_unixtime(a, '$fmt2')"),
Seq(Row(sdf2.format(new Timestamp(1000000))), Row(sdf2.format(new Timestamp(-1000000)))))
checkAnswer(
df.selectExpr(s"from_unixtime(a, '$fmt3')"),
Seq(Row(sdf3.format(new Timestamp(1000000))), Row(sdf3.format(new Timestamp(-1000000)))))
}
private def secs(millis: Long): Long = TimeUnit.MILLISECONDS.toSeconds(millis)
test("unix_timestamp") {
val date1 = Date.valueOf("2015-07-24")
val date2 = Date.valueOf("2015-07-25")
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00.3")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss")
checkAnswer(df.select(unix_timestamp(col("ts"))), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.select(unix_timestamp(col("ss"))), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.select(unix_timestamp(col("d"), fmt)), Seq(
Row(secs(date1.getTime)), Row(secs(date2.getTime))))
checkAnswer(df.select(unix_timestamp(col("s"), fmt)), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.selectExpr("unix_timestamp(ts)"), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.selectExpr("unix_timestamp(ss)"), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.selectExpr(s"unix_timestamp(d, '$fmt')"), Seq(
Row(secs(date1.getTime)), Row(secs(date2.getTime))))
checkAnswer(df.selectExpr(s"unix_timestamp(s, '$fmt')"), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
val x1 = "2015-07-24 10:00:00"
val x2 = "2015-25-07 02:02:02"
val x3 = "2015-07-24 25:02:02"
val x4 = "2015-24-07 26:02:02"
val ts3 = Timestamp.valueOf("2015-07-24 02:25:02")
val ts4 = Timestamp.valueOf("2015-07-24 00:10:00")
val df1 = Seq(x1, x2, x3, x4).toDF("x")
checkAnswer(df1.select(unix_timestamp(col("x"))), Seq(
Row(secs(ts1.getTime)), Row(null), Row(null), Row(null)))
checkAnswer(df1.selectExpr("unix_timestamp(x)"), Seq(
Row(secs(ts1.getTime)), Row(null), Row(null), Row(null)))
checkAnswer(df1.select(unix_timestamp(col("x"), "yyyy-dd-MM HH:mm:ss")), Seq(
Row(null), Row(secs(ts2.getTime)), Row(null), Row(null)))
checkAnswer(df1.selectExpr(s"unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq(
Row(secs(ts4.getTime)), Row(null), Row(secs(ts3.getTime)), Row(null)))
// invalid format
checkAnswer(df1.selectExpr(s"unix_timestamp(x, 'yyyy-MM-dd aa:HH:ss')"), Seq(
Row(null), Row(null), Row(null), Row(null)))
// february
val y1 = "2016-02-29"
val y2 = "2017-02-29"
val ts5 = Timestamp.valueOf("2016-02-29 00:00:00")
val df2 = Seq(y1, y2).toDF("y")
checkAnswer(df2.select(unix_timestamp(col("y"), "yyyy-MM-dd")), Seq(
Row(secs(ts5.getTime)), Row(null)))
val now = sql("select unix_timestamp()").collect().head.getLong(0)
checkAnswer(
sql(s"select cast ($now as timestamp)"),
Row(new java.util.Date(TimeUnit.SECONDS.toMillis(now))))
}
test("to_unix_timestamp") {
val date1 = Date.valueOf("2015-07-24")
val date2 = Date.valueOf("2015-07-25")
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00.3")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss")
checkAnswer(df.selectExpr("to_unix_timestamp(ts)"), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.selectExpr("to_unix_timestamp(ss)"), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
checkAnswer(df.selectExpr(s"to_unix_timestamp(d, '$fmt')"), Seq(
Row(secs(date1.getTime)), Row(secs(date2.getTime))))
checkAnswer(df.selectExpr(s"to_unix_timestamp(s, '$fmt')"), Seq(
Row(secs(ts1.getTime)), Row(secs(ts2.getTime))))
val x1 = "2015-07-24 10:00:00"
val x2 = "2015-25-07 02:02:02"
val x3 = "2015-07-24 25:02:02"
val x4 = "2015-24-07 26:02:02"
val ts3 = Timestamp.valueOf("2015-07-24 02:25:02")
val ts4 = Timestamp.valueOf("2015-07-24 00:10:00")
val df1 = Seq(x1, x2, x3, x4).toDF("x")
checkAnswer(df1.selectExpr("to_unix_timestamp(x)"), Seq(
Row(secs(ts1.getTime)), Row(null), Row(null), Row(null)))
checkAnswer(df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq(
Row(secs(ts4.getTime)), Row(null), Row(secs(ts3.getTime)), Row(null)))
// february
val y1 = "2016-02-29"
val y2 = "2017-02-29"
val ts5 = Timestamp.valueOf("2016-02-29 00:00:00")
val df2 = Seq(y1, y2).toDF("y")
checkAnswer(df2.select(unix_timestamp(col("y"), "yyyy-MM-dd")), Seq(
Row(secs(ts5.getTime)), Row(null)))
// invalid format
checkAnswer(df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd bb:HH:ss')"), Seq(
Row(null), Row(null), Row(null), Row(null)))
}
test("to_timestamp") {
val date1 = Date.valueOf("2015-07-24")
val date2 = Date.valueOf("2015-07-25")
val ts_date1 = Timestamp.valueOf("2015-07-24 00:00:00")
val ts_date2 = Timestamp.valueOf("2015-07-25 00:00:00")
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ts1m = Timestamp.valueOf("2015-07-24 10:00:00.5")
val ts2m = Timestamp.valueOf("2015-07-25 02:02:02.6")
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
val df = Seq((date1, ts1, s1, ss1), (date2, ts2, s2, ss2)).toDF("d", "ts", "s", "ss")
checkAnswer(df.select(to_timestamp(col("ss"))),
df.select(unix_timestamp(col("ss")).cast("timestamp")))
checkAnswer(df.select(to_timestamp(col("ss"))), Seq(
Row(ts1), Row(ts2)))
checkAnswer(df.select(to_timestamp(col("s"), fmt)), Seq(
Row(ts1m), Row(ts2m)))
checkAnswer(df.select(to_timestamp(col("ts"), fmt)), Seq(
Row(ts1), Row(ts2)))
checkAnswer(df.select(to_timestamp(col("d"), "yyyy-MM-dd")), Seq(
Row(ts_date1), Row(ts_date2)))
}
test("datediff") {
val df = Seq(
(Date.valueOf("2015-07-24"), Timestamp.valueOf("2015-07-24 01:00:00"),
"2015-07-23", "2015-07-23 03:00:00"),
(Date.valueOf("2015-07-25"), Timestamp.valueOf("2015-07-25 02:00:00"),
"2015-07-24", "2015-07-24 04:00:00")
).toDF("a", "b", "c", "d")
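    // datediff compares only the date parts, so same-day timestamps differ by 0 regardless of time of day.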
checkAnswer(df.select(datediff(col("a"), col("b"))), Seq(Row(0), Row(0)))
checkAnswer(df.select(datediff(col("a"), col("c"))), Seq(Row(1), Row(1)))
checkAnswer(df.select(datediff(col("d"), col("b"))), Seq(Row(-1), Row(-1)))
checkAnswer(df.selectExpr("datediff(a, d)"), Seq(Row(1), Row(1)))
}
test("from_utc_timestamp with literal zone") {
val df = Seq(
(Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00"),
(Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00")
).toDF("a", "b")
withSQLConf(SQLConf.UTC_TIMESTAMP_FUNC_ENABLED.key -> "true") {
checkAnswer(
df.select(from_utc_timestamp(col("a"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-23 17:00:00")),
Row(Timestamp.valueOf("2015-07-24 17:00:00"))))
checkAnswer(
df.select(from_utc_timestamp(col("b"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-23 17:00:00")),
Row(Timestamp.valueOf("2015-07-24 17:00:00"))))
}
val msg = intercept[AnalysisException] {
df.select(from_utc_timestamp(col("a"), "PST")).collect()
}.getMessage
assert(msg.contains(SQLConf.UTC_TIMESTAMP_FUNC_ENABLED.key))
}
test("from_utc_timestamp with column zone") {
withSQLConf(SQLConf.UTC_TIMESTAMP_FUNC_ENABLED.key -> "true") {
val df = Seq(
(Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00", "CET"),
(Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00", "PST")
).toDF("a", "b", "c")
checkAnswer(
df.select(from_utc_timestamp(col("a"), col("c"))),
Seq(
Row(Timestamp.valueOf("2015-07-24 02:00:00")),
Row(Timestamp.valueOf("2015-07-24 17:00:00"))))
checkAnswer(
df.select(from_utc_timestamp(col("b"), col("c"))),
Seq(
Row(Timestamp.valueOf("2015-07-24 02:00:00")),
Row(Timestamp.valueOf("2015-07-24 17:00:00"))))
}
}
test("to_utc_timestamp with literal zone") {
val df = Seq(
(Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00"),
(Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00")
).toDF("a", "b")
withSQLConf(SQLConf.UTC_TIMESTAMP_FUNC_ENABLED.key -> "true") {
checkAnswer(
df.select(to_utc_timestamp(col("a"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-24 07:00:00")),
Row(Timestamp.valueOf("2015-07-25 07:00:00"))))
checkAnswer(
df.select(to_utc_timestamp(col("b"), "PST")),
Seq(
Row(Timestamp.valueOf("2015-07-24 07:00:00")),
Row(Timestamp.valueOf("2015-07-25 07:00:00"))))
}
val msg = intercept[AnalysisException] {
df.select(to_utc_timestamp(col("a"), "PST")).collect()
}.getMessage
assert(msg.contains(SQLConf.UTC_TIMESTAMP_FUNC_ENABLED.key))
}
test("to_utc_timestamp with column zone") {
withSQLConf(SQLConf.UTC_TIMESTAMP_FUNC_ENABLED.key -> "true") {
val df = Seq(
(Timestamp.valueOf("2015-07-24 00:00:00"), "2015-07-24 00:00:00", "PST"),
(Timestamp.valueOf("2015-07-25 00:00:00"), "2015-07-25 00:00:00", "CET")
).toDF("a", "b", "c")
checkAnswer(
df.select(to_utc_timestamp(col("a"), col("c"))),
Seq(
Row(Timestamp.valueOf("2015-07-24 07:00:00")),
Row(Timestamp.valueOf("2015-07-24 22:00:00"))))
checkAnswer(
df.select(to_utc_timestamp(col("b"), col("c"))),
Seq(
Row(Timestamp.valueOf("2015-07-24 07:00:00")),
Row(Timestamp.valueOf("2015-07-24 22:00:00"))))
}
}
test("to_timestamp with microseconds precision") {
withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true") {
val timestamp = "1970-01-01T00:00:00.123456Z"
val df = Seq(timestamp).toDF("t")
checkAnswer(df.select(to_timestamp($"t", "yyyy-MM-dd'T'HH:mm:ss.SSSSSSX")),
Seq(Row(Instant.parse(timestamp))))
}
}
}
|
aosagie/spark
|
sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala
|
Scala
|
apache-2.0
| 29,015
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.checkpoint.kafka
import java.util.Properties
import kafka.admin.AdminUtils
import kafka.integration.KafkaServerTestHarness
import kafka.server.ConfigType
import kafka.utils.{CoreUtils, TestUtils, ZkUtils}
import com.google.common.collect.ImmutableMap
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.apache.samza.checkpoint.Checkpoint
import org.apache.samza.config._
import org.apache.samza.container.TaskName
import org.apache.samza.container.grouper.stream.GroupByPartitionFactory
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.serializers.CheckpointSerde
import org.apache.samza.system._
import org.apache.samza.system.kafka.{KafkaStreamSpec, KafkaSystemFactory}
import org.apache.samza.util.{KafkaUtilException, NoOpMetricsRegistry, Util}
import org.apache.samza.{Partition, SamzaException}
import org.junit.Assert._
import org.junit._
import org.mockito.Mockito
class TestKafkaCheckpointManager extends KafkaServerTestHarness {
protected def numBrokers: Int = 3
val checkpointSystemName = "kafka"
val sspGrouperFactoryName = classOf[GroupByPartitionFactory].getCanonicalName
val ssp = new SystemStreamPartition("kafka", "topic", new Partition(0))
val checkpoint1 = new Checkpoint(ImmutableMap.of(ssp, "offset-1"))
val checkpoint2 = new Checkpoint(ImmutableMap.of(ssp, "offset-2"))
val taskName = new TaskName("Partition 0")
var config: Config = null
@Before
override def setUp {
super.setUp
TestUtils.waitUntilTrue(() => servers.head.metadataCache.getAliveBrokers.size == numBrokers, "Wait for cache to update")
config = getConfig()
}
override def generateConfigs() = {
val props = TestUtils.createBrokerConfigs(numBrokers, zkConnect, true)
// do not use relative imports
props.map(_root_.kafka.server.KafkaConfig.fromProps)
}
@Test
def testCheckpointShouldBeNullIfCheckpointTopicDoesNotExistShouldBeCreatedOnWriteAndShouldBeReadableAfterWrite {
val checkpointTopic = "checkpoint-topic-1"
val kcm1 = createKafkaCheckpointManager(checkpointTopic)
kcm1.register(taskName)
kcm1.createResources
kcm1.start
kcm1.stop
// check that start actually creates the topic with log compaction enabled
val zkClient = ZkUtils(zkConnect, 6000, 6000, JaasUtils.isZkSecurityEnabled())
val topicConfig = AdminUtils.fetchEntityConfig(zkClient, ConfigType.Topic, checkpointTopic)
assertEquals(topicConfig, new KafkaConfig(config).getCheckpointTopicProperties())
assertEquals("compact", topicConfig.get("cleanup.policy"))
assertEquals("26214400", topicConfig.get("segment.bytes"))
zkClient.close
    // reading before any checkpoint has been written should return null
val readCp = readCheckpoint(checkpointTopic, taskName)
assertNull(readCp)
writeCheckpoint(checkpointTopic, taskName, checkpoint1)
assertEquals(checkpoint1, readCheckpoint(checkpointTopic, taskName))
// writing a second message and reading it returns a more recent checkpoint
writeCheckpoint(checkpointTopic, taskName, checkpoint2)
assertEquals(checkpoint2, readCheckpoint(checkpointTopic, taskName))
}
@Test(expected = classOf[SamzaException])
def testWriteCheckpointShouldRetryFiniteTimesOnFailure: Unit = {
val checkpointTopic = "checkpoint-topic-2"
val mockKafkaProducer: SystemProducer = Mockito.mock(classOf[SystemProducer])
class MockSystemFactory extends KafkaSystemFactory {
override def getProducer(systemName: String, config: Config, registry: MetricsRegistry): SystemProducer = {
mockKafkaProducer
}
}
Mockito.doThrow(new RuntimeException()).when(mockKafkaProducer).flush(taskName.getTaskName)
val props = new org.apache.samza.config.KafkaConfig(config).getCheckpointTopicProperties()
val spec = new KafkaStreamSpec("id", checkpointTopic, checkpointSystemName, 1, 1, false, props)
val checkPointManager = new KafkaCheckpointManager(spec, new MockSystemFactory, false, config, new NoOpMetricsRegistry)
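    // Shrink the retry window to 1 ms so the mocked flush failure gives up almost immediately
    // and surfaces as the expected SamzaException.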
checkPointManager.MaxRetryDurationMs = 1
checkPointManager.register(taskName)
checkPointManager.start
checkPointManager.writeCheckpoint(taskName, new Checkpoint(ImmutableMap.of()))
}
@Test
def testFailOnTopicValidation {
// By default, should fail if there is a topic validation error
val checkpointTopic = "eight-partition-topic";
val kcm1 = createKafkaCheckpointManager(checkpointTopic)
kcm1.register(taskName)
// create topic with the wrong number of partitions
createTopic(checkpointTopic, 8, new KafkaConfig(config).getCheckpointTopicProperties())
try {
kcm1.createResources
kcm1.start
fail("Expected an exception for invalid number of partitions in the checkpoint topic.")
} catch {
case e: StreamValidationException => None
}
kcm1.stop
// Should not fail if failOnTopicValidation = false
val failOnTopicValidation = false
val kcm2 = createKafkaCheckpointManager(checkpointTopic, new CheckpointSerde, failOnTopicValidation)
kcm2.register(taskName)
try {
kcm2.start
} catch {
case e: KafkaUtilException => fail("Unexpected exception for invalid number of partitions in the checkpoint topic")
}
kcm2.stop
}
@After
override def tearDown() {
if (servers != null) {
servers.foreach(_.shutdown())
servers.foreach(server => CoreUtils.delete(server.config.logDirs))
}
super.tearDown
}
private def getCheckpointProducerProperties() : Properties = {
val defaultSerializer = classOf[ByteArraySerializer].getCanonicalName
val props = new Properties()
props.putAll(ImmutableMap.of(
ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList,
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, defaultSerializer,
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, defaultSerializer))
props
}
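  // Minimal job config wiring the "kafka" checkpoint system to the embedded brokers and
  // ZooKeeper started by KafkaServerTestHarness.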
private def getConfig() : Config = {
new MapConfig(new ImmutableMap.Builder[String, String]()
.put(JobConfig.JOB_NAME, "some-job-name")
.put(JobConfig.JOB_ID, "i001")
.put(s"systems.$checkpointSystemName.samza.factory", classOf[KafkaSystemFactory].getCanonicalName)
.put(s"systems.$checkpointSystemName.producer.bootstrap.servers", brokerList)
.put(s"systems.$checkpointSystemName.consumer.zookeeper.connect", zkConnect)
.put("task.checkpoint.system", checkpointSystemName)
.build())
}
private def createKafkaCheckpointManager(cpTopic: String, serde: CheckpointSerde = new CheckpointSerde, failOnTopicValidation: Boolean = true) = {
val kafkaConfig = new org.apache.samza.config.KafkaConfig(config)
val props = kafkaConfig.getCheckpointTopicProperties()
val systemName = kafkaConfig.getCheckpointSystem.getOrElse(
throw new SamzaException("No system defined for Kafka's checkpoint manager."))
val systemFactoryClassName = new SystemConfig(config)
.getSystemFactory(systemName)
.getOrElse(throw new SamzaException("Missing configuration: " + SystemConfig.SYSTEM_FACTORY format systemName))
val systemFactory = Util.getObj[SystemFactory](systemFactoryClassName)
val spec = new KafkaStreamSpec("id", cpTopic, checkpointSystemName, 1, 1, false, props)
new KafkaCheckpointManager(spec, systemFactory, failOnTopicValidation, config, new NoOpMetricsRegistry, serde)
}
private def readCheckpoint(checkpointTopic: String, taskName: TaskName) : Checkpoint = {
val kcm = createKafkaCheckpointManager(checkpointTopic)
kcm.register(taskName)
kcm.start
val checkpoint = kcm.readLastCheckpoint(taskName)
kcm.stop
checkpoint
}
private def writeCheckpoint(checkpointTopic: String, taskName: TaskName, checkpoint: Checkpoint) = {
val kcm = createKafkaCheckpointManager(checkpointTopic)
kcm.register(taskName)
kcm.start
kcm.writeCheckpoint(taskName, checkpoint)
}
private def createTopic(cpTopic: String, partNum: Int, props: Properties) = {
val zkClient = ZkUtils(zkConnect, 6000, 6000, JaasUtils.isZkSecurityEnabled())
try {
AdminUtils.createTopic(
zkClient,
cpTopic,
partNum,
1,
props)
} catch {
case e: Exception => println(e.getMessage)
} finally {
zkClient.close
}
}
}
|
TiVo/samza
|
samza-kafka/src/test/scala/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManager.scala
|
Scala
|
apache-2.0
| 9,225
|
package dispatch.cloudapp
object DispatchContrib {
import com.ning.http.client.{
AsyncCompletionHandler, RequestBuilder, Response, Realm
}
import dispatch._
trait FileVerbs extends RequestVerbs {
import java.io.File
def <<< (file: File) =
subject.PUT.setBody(file)
def << (file: File) =
subject.POST.setBody(file)
}
trait ExtraAuthVerbs extends RequestVerbs {
import com.ning.http.client.Realm.RealmBuilder
def digest (user: String, password: String, realm: Option[String] = None) =
subject.setRealm(new RealmBuilder()
.setPrincipal(user)
.setPassword(password)
//.setUsePreemptiveAuth(false)
//.setScheme(Realm.AuthScheme.DIGEST)
.build())
}
object ProgressVerbs {
import Progress._
class WriteProgressHandler[T](f: Response => T)(p: Listener)
extends FunctionHandler[T](f) {
override def onContentWriteProgress(amount: Long, current: Long, total: Long) = {
p(amount, current, total)
super.onContentWriteProgress(amount, current, total)
}
}
}
object Progress {
import ProgressVerbs._
type Listener = (Long, Long, Long) => Unit
def apply[T](f: Response => T)(p: Listener) =
new WriteProgressHandler(f)(p)
}
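  // Hedged usage sketch for Progress above (illustrative only; `myRequest` and the use of
  // dispatch's `as.String` converter are assumptions, not part of this file):
  //   val handler = Progress(as.String) { (amount, current, total) =>
  //     println(s"wrote $current of $total bytes")
  //   }
  //   // `handler` can then be passed wherever dispatch accepts an AsyncHandler,
  //   // e.g. Http(myRequest > handler)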
trait DebugParams extends RequestVerbs {
def debugp(ps: Traversable[(String, String)]) = {
println("adding query params")
(subject /: ps) {
case (s, (key, value)) =>
s.addQueryParameter(key, value)
}
}
}
trait HeaderVerbs extends RequestVerbs {
def headers(hs: Traversable[(String, String)]) =
(subject /: hs) {
case (s, (key, value)) =>
s.addHeader(key, value)
}
def <:< (hs: Traversable[(String, String)]) =
headers(hs)
}
class ContribVerbs(sub: RequestBuilder)
extends DefaultRequestVerbs(sub)
with HeaderVerbs
with FileVerbs
with ExtraAuthVerbs
with DebugParams
}
|
softprops/dispatch-cloudapp
|
src/main/scala/contrib.scala
|
Scala
|
mit
| 2,033
|
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.concurrent
import akka.actor.ActorSystem
import scala.concurrent.{ Future, TimeoutException }
import scala.concurrent.duration.FiniteDuration
/**
* This trait is used to provide a non-blocking timeout on an operation that returns a Future.
*
* Please note that the [[play.api.Application]] default ActorSystem should
* be used as input here, as the actorSystem.scheduler is responsible for scheduling
 * the timeout, using <a href="http://doc.akka.io/docs/akka/current/scala/futures.html#After">akka.pattern.after</a> under the hood.
*
* You can dependency inject the ActorSystem as follows to create a Future that will
* timeout after a certain period of time:
*
* {{{
* class MyService @Inject()(actorSystem: ActorSystem) extends Timeout {
*
* def calculateWithTimeout(timeoutDuration: FiniteDuration): Future[Int] = {
* timeout(actorSystem, timeoutDuration)(rawCalculation())
* }
*
* def rawCalculation(): Future[Int] = {
* import akka.pattern.after
* implicit val ec = actorSystem.dispatcher
* akka.pattern.after(300 millis, actorSystem.scheduler)(Future(42))(actorSystem.dispatcher)
* }
* }
* }}}
*
* You should check for timeout by using `scala.concurrent.Future.recover` or `scala.concurrent.Future.recoverWith`
* and checking for [[scala.concurrent.TimeoutException]]:
*
* {{{
* val future = myService.calculateWithTimeout(100 millis).recover {
* case _: TimeoutException =>
* -1
* }
* }}}
*
* @see [[http://docs.scala-lang.org/overviews/core/futures.html Futures and Promises]]
*
*/
trait Timeout {
/**
* Creates a future which will resolve to a timeout exception if the
* given Future has not successfully completed within timeoutDuration.
*
* Note that timeout is not the same as cancellation. Even in case of timeout,
* the given future will still complete, even though that completed value
* is not returned.
*
* @tparam A the result type used in the Future.
* @param actorSystem the application's actor system.
* @param timeoutDuration the duration after which a Future.failed(TimeoutException) should be thrown.
* @param f a call by value Future[A]
* @return the future that completes first, either the failed future, or the operation.
*/
def timeout[A](actorSystem: ActorSystem, timeoutDuration: FiniteDuration)(f: Future[A]): Future[A] = {
implicit val ec = actorSystem.dispatchers.defaultGlobalDispatcher
val timeoutFuture = akka.pattern.after(timeoutDuration, actorSystem.scheduler) {
val msg = s"Timeout after $timeoutDuration"
Future.failed(new TimeoutException(msg))
}
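    // Race the caller's future against the scheduled failure: whichever completes first determines the result.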
Future.firstCompletedOf(Seq(f, timeoutFuture))
}
}
/**
* This is a static object that can be used to import timeout implicits, as a convenience.
*
* {{{
* import play.api.libs.concurrent.Timeout._
* }}}
*/
object Timeout extends Timeout with LowPriorityTimeoutImplicits
/**
* Low priority timeouts to add `withTimeout` methods to [[scala.concurrent.Future]].
*/
trait LowPriorityTimeoutImplicits {
implicit class FutureTimeout[T](future: Future[T]) extends Timeout {
/**
* Creates a future which will resolve to a timeout exception if the
* given [[scala.concurrent.Future]] has not successfully completed within timeoutDuration.
*
* Note that timeout is not the same as cancellation. Even in case of timeout,
* the given future will still complete, even though that completed value
* is not returned.
*
* @param timeoutDuration the duration after which a Future.failed(TimeoutException) should be thrown.
* @param actorSystem the application's actor system.
* @return the future that completes first, either the failed future, or the operation.
*/
def withTimeout(timeoutDuration: FiniteDuration)(implicit actorSystem: ActorSystem): Future[T] = {
timeout(actorSystem, timeoutDuration)(future)
}
/**
* Creates a future which will resolve to a timeout exception if the
* given Future has not successfully completed within timeoutDuration.
*
* This version uses an implicit [[akka.util.Timeout]] rather than a [[scala.concurrent.duration.FiniteDuration]].
*
* Note that timeout is not the same as cancellation. Even in case of timeout,
* the given future will still complete, even though that completed value
* is not returned.
*
* @param timeoutDuration the duration after which a Future.failed(TimeoutException) should be thrown.
* @param actorSystem the application's actor system.
* @return the future that completes first, either the failed future, or the operation.
*/
def withTimeout(implicit timeoutDuration: akka.util.Timeout, actorSystem: ActorSystem): Future[T] = {
timeout(actorSystem, timeoutDuration.duration)(future)
}
}
}
|
aradchykov/playframework
|
framework/src/play/src/main/scala/play/api/libs/concurrent/Timeout.scala
|
Scala
|
apache-2.0
| 4,946
|
package com.falmarri.finagle
import scala.util.Random
import scala.collection.immutable.StringOps
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.twitter.finagle.Service
import com.twitter.finagle.exp.Mysql
import com.twitter.finagle.exp.mysql._
import org.jboss.netty.handler.codec.http._
import org.jboss.netty.handler.codec.http.HttpResponseStatus._
import org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1
import org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer
import com.twitter.util.Future
import java.net.InetSocketAddress
import com.twitter.finagle.builder.{Server, ServerBuilder}
import com.twitter.finagle.http.{Http,HttpMuxer}
object FinagleBenchmark extends App {
val maxConnections = 256
//val mysql = new Client(ClientBuilder()
// .codec(new MySQL("benchmarkdbuser", "benchmarkdbpass", Some("hello_world")))
// .hosts(new InetSocketAddress(System.getProperty("db.host", "localhost"), 3306))
// .hostConnectionLimit(maxConnections)
// .buildFactory())
val username = "benchmarkdbuser"
val password = "benchmarkdbpass"
val db = "hello_world"
val host = System.getProperty("db.host", "localhost")
val mysql = Mysql
.withCredentials(username, password)
.withDatabase(db)
.newRichClient(host + ":3306")
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
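  // Registering DefaultScalaModule lets Jackson serialize Scala Maps and Seqs directly in serialize() below.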
def getValue(row: Row, name: String): Any = row(name) match {
case Some(IntValue(v)) => v
case _ => throw new Exception("couldn't get value for %s".format(name))
}
def rowToMap(row: Row) = {
Map(
"id" -> getValue(row, "id"),
"randomNumber" -> getValue(row, "randomNumber")
)
}
def serialize(result: Any): Array[Byte] =
mapper.writeValueAsBytes(result)
def createResponse(req: HttpRequest, bytes: Array[Byte]) = {
val body = wrappedBuffer(bytes)
val resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
//resp.setContentTypeJson
resp.setContent(body)
//resp.contentLength = body.readableBytes
resp
}
val muxService = new HttpMuxer()
.withHandler("/json", new Service[HttpRequest, HttpResponse] {
def apply(req: HttpRequest): Future[HttpResponse] =
Future.value(createResponse(req, serialize(Map("message" -> "Hello, World!"))))
})
/*
.withHandler("/db", new Service[HttpRequest, HttpResponse] {
val rand = new Random()
val sql = "SELECT * FROM world WHERE id = "
def apply(req: HttpRequest): Future[HttpResponse] = {
//val n = req.params.getIntOrElse("queries", 1)
val decoder = new QueryStringDecoder(req.getUri())
val n = {
val queries = decoder.getParameters().get("queries")
if(queries == null) {
1
}
else {
queries.get(0).toInt
}
}
val qs = (0 until n) map { i =>
mysql.select(sql + rand.nextInt(10000))(rowToMap)
}
Future.collect(qs) map { results =>
createResponse(req, serialize(results.flatten))
}
}
})
*/
//Http.serve(new InetSocketAddress(8080), HttpMuxer)
val server: Server = ServerBuilder()
.codec(Http())
.bindTo(new InetSocketAddress(8080))
.name("HttpServer")
.build(muxService)
}
|
denkab/FrameworkBenchmarks
|
frameworks/Scala/finagle/src/main/scala/com/falmarri/finagle/Finagle.scala
|
Scala
|
bsd-3-clause
| 3,363
|
import scalaxb.compiler.wsdl11.Driver
import java.io.File
import scalaxb.compiler.Config
import scalaxb.stockquote.server._
object Wsdl11DocumentBareTest extends TestBase with JaxwsTestBase {
override val module = new Driver // with Verbose
def serviceImpl:DocumentLiteralBareService = new DocumentLiteralBareService()
def serviceAddress: String = "document-bare"
step {
startServer
}
val packageName = "stockquote"
val wsdlFile = new File(s"integration/target/$serviceAddress.wsdl")
lazy val generated = {
writeStringToFile(retrieveWsdl, wsdlFile)
module.process(wsdlFile,
Config(packageNames = Map(None -> Some(packageName)),
packageDir = true, outdir = tmp, async = true))
}
"document-bare service works" in {
(List("""import stockquote._
import scala.concurrent._, duration._""",
"""val service = (new DocumentLiteralBareServiceSoapBindings with scalaxb.Soap11ClientsAsync with scalaxb.DispatchHttpClientsAsync {}).service""",
"""val fresponse = service.price(Some("GOOG"))""",
"""val response = Await.result(fresponse, 5 seconds)""",
"""if (response != Some(42.0)) sys.error(response.toString)""",
"""val fresponse2 = service.useHeader(Some("GOOG"))""",
"""val response2 = Await.result(fresponse2, 5 seconds)""",
"""true"""), generated) must evaluateTo(true,
outdir = "./tmp", usecurrentcp = true)
}
step {
stopServer
}
}
|
justjoheinz/scalaxb
|
integration/src/test/scala/Wsdl11DocumentBareTest.scala
|
Scala
|
mit
| 1,453
|
package gitbucket.core.util
import org.scalatest.FunSpec
class DirectorySpec extends FunSpec {
describe("GitBucketHome") {
it("should set under target in test scope") {
assert(Directory.GitBucketHome == new java.io.File("target/gitbucket_home_for_test").getAbsolutePath)
}
}
// test("GitBucketHome should exists"){
// new java.io.File(Directory.GitBucketHome).exists
// }
}
|
McFoggy/gitbucket
|
src/test/scala/gitbucket/core/util/DirectorySpec.scala
|
Scala
|
apache-2.0
| 401
|
package org.openurp.edu.eams.web.action.common
import org.beangle.commons.collection.Collections
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.commons.lang.Strings
import org.openurp.edu.base.Project
import org.openurp.edu.base.Student
abstract class AbstractStudentProjectSupportAction extends MultiProjectSupportAction {
protected override def getProjects(): List[Project] = {
    val projects = Collections.newBuffer[Project]
    val student = getLoginStudent
    if (student.getPerson != null) {
      val builder = OqlBuilder.from(classOf[Project], "project").select("select distinct project")
        .where("exists(from " + classOf[Student].getName +
          " std where std.person.id = :personId and project = std.project and std.graduateOn > current_date())",
          student.getPerson.id)
      projects ++= entityDao.search(builder)
    } else {
      projects += student.getProject
    }
    projects.toList
}
protected def getProject(): Project = getLoginStudent.getProject
}
|
openurp/edu-eams-webapp
|
web/src/main/scala/org/openurp/edu/eams/web/action/common/AbstractStudentProjectSupportAction.scala
|
Scala
|
gpl-3.0
| 1,009
|
package commands.makemkv
/**
* A trait that, given a list of command line arguments will produce a stream of strings. This is used to
* abstract away running makemkvcon as a native process.
* Created by alex on 05/05/15.
*/
trait MakeMkvConCommand {
def execute(arguments: Seq[String]): Stream[String]
}
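// A minimal sketch of a native-process implementation, assuming scala.sys.process is acceptable
// here; the class name below is hypothetical and not part of this repository:
//
//   import scala.sys.process._
//   class ProcessMakeMkvConCommand(executable: String) extends MakeMkvConCommand {
//     // Lazily stream makemkvcon's stdout, one line at a time.
//     def execute(arguments: Seq[String]): Stream[String] =
//       Process(executable +: arguments).lineStream
//   }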
|
unclealex72/ripper
|
src/main/scala/commands/makemkv/MakeMkvConCommand.scala
|
Scala
|
mit
| 312
|
// Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.scenarios
import io.gatling.core.Predef._
import io.gatling.core.feeder.FileBasedFeederBuilder
import io.gatling.core.structure.ScenarioBuilder
class DeleteProject extends ProjectSimulation {
private val data: FileBasedFeederBuilder[Any]#F#F = jsonFile(resource).convert(url).queue
def this(default: String) {
this()
this.default = default
}
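  // Single-shot scenario: feed one record from the JSON resource and issue the delete-project request.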
val test: ScenarioBuilder = scenario(name)
.feed(data)
.exec(httpRequest)
setUp(
test.inject(
atOnceUsers(1)
)).protocols(httpProtocol)
}
|
qtproject/qtqa-gerrit
|
e2e-tests/src/test/scala/com/google/gerrit/scenarios/DeleteProject.scala
|
Scala
|
apache-2.0
| 1,164
|
package kneelnrise.warp10scala.services
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model.{HttpHeader, HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Source}
import akka.util.ByteString
import kneelnrise.warp10scala.constants.CharsetConstants
import kneelnrise.warp10scala.model.Warp10Configuration
import kneelnrise.warp10scala.services.Warp10CommonClient.PoolClientFlow
import scala.collection.immutable
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
object Warp10CommonClient {
type PoolClientFlow = Flow[(HttpRequest, String), (Try[HttpResponse], String), _]
def readAllDataBytes(dataBytesSource: Source[ByteString, _])(implicit actorMaterializer: ActorMaterializer): Future[String] = {
implicit val executionContext = actorMaterializer.system.dispatcher
dataBytesSource
.runFold(ByteString.empty) { case (acc, dataBytes) => acc ++ dataBytes }
.map(_.decodeString(CharsetConstants.`UTF-8`))
}
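  // Re-chunks an arbitrary ByteString stream into complete text lines: partial lines are buffered
  // across chunk boundaries, then split on '\n' with any stray '\r' stripped.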
def lineByLine: Flow[ByteString, String, NotUsed] =
Flow[ByteString]
.map(_.decodeString(CharsetConstants.`UTF-8`))
      // Force a trailing EOL when the stream does not end with one at EOF
      // TODO: How to drop the extra trailing EOL when one is already present?
      .intersperse("", "", "\n")
      .scan("") { case (acc, current) =>
        if (acc.endsWith("\n")) {
          current
        } else if (acc.contains("\n")) {
          acc.substring(acc.lastIndexOf("\n") + 1) + current
        } else {
          acc + current
        }
      }
      .filter(_.contains("\n"))
      .map(segment => segment.substring(0, segment.lastIndexOf("\n")))
      .mapConcat(_.split("\n").to[immutable.Iterable])
      .map(line => if (line.startsWith("\r")) line.drop(1) else line)
      .map(line => if (line.endsWith("\r")) line.dropRight(1) else line)
def lineByLineNoEmpty: Flow[ByteString, String, NotUsed] =
lineByLine.filter(_.trim.nonEmpty)
}
private[services] case class Warp10ClientContext(configuration: Warp10Configuration, poolClientFlow: Warp10CommonClient.PoolClientFlow, actorMaterializer: ActorMaterializer) {
implicit def implicitWarp10Configuration: Warp10Configuration = configuration
implicit def implicitPoolClientFlow: PoolClientFlow = poolClientFlow
implicit def implicitActorMaterializer: ActorMaterializer = actorMaterializer
implicit def implicitActorSystem: ActorSystem = actorMaterializer.system
implicit def implicitExecutionContext: ExecutionContext = actorMaterializer.system.dispatcher
}
case class Warp10Exception(statusCode: Long, error: String) extends Exception(error)
private[services] object `X-Warp10-Token` {
def apply(value: String): HttpHeader = HttpHeader.parse("X-Warp10-Token", value) match {
case ParsingResult.Ok(httpHeader, _) => httpHeader
case ParsingResult.Error(error) => throw Warp10Exception(-1, s"${error.summary}: ${error.detail}")
}
}
|
kneelnrise/warp10-scala
|
src/main/scala/kneelnrise/warp10scala/services/Warp10CommonClient.scala
|
Scala
|
mit
| 3,004
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.continuous
import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.JavaConverters._
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.execution.datasources.v2.{DataSourceRDDPartition, RowToUnsafeDataReader}
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousDataReader, PartitionOffset}
import org.apache.spark.util.ThreadUtils
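// RDD backing continuous processing: each partition starts a data-reader thread and an
// epoch-poll thread, and compute() drains their shared blocking queue, reporting offsets to the
// epoch coordinator at every epoch boundary.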
class ContinuousDataSourceRDD(
sc: SparkContext,
sqlContext: SQLContext,
@transient private val readerFactories: java.util.List[DataReaderFactory[UnsafeRow]])
extends RDD[UnsafeRow](sc, Nil) {
private val dataQueueSize = sqlContext.conf.continuousStreamingExecutorQueueSize
private val epochPollIntervalMs = sqlContext.conf.continuousStreamingExecutorPollIntervalMs
override protected def getPartitions: Array[Partition] = {
readerFactories.asScala.zipWithIndex.map {
case (readerFactory, index) => new DataSourceRDDPartition(index, readerFactory)
}.toArray
}
override def compute(split: Partition, context: TaskContext): Iterator[UnsafeRow] = {
// If attempt number isn't 0, this is a task retry, which we don't support.
if (context.attemptNumber() != 0) {
throw new ContinuousTaskRetryException()
}
val reader = split.asInstanceOf[DataSourceRDDPartition[UnsafeRow]]
.readerFactory.createDataReader()
val coordinatorId = context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY)
// This queue contains two types of messages:
// * (null, null) representing an epoch boundary.
// * (row, off) containing a data row and its corresponding PartitionOffset.
val queue = new ArrayBlockingQueue[(UnsafeRow, PartitionOffset)](dataQueueSize)
val epochPollFailed = new AtomicBoolean(false)
val epochPollExecutor = ThreadUtils.newDaemonSingleThreadScheduledExecutor(
s"epoch-poll--$coordinatorId--${context.partitionId()}")
val epochPollRunnable = new EpochPollRunnable(queue, context, epochPollFailed)
epochPollExecutor.scheduleWithFixedDelay(
epochPollRunnable, 0, epochPollIntervalMs, TimeUnit.MILLISECONDS)
// Important sequencing - we must get start offset before the data reader thread begins
val startOffset = ContinuousDataSourceRDD.getBaseReader(reader).getOffset
val dataReaderFailed = new AtomicBoolean(false)
val dataReaderThread = new DataReaderThread(reader, queue, context, dataReaderFailed)
dataReaderThread.setDaemon(true)
dataReaderThread.start()
context.addTaskCompletionListener(_ => {
dataReaderThread.interrupt()
epochPollExecutor.shutdown()
})
val epochEndpoint = EpochCoordinatorRef.get(coordinatorId, SparkEnv.get)
new Iterator[UnsafeRow] {
private val POLL_TIMEOUT_MS = 1000
private var currentEntry: (UnsafeRow, PartitionOffset) = _
private var currentOffset: PartitionOffset = startOffset
private var currentEpoch =
context.getLocalProperty(ContinuousExecution.START_EPOCH_KEY).toLong
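      // Block until either a data row or an epoch marker arrives; a marker ends the current
      // epoch's iterator after reporting this partition's offset.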
override def hasNext(): Boolean = {
while (currentEntry == null) {
if (context.isInterrupted() || context.isCompleted()) {
currentEntry = (null, null)
}
if (dataReaderFailed.get()) {
throw new SparkException("data read failed", dataReaderThread.failureReason)
}
if (epochPollFailed.get()) {
throw new SparkException("epoch poll failed", epochPollRunnable.failureReason)
}
currentEntry = queue.poll(POLL_TIMEOUT_MS, TimeUnit.MILLISECONDS)
}
currentEntry match {
// epoch boundary marker
case (null, null) =>
epochEndpoint.send(ReportPartitionOffset(
context.partitionId(),
currentEpoch,
currentOffset))
currentEpoch += 1
currentEntry = null
false
// real row
case (_, offset) =>
currentOffset = offset
true
}
}
override def next(): UnsafeRow = {
if (currentEntry == null) throw new NoSuchElementException("No current row was set")
val r = currentEntry._1
currentEntry = null
r
}
}
}
override def getPreferredLocations(split: Partition): Seq[String] = {
split.asInstanceOf[DataSourceRDDPartition[UnsafeRow]].readerFactory.preferredLocations()
}
}
case class EpochPackedPartitionOffset(epoch: Long) extends PartitionOffset
class EpochPollRunnable(
queue: BlockingQueue[(UnsafeRow, PartitionOffset)],
context: TaskContext,
failedFlag: AtomicBoolean)
extends Thread with Logging {
private[continuous] var failureReason: Throwable = _
private val epochEndpoint = EpochCoordinatorRef.get(
context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY), SparkEnv.get)
private var currentEpoch = context.getLocalProperty(ContinuousExecution.START_EPOCH_KEY).toLong
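  // Each scheduled run asks the coordinator for the current epoch and enqueues one (null, null)
  // marker per epoch boundary crossed since the last poll.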
override def run(): Unit = {
try {
val newEpoch = epochEndpoint.askSync[Long](GetCurrentEpoch)
for (i <- currentEpoch to newEpoch - 1) {
queue.put((null, null))
logDebug(s"Sent marker to start epoch ${i + 1}")
}
currentEpoch = newEpoch
} catch {
case t: Throwable =>
failureReason = t
failedFlag.set(true)
throw t
}
}
}
class DataReaderThread(
reader: DataReader[UnsafeRow],
queue: BlockingQueue[(UnsafeRow, PartitionOffset)],
context: TaskContext,
failedFlag: AtomicBoolean)
extends Thread(
s"continuous-reader--${context.partitionId()}--" +
s"${context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY)}") {
private[continuous] var failureReason: Throwable = _
override def run(): Unit = {
TaskContext.setTaskContext(context)
val baseReader = ContinuousDataSourceRDD.getBaseReader(reader)
try {
while (!context.isInterrupted && !context.isCompleted()) {
if (!reader.next()) {
// Check again, since reader.next() might have blocked through an incoming interrupt.
if (!context.isInterrupted && !context.isCompleted()) {
throw new IllegalStateException(
"Continuous reader reported no elements! Reader should have blocked waiting.")
} else {
return
}
}
queue.put((reader.get().copy(), baseReader.getOffset))
}
} catch {
case _: InterruptedException if context.isInterrupted() =>
// Continuous shutdown always involves an interrupt; do nothing and shut down quietly.
case t: Throwable =>
failureReason = t
failedFlag.set(true)
// Don't rethrow the exception in this thread. It's not needed, and the default Spark
// exception handler will kill the executor.
} finally {
reader.close()
}
}
}
object ContinuousDataSourceRDD {
private[continuous] def getBaseReader(reader: DataReader[UnsafeRow]): ContinuousDataReader[_] = {
reader match {
case r: ContinuousDataReader[UnsafeRow] => r
case wrapped: RowToUnsafeDataReader =>
wrapped.rowReader.asInstanceOf[ContinuousDataReader[Row]]
case _ =>
throw new IllegalStateException(s"Unknown continuous reader type ${reader.getClass}")
}
}
}
|
sureshthalamati/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousDataSourceRDDIter.scala
|
Scala
|
apache-2.0
| 8,460
|
/*
* Copyright (c) 1995, 2008, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle or the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scala.swing.examples.tutorials.components
import java.awt.{Dimension, Font}
import javax.swing.ImageIcon
import scala.swing._
/**
* Tutorial: How to Use Combo Boxes
* [[http://docs.oracle.com/javase/tutorial/uiswing/components/combobox.html]]
*
* Source code reference:
* [[http://docs.oracle.com/javase/tutorial/uiswing/examples/components/CustomComboBoxDemoProject/src/components/CustomComboBoxDemo.java]]
*
* CustomComboBoxDemo.scala uses the following files:
* /scala/swing/examples/tutorials/images/Bird.gif
* /scala/swing/examples/tutorials/images/Cat.gif
* /scala/swing/examples/tutorials/images/Dog.gif
* /scala/swing/examples/tutorials/images/Rabbit.gif
* /scala/swing/examples/tutorials/images/Pig.gif
*/
class CustomComboBoxDemo extends BorderPanel {
val petStrings: Array[String] = Array("Bird", "Cat", "Dog", "Rabbit", "Pig")
// val images: Array[Option[ImageIcon]] = new Array[Option[ImageIcon]](petStrings.length)
val intArray: Array[Int] = (0 until petStrings.length ).toArray
/*
* Despite its use of EmptyBorder, this panel makes a fine content
* pane because the empty border just increases the panel's size
* and is "painted" on top of the panel's normal background. In
* other words, the JPanel fills its entire background if it's
* opaque (which it is by default); adding a border doesn't change
* that.
*/
val images:Array[Option[ImageIcon]] = petStrings.map( pet => {
val oImage = CustomComboBoxDemo.createImageIcon(s"/scala/swing/examples/tutorials/images/$pet.gif")
oImage.map( img => {img.setDescription(pet); img} )
})
//Create the combo box.
val petList = new ComboBox[Int](intArray) {
renderer = new ComboBoxRenderer()
preferredSize = new Dimension(200, 130)
maximumRowCount = 3
}
//Lay out the demo.
layout(petList) = BorderPanel.Position.Center
border = Swing.EmptyBorder(20, 20, 20, 20)
class ComboBoxRenderer extends ListView.AbstractRenderer[Int, Label](new Label("")) {
var uhOhFont: Option[Font] = None
/*
* This method finds the image and text corresponding
* to the selected value and returns the label, set up
* to display the text and image.
*/
def configure( listMe: ListView[_], isSelected: Boolean, cellHasFocus: Boolean, a: Int, index: Int): Unit = {
//Set the icon and text. If icon was null, say so.
images(a) match {
case Some( icon ) =>
component.icon = icon
component.text = petStrings(a)
component.font = listMe.font
case None => setUhOhText( s"${petStrings(a)} (no image available)", listMe.font)
}
}
//Set the font and text when no image was found.
def setUhOhText(uhOhText: String, normalFont: Font): Unit = {
if (!uhOhFont.isDefined) { //lazily create this font
uhOhFont = Some(normalFont.deriveFont(Font.ITALIC))
}
component.font = uhOhFont.getOrElse( normalFont )
component.text = uhOhText
}
}
}
object CustomComboBoxDemo extends SimpleSwingApplication {
def createImageIcon(path: String): Option[javax.swing.ImageIcon] =
Option(resourceFromClassloader(path)).map(imgURL => Swing.Icon(imgURL))
lazy val top = new MainFrame() {
title = "CustomComboBoxDemo"
//Create and set up the content pane.
val newContentPane = new CustomComboBoxDemo()
newContentPane.opaque = true
contents = newContentPane
}
}
|
benhutchison/scala-swing
|
examples/src/main/scala/scala/swing/examples/tutorials/components/CustomComboBoxDemo.scala
|
Scala
|
bsd-3-clause
| 5,049
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.util.{Locale, TimeZone}
import scala.annotation.tailrec
import org.apache.commons.io.FileUtils
import org.scalatest.Assertions
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.rdd.BlockRDD
import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.logical.Aggregate
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.exchange.Exchange
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.MemorySink
import org.apache.spark.sql.execution.streaming.state.{StateSchemaNotCompatible, StateStore, StreamingAggregationStateManager}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.OutputMode._
import org.apache.spark.sql.streaming.util.{MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.{StructType, TimestampType}
import org.apache.spark.storage.{BlockId, StorageLevel, TestBlockId}
import org.apache.spark.util.Utils
object FailureSingleton {
var firstTime = true
}
class StreamingAggregationSuite extends StateStoreMetricsTest with Assertions {
import testImplicits._
def executeFuncWithStateVersionSQLConf(
stateVersion: Int,
confPairs: Seq[(String, String)],
func: => Any): Unit = {
withSQLConf(confPairs ++
Seq(SQLConf.STREAMING_AGGREGATION_STATE_FORMAT_VERSION.key -> stateVersion.toString): _*) {
func
}
}
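  // Runs the given test body once per supported streaming-aggregation state format version,
  // with the version pinned through SQLConf.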
def testWithAllStateVersions(name: String, confPairs: (String, String)*)
(func: => Any): Unit = {
for (version <- StreamingAggregationStateManager.supportedVersions) {
test(s"$name - state format version $version") {
executeFuncWithStateVersionSQLConf(version, confPairs, func)
}
}
}
def testQuietlyWithAllStateVersions(name: String, confPairs: (String, String)*)
(func: => Any): Unit = {
for (version <- StreamingAggregationStateManager.supportedVersions) {
testQuietly(s"$name - state format version $version") {
executeFuncWithStateVersionSQLConf(version, confPairs, func)
}
}
}
testWithAllStateVersions("simple count, update mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Update)(
AddData(inputData, 3),
CheckLastBatch((3, 1)),
AddData(inputData, 3, 2),
CheckLastBatch((3, 2), (2, 1)),
StopStream,
StartStream(),
AddData(inputData, 3, 2, 1),
CheckLastBatch((3, 3), (2, 2), (1, 1)),
// By default we run in new tuple mode.
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch((4, 4))
)
}
testWithAllStateVersions("count distinct") {
val inputData = MemoryStream[(Int, Seq[Int])]
val aggregated =
inputData.toDF()
.select($"*", explode($"_2") as 'value)
.groupBy($"_1")
.agg(size(collect_set($"value")))
.as[(Int, Int)]
testStream(aggregated, Update)(
AddData(inputData, (1, Seq(1, 2))),
CheckLastBatch((1, 2))
)
}
testWithAllStateVersions("simple count, complete mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Complete)(
AddData(inputData, 3),
CheckLastBatch((3, 1)),
AddData(inputData, 2),
CheckLastBatch((3, 1), (2, 1)),
StopStream,
StartStream(),
AddData(inputData, 3, 2, 1),
CheckLastBatch((3, 2), (2, 2), (1, 1)),
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch((4, 4), (3, 2), (2, 2), (1, 1))
)
}
testWithAllStateVersions("simple count, append mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
val e = intercept[AnalysisException] {
testStream(aggregated, Append)()
}
Seq("append", "not supported").foreach { m =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
}
}
testWithAllStateVersions("sort after aggregate in complete mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.toDF("value", "count")
.orderBy($"count".desc)
.as[(Int, Long)]
testStream(aggregated, Complete)(
AddData(inputData, 3),
CheckLastBatch(isSorted = true, (3, 1)),
AddData(inputData, 2, 3),
CheckLastBatch(isSorted = true, (3, 2), (2, 1)),
StopStream,
StartStream(),
AddData(inputData, 3, 2, 1),
CheckLastBatch(isSorted = true, (3, 3), (2, 2), (1, 1)),
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch(isSorted = true, (4, 4), (3, 3), (2, 2), (1, 1))
)
}
testWithAllStateVersions("state metrics - append mode") {
val inputData = MemoryStream[Int]
val aggWithWatermark = inputData.toDF()
.withColumn("eventTime", timestamp_seconds($"value"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
implicit class RichStreamExecution(query: StreamExecution) {
// this could be either empty row batch or actual batch
def stateNodes: Seq[SparkPlan] = {
query.lastExecution.executedPlan.collect {
case p if p.isInstanceOf[StateStoreSaveExec] => p
}
}
// Pick the latest progress that actually ran a batch
def lastExecutedBatch: StreamingQueryProgress = {
query.recentProgress.filter(_.durationMs.containsKey("addBatch")).last
}
def stateOperatorProgresses: Seq[StateOperatorProgress] = {
lastExecutedBatch.stateOperators
}
}
val clock = new StreamManualClock()
testStream(aggWithWatermark)(
// batchId 0
AddData(inputData, 15),
StartStream(Trigger.ProcessingTime("interval 1 second"), clock),
CheckAnswer(), // watermark = 0
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 1 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 1 },
AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
// batchId 1 without data
AdvanceManualClock(1000L), // watermark = 5
Execute { q => // wait for the no data batch to complete
eventually(timeout(streamingTimeout)) { assert(q.lastProgress.batchId === 1) }
},
CheckAnswer(),
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 0 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 1 },
AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
// batchId 2 with data
AddData(inputData, 10, 12, 14),
AdvanceManualClock(1000L), // watermark = 5
CheckAnswer(),
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 1 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 2 },
AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
// batchId 3 with data
AddData(inputData, 25),
AdvanceManualClock(1000L), // watermark = 5
CheckAnswer(),
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 1 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 3 },
AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
// batchId 4 without data
AdvanceManualClock(1000L), // watermark = 15
Execute { q => // wait for the no data batch to complete
eventually(timeout(streamingTimeout)) { assert(q.lastProgress.batchId === 4) }
},
CheckAnswer((10, 3)),
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 1 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 0 },
AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 2 },
AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 1 }
)
}
testWithAllStateVersions("state metrics - update/complete mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDS()
.flatMap(x => Seq(x, x + 1))
.toDF("value")
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
implicit class RichStreamExecution(query: StreamExecution) {
def stateNodes: Seq[SparkPlan] = {
query.lastExecution.executedPlan.collect {
case p if p.isInstanceOf[StateStoreSaveExec] => p
}
}
}
// Test with Update mode
testStream(aggregated, Update)(
AddData(inputData, 1),
CheckLastBatch((1, 1), (2, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 2 },
AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 2 },
AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 2 },
AddData(inputData, 2, 3),
CheckLastBatch((2, 2), (3, 2), (4, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 3 },
AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 3 },
AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 4 }
)
// Test with Complete mode
inputData.reset()
testStream(aggregated, Complete)(
AddData(inputData, 1),
CheckLastBatch((1, 1), (2, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 2 },
AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 2 },
AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 2 },
AddData(inputData, 2, 3),
CheckLastBatch((1, 1), (2, 2), (3, 2), (4, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 4 },
AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 3 },
AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 4 }
)
}
testWithAllStateVersions("multiple keys") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value", $"value" + 1)
.agg(count("*"))
.as[(Int, Int, Long)]
testStream(aggregated, Update)(
AddData(inputData, 1, 2),
CheckLastBatch((1, 2, 1), (2, 3, 1)),
AddData(inputData, 1, 2),
CheckLastBatch((1, 2, 2), (2, 3, 2))
)
}
testWithAllStateVersions("SPARK-29438: ensure UNION doesn't lead streaming aggregation to use" +
" shifted partition IDs") {
def constructUnionDf(desiredPartitionsForInput1: Int)
: (MemoryStream[Int], MemoryStream[Int], DataFrame) = {
val input1 = MemoryStream[Int](desiredPartitionsForInput1)
val input2 = MemoryStream[Int]
val df1 = input1.toDF()
.select($"value", $"value" + 1)
val df2 = input2.toDF()
.groupBy($"value")
.agg(count("*"))
// Unioned DF would have columns as (Int, Int)
(input1, input2, df1.union(df2))
}
withTempDir { checkpointDir =>
val (input1, input2, unionDf) = constructUnionDf(2)
testStream(unionDf, Update)(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
MultiAddData(input1, 11, 12)(input2, 21, 22),
CheckNewAnswer(Row(11, 12), Row(12, 13), Row(21, 1), Row(22, 1)),
StopStream
)
// We're restoring the query with different number of partitions in left side of UNION,
// which may lead right side of union to have mismatched partition IDs (e.g. if it relies on
// TaskContext.partitionId()). This test will verify streaming aggregation doesn't have
// such issue.
val (newInput1, newInput2, newUnionDf) = constructUnionDf(3)
newInput1.addData(11, 12)
newInput2.addData(21, 22)
testStream(newUnionDf, Update)(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
MultiAddData(newInput1, 13, 14)(newInput2, 22, 23),
CheckNewAnswer(Row(13, 14), Row(14, 15), Row(22, 2), Row(23, 1))
)
}
}
testQuietlyWithAllStateVersions("midbatch failure") {
val inputData = MemoryStream[Int]
FailureSingleton.firstTime = true
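    // The map below throws exactly once (on the first 4 it sees); after the injected failure the
    // restarted query should replay the batch successfully.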
val aggregated =
inputData.toDS()
.map { i =>
if (i == 4 && FailureSingleton.firstTime) {
FailureSingleton.firstTime = false
sys.error("injected failure")
}
i
}
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Update)(
StartStream(),
AddData(inputData, 1, 2, 3, 4),
ExpectFailure[SparkException](),
StartStream(),
CheckLastBatch((1, 1), (2, 1), (3, 1), (4, 1))
)
}
testWithAllStateVersions("prune results by current_time or localtimestamp, complete mode") {
import testImplicits._
val inputDataOne = MemoryStream[Long]
val aggregatedOne =
inputDataOne.toDF()
.groupBy($"value")
.agg(count("*"))
.where('value >= current_timestamp().cast("long") - 10L)
val inputDataTwo = MemoryStream[Long]
val aggregatedTwo =
inputDataTwo.toDF()
.groupBy($"value")
.agg(count("*"))
.where('value >= localtimestamp().cast(TimestampType).cast("long") - 10L)
Seq((inputDataOne, aggregatedOne), (inputDataTwo, aggregatedTwo)).foreach { x =>
val inputData = x._1
val aggregated = x._2
val clock = new StreamManualClock
testStream(aggregated, Complete)(
StartStream(Trigger.ProcessingTime("10 seconds"), triggerClock = clock),
// advance clock to 10 seconds, all keys retained
AddData(inputData, 0L, 5L, 5L, 10L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((0L, 1), (5L, 2), (10L, 1)),
// advance clock to 20 seconds, should retain keys >= 10
AddData(inputData, 15L, 15L, 20L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((10L, 1), (15L, 2), (20L, 1)),
// advance clock to 30 seconds, should retain keys >= 20
AddData(inputData, 0L, 85L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((20L, 1), (85L, 1)),
// bounce stream and ensure correct batch timestamp is used
// i.e., we don't take it from the clock, which is at 90 seconds.
StopStream,
AssertOnQuery { q => // clear the sink
q.sink.asInstanceOf[MemorySink].clear()
q.commitLog.purge(3)
// advance by a minute i.e., 90 seconds total
clock.advance(60 * 1000L)
true
},
StartStream(Trigger.ProcessingTime("10 seconds"), triggerClock = clock),
// The commit log was blown away, causing the last batch to re-run
CheckLastBatch((20L, 1), (85L, 1)),
AssertOnQuery { q =>
clock.getTimeMillis() == 90000L
},
// advance clock to 100 seconds, should retain keys >= 90
AddData(inputData, 85L, 90L, 100L, 105L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((90L, 1), (100L, 1), (105L, 1))
)
}
}
testWithAllStateVersions("prune results by current_date, complete mode") {
import testImplicits._
val clock = new StreamManualClock
val tz = TimeZone.getDefault.getID
val inputData = MemoryStream[Long]
val aggregated =
inputData.toDF()
.select(to_utc_timestamp(from_unixtime('value * SECONDS_PER_DAY), tz))
.toDF("value")
.groupBy($"value")
.agg(count("*"))
.where($"value".cast("date") >= date_sub(current_date(), 10))
.select(($"value".cast("long") / SECONDS_PER_DAY).cast("long"), $"count(1)")
testStream(aggregated, Complete)(
StartStream(Trigger.ProcessingTime("10 day"), triggerClock = clock),
// advance clock to 10 days, should retain all keys
AddData(inputData, 0L, 5L, 5L, 10L),
AdvanceManualClock(MILLIS_PER_DAY * 10),
CheckLastBatch((0L, 1), (5L, 2), (10L, 1)),
// advance clock to 20 days, should retain keys >= 10
AddData(inputData, 15L, 15L, 20L),
AdvanceManualClock(MILLIS_PER_DAY * 10),
CheckLastBatch((10L, 1), (15L, 2), (20L, 1)),
// advance clock to 30 days, should retain keys >= 20
AddData(inputData, 85L),
AdvanceManualClock(MILLIS_PER_DAY * 10),
CheckLastBatch((20L, 1), (85L, 1)),
// bounce stream and ensure correct batch timestamp is used
// i.e., we don't take it from the clock, which is at 90 days.
StopStream,
AssertOnQuery { q => // clear the sink
q.sink.asInstanceOf[MemorySink].clear()
q.commitLog.purge(3)
// advance by 60 days i.e., 90 days total
clock.advance(MILLIS_PER_DAY * 60)
true
},
StartStream(Trigger.ProcessingTime("10 day"), triggerClock = clock),
// The commit log was blown away, causing a re-run of the last batch
CheckLastBatch((20L, 1), (85L, 1)),
// advance clock to 100 days, should retain keys >= 90
AddData(inputData, 85L, 90L, 100L, 105L),
AdvanceManualClock(MILLIS_PER_DAY * 10),
CheckLastBatch((90L, 1), (100L, 1), (105L, 1))
)
}
testWithAllStateVersions("SPARK-19690: do not convert batch aggregation in streaming query " +
"to streaming") {
val streamInput = MemoryStream[Int]
val batchDF = Seq(1, 2, 3, 4, 5)
.toDF("value")
.withColumn("parity", 'value % 2)
.groupBy('parity)
.agg(count("*") as 'joinValue)
val joinDF = streamInput
.toDF()
.join(batchDF, 'value === 'parity)
// make sure we're planning an aggregate in the first place
assert(batchDF.queryExecution.optimizedPlan match { case _: Aggregate => true })
testStream(joinDF, Append)(
AddData(streamInput, 0, 1, 2, 3),
CheckLastBatch((0, 0, 2), (1, 1, 3)),
AddData(streamInput, 0, 1, 2, 3),
CheckLastBatch((0, 0, 2), (1, 1, 3)))
}
/**
* This method verifies certain properties in the SparkPlan of a streaming aggregation.
* First of all, it checks that the child of a `StateStoreRestoreExec` creates the desired
* data distribution, where the child could be an Exchange, or a `HashAggregateExec` which already
* provides the expected data distribution.
*
* The second thing it checks is that the child provides the expected number of partitions.
*
* The third thing it checks is that we don't add an unnecessary shuffle between
* `StateStoreRestoreExec` and `StateStoreSaveExec`.
*/
private def checkAggregationChain(
se: StreamExecution,
expectShuffling: Boolean,
expectedPartition: Int): Boolean = {
val executedPlan = se.lastExecution.executedPlan
val restore = executedPlan
.collect { case ss: StateStoreRestoreExec => ss }
.head
restore.child match {
case node: UnaryExecNode =>
assert(node.outputPartitioning.numPartitions === expectedPartition,
"Didn't get the expected number of partitions.")
if (expectShuffling) {
assert(node.isInstanceOf[Exchange], s"Expected a shuffle, got: ${node.child}")
} else {
assert(!node.isInstanceOf[Exchange], "Didn't expect a shuffle")
}
case _ => fail("Expected no shuffling")
}
var reachedRestore = false
// Check that there are no exchanges after `StateStoreRestoreExec`
executedPlan.foreachUp { p =>
if (reachedRestore) {
assert(!p.isInstanceOf[Exchange], "There should be no further exchanges")
} else {
reachedRestore = p.isInstanceOf[StateStoreRestoreExec]
}
}
true
}
testWithAllStateVersions("SPARK-21977: coalesce(1) with 0 partition RDD should be " +
"repartitioned to 1") {
val inputSource = new BlockRDDBackedSource(spark)
MockSourceProvider.withMockSources(inputSource) {
// `coalesce(1)` changes the partitioning of data to `SinglePartition` which by default
// satisfies the required distributions of all aggregations. Therefore in our SparkPlan, we
// don't have any shuffling. However, `coalesce(1)` only guarantees that the RDD has at most
// one partition, which means that if we have an input RDD with 0 partitions, nothing gets
// executed. Therefore the StateStores don't save any delta files for a given trigger, which
// then leads to `FileNotFoundException`s in the subsequent batch.
// This isn't the only problem though. Once we introduce a shuffle before
// `StateStoreRestoreExec`, the input to the operator is an empty iterator. When performing
// `groupBy().agg(...)`, `HashAggregateExec` returns a `0` value for all aggregations. If
// we fail to restore the previous state in `StateStoreRestoreExec`, we save the 0 value in
// `StateStoreSaveExec` losing all previous state.
val aggregated: Dataset[Long] =
spark.readStream.format((new MockSourceProvider).getClass.getCanonicalName)
.load().coalesce(1).groupBy().count().as[Long]
testStream(aggregated, Complete())(
AddBlockData(inputSource, Seq(1)),
CheckLastBatch(1),
AssertOnQuery("Verify no shuffling") { se =>
checkAggregationChain(se, expectShuffling = false, 1)
},
AddBlockData(inputSource), // create an empty trigger
CheckLastBatch(1),
AssertOnQuery("Verify that no exchange is required") { se =>
checkAggregationChain(se, expectShuffling = false, 1)
},
AddBlockData(inputSource, Seq(2, 3)),
CheckLastBatch(3),
AddBlockData(inputSource),
CheckLastBatch(3),
StopStream
)
}
}
testWithAllStateVersions("SPARK-21977: coalesce(1) with aggregation should still be " +
"repartitioned when it has non-empty grouping keys") {
val inputSource = new BlockRDDBackedSource(spark)
MockSourceProvider.withMockSources(inputSource) {
withTempDir { tempDir =>
// `coalesce(1)` changes the partitioning of data to `SinglePartition` which by default
// satisfies the required distributions of all aggregations. However, when we have
// non-empty grouping keys, in streaming, we must repartition to
// `spark.sql.shuffle.partitions`, otherwise only a single StateStore is used to process
// all keys. This may be fine; however, if the user removes the coalesce(1) or, for example,
// changes it to `coalesce(2)`, then the default behavior is to shuffle to
// `spark.sql.shuffle.partitions` many StateStores. When this happens, all StateStores
// except one will be missing their previous delta files, which causes the stream to fail
// with a FileNotFoundException.
def createDf(partitions: Int): Dataset[(Long, Long)] = {
spark.readStream
.format((new MockSourceProvider).getClass.getCanonicalName)
.load().coalesce(partitions).groupBy('a % 1).count().as[(Long, Long)]
}
testStream(createDf(1), Complete())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
AddBlockData(inputSource, Seq(1)),
CheckLastBatch((0L, 1L)),
AssertOnQuery("Verify addition of exchange operator") { se =>
checkAggregationChain(
se,
expectShuffling = true,
spark.sessionState.conf.numShufflePartitions)
},
StopStream
)
testStream(createDf(2), Complete())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
Execute(se => se.processAllAvailable()),
AddBlockData(inputSource, Seq(2), Seq(3), Seq(4)),
CheckLastBatch((0L, 4L)),
AssertOnQuery("Verify no exchange added") { se =>
checkAggregationChain(
se,
expectShuffling = false,
spark.sessionState.conf.numShufflePartitions)
},
AddBlockData(inputSource),
CheckLastBatch((0L, 4L)),
StopStream
)
}
}
}
testWithAllStateVersions("SPARK-22230: last should change with new batches") {
val input = MemoryStream[Int]
val aggregated = input.toDF().agg(last('value))
testStream(aggregated, OutputMode.Complete())(
AddData(input, 1, 2, 3),
CheckLastBatch(3),
AddData(input, 4, 5, 6),
CheckLastBatch(6),
AddData(input),
CheckLastBatch(6),
AddData(input, 0),
CheckLastBatch(0)
)
}
testWithAllStateVersions("SPARK-23004: Ensure that TypedImperativeAggregate functions " +
"do not throw errors", SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
// See the JIRA SPARK-23004 for more details. In short, this test reproduces the error
// by ensuring the following.
// - A streaming query with a streaming aggregation.
// - Aggregation function 'collect_list' that is a subclass of TypedImperativeAggregate.
// - Post shuffle partition has exactly 128 records (i.e. the threshold at which
// ObjectHashAggregateExec falls back to sort-based aggregation). This is done by having a
// micro-batch with 128 records that shuffle to a single partition.
// This test throws the exact error reported in SPARK-23004 without the corresponding fix.
val input = MemoryStream[Int]
val df = input.toDF().toDF("value")
.selectExpr("value as group", "value")
.groupBy("group")
.agg(collect_list("value"))
testStream(df, outputMode = OutputMode.Update)(
AddData(input, (1 to spark.sqlContext.conf.objectAggSortBasedFallbackThreshold): _*),
AssertOnQuery { q =>
q.processAllAvailable()
true
}
)
}
test("simple count, update mode - recovery from checkpoint uses state format version 1") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
val resourceUri = this.getClass.getResource(
"/structured-streaming/checkpoint-version-2.3.1-streaming-aggregate-state-format-1/").toURI
val checkpointDir = Utils.createTempDir().getCanonicalFile
// Copy the checkpoint to a temp dir to prevent changes to the original.
// Not doing this will lead to the test passing on the first run, but failing on subsequent runs.
FileUtils.copyDirectory(new File(resourceUri), checkpointDir)
inputData.addData(3)
inputData.addData(3, 2)
testStream(aggregated, Update)(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath,
additionalConfs = Map(SQLConf.STREAMING_AGGREGATION_STATE_FORMAT_VERSION.key -> "2")),
/*
Note: The checkpoint was generated using the following input in Spark version 2.3.1
AddData(inputData, 3),
CheckLastBatch((3, 1)),
AddData(inputData, 3, 2),
CheckLastBatch((3, 2), (2, 1))
*/
AddData(inputData, 3, 2, 1),
CheckLastBatch((3, 3), (2, 2), (1, 1)),
Execute { query =>
// Verify state format = 1
val stateVersions = query.lastExecution.executedPlan.collect {
case f: StateStoreSaveExec => f.stateFormatVersion
case f: StateStoreRestoreExec => f.stateFormatVersion
}
assert(stateVersions.size == 2)
assert(stateVersions.forall(_ == 1))
},
// By default we run in new tuple mode.
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch((4, 4))
)
}
testQuietlyWithAllStateVersions("changing schema of state when restarting query",
(SQLConf.STATE_STORE_FORMAT_VALIDATION_ENABLED.key, "false")) {
withTempDir { tempDir =>
val (inputData, aggregated) = prepareTestForChangingSchemaOfState(tempDir)
// without the verification phase on the state schema, the modified query would throw an NPE
// with a stack trace that end users would not easily understand
testStream(aggregated, Update())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
AddData(inputData, 21),
ExpectFailure[SparkException] { e =>
val stateSchemaExc = findStateSchemaNotCompatible(e)
assert(stateSchemaExc.isDefined)
val msg = stateSchemaExc.get.getMessage
assert(msg.contains("Provided schema doesn't match to the schema for existing state"))
// other verifications are presented in StateStoreSuite
}
)
}
}
testQuietlyWithAllStateVersions("changing schema of state when restarting query -" +
" schema check off",
(SQLConf.STATE_SCHEMA_CHECK_ENABLED.key, "false"),
(SQLConf.STATE_STORE_FORMAT_VALIDATION_ENABLED.key, "false")) {
withTempDir { tempDir =>
val (inputData, aggregated) = prepareTestForChangingSchemaOfState(tempDir)
testStream(aggregated, Update())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
AddData(inputData, 21),
ExpectFailure[SparkException] { e =>
val stateSchemaExc = findStateSchemaNotCompatible(e)
// it would fail with some other runtime error, but it shouldn't check the schema in any way
assert(stateSchemaExc.isEmpty)
}
)
}
}
test("SPARK-35896: metrics in StateOperatorProgress are output correctly") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Update) (
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "3")),
AddData(inputData, 3, 2, 1, 3),
CheckLastBatch((3, 2), (2, 1), (1, 1)),
assertNumStateRows(
total = Seq(3), updated = Seq(3), droppedByWatermark = Seq(0), removed = Some(Seq(0))),
AddData(inputData, 1, 4),
CheckLastBatch((1, 2), (4, 1)),
assertStateOperatorProgressMetric(
operatorName = "stateStoreSave", numShufflePartitions = 3, numStateStoreInstances = 3)
)
inputData.reset() // reset the input to clear any data from prev test
testStream(aggregated, Complete) (
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "3")),
AddData(inputData, 3, 2, 1, 3),
CheckLastBatch((3, 2), (2, 1), (1, 1)),
assertNumStateRows(
total = Seq(3), updated = Seq(3), droppedByWatermark = Seq(0), removed = Some(Seq(0))),
AddData(inputData, 1, 4),
CheckLastBatch((3, 2), (2, 1), (1, 2), (4, 1)),
assertStateOperatorProgressMetric(
operatorName = "stateStoreSave", numShufflePartitions = 3, numStateStoreInstances = 3)
)
// with watermark and append output mode
val aggWithWatermark = inputData.toDF()
.withColumn("eventTime", timestamp_seconds($"value"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
inputData.reset() // reset the input to clear any data from prev test
testStream(aggWithWatermark, Append) (
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "3")),
AddData(inputData, 3, 2, 1, 9),
CheckLastBatch(),
assertStateOperatorProgressMetric(
operatorName = "stateStoreSave", numShufflePartitions = 3, numStateStoreInstances = 3),
AddData(inputData, 25), // Advance watermark to 15 secs, no-data-batch drops rows <= 15
CheckLastBatch((0, 3), (5, 1)),
assertNumStateRows(
total = Seq(1), updated = Seq(1), droppedByWatermark = Seq(0), removed = Some(Seq(2)))
)
}
private def prepareTestForChangingSchemaOfState(
tempDir: File): (MemoryStream[Int], DataFrame) = {
val inputData = MemoryStream[Int]
val aggregated = inputData.toDF()
.selectExpr("value % 10 AS id", "value")
.groupBy($"id")
.agg(
sum("value").as("sum_value"),
avg("value").as("avg_value"),
max("value").as("max_value"))
testStream(aggregated, Update())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
AddData(inputData, 1, 11),
CheckLastBatch((1L, 12L, 6.0, 11)),
StopStream
)
StateStore.unloadAll()
val inputData2 = MemoryStream[Int]
val aggregated2 = inputData2.toDF()
.selectExpr("value % 10 AS id", "value")
.groupBy($"id")
.agg(
sum("value").as("sum_value"),
avg("value").as("avg_value"),
collect_list("value").as("values"))
inputData2.addData(1, 11)
(inputData2, aggregated2)
}
@tailrec
private def findStateSchemaNotCompatible(exc: Throwable): Option[StateSchemaNotCompatible] = {
exc match {
case e1: StateSchemaNotCompatible => Some(e1)
case e1 if e1.getCause != null => findStateSchemaNotCompatible(e1.getCause)
case _ => None
}
}
/** Add blocks of data to the `BlockRDDBackedSource`. */
case class AddBlockData(source: BlockRDDBackedSource, data: Seq[Int]*) extends AddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
source.addBlocks(data: _*)
(source, LongOffset(source.counter))
}
}
/**
* A Streaming Source that is backed by a BlockRDD and that can create RDDs with 0 blocks at will.
*/
class BlockRDDBackedSource(spark: SparkSession) extends Source {
var counter = 0L
private val blockMgr = SparkEnv.get.blockManager
private var blocks: Seq[BlockId] = Seq.empty
def addBlocks(dataBlocks: Seq[Int]*): Unit = synchronized {
dataBlocks.foreach { data =>
val id = TestBlockId(counter.toString)
blockMgr.putIterator(id, data.iterator, StorageLevel.MEMORY_ONLY)
blocks ++= id :: Nil
counter += 1
}
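// bump the counter once more so the reported offset advances even when no blocks
// were added; this lets an empty AddBlockData produce a new (empty) trigger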
counter += 1
}
override def getOffset: Option[Offset] = synchronized {
if (counter == 0) None else Some(LongOffset(counter))
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = synchronized {
val rdd = new BlockRDD[Int](spark.sparkContext, blocks.toArray)
.map(i => InternalRow(i)) // we don't really care about the values in this test
blocks = Seq.empty
spark.internalCreateDataFrame(rdd, schema, isStreaming = true).toDF()
}
override def schema: StructType = MockSourceProvider.fakeSchema
override def stop(): Unit = {
blockMgr.getMatchingBlockIds(_.isInstanceOf[TestBlockId]).foreach(blockMgr.removeBlock(_))
}
}
}
|
chuckchen/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
|
Scala
|
apache-2.0
| 36,500
|
package net.mtgto.garoon.user
import org.specs2.mutable.Specification
class UserSpec extends Specification {
"User" should {
"be able to create an event from valid xml" in {
val xml =
<user
key="48"
version="1245376338"
order="0"
login_name="naka"
name="那珂"
status="0"
primary_organization="1032">
<organization xmlns="http://schemas.cybozu.co.jp/base/2008" id="2"/>
</user>
val user = User(xml)
user.identity.value must_== "48"
user.loginName must_== "naka"
user.name must_== "那珂"
user.organizationIds must_== Seq(OrganizationId("2"))
user.primaryOrganizationId must beSome(OrganizationId("1032"))
}
}
}
|
mtgto/garoon
|
src/test/scala/net/mtgto/garoon/user/UserSpec.scala
|
Scala
|
gpl-3.0
| 765
|
package org.jetbrains.plugins.scala.testingSupport.utest.scala2_10
import org.jetbrains.plugins.scala.testingSupport.utest.UTestSimpleTest
/**
* @author Roman.Shein
* @since 04.09.2015.
*/
class UTestSimpleTest_2_10 extends UTestTestBase_2_10 with UTestSimpleTest {
}
|
whorbowicz/intellij-scala
|
test/org/jetbrains/plugins/scala/testingSupport/utest/scala2_10/UTestSimpleTest_2_10.scala
|
Scala
|
apache-2.0
| 277
|
package me.apidoc.swagger
case class SchemaType(
swagger: String,
apidoc: String
)
object SchemaType {
val all = Seq(
SchemaType("int32", "integer"),
SchemaType("int64", "long"),
SchemaType("float", "double"),
SchemaType("decimal", "decimal"),
SchemaType("double", "double"),
SchemaType("string", "string"),
SchemaType("byte", "string"), // TODO: apidoc needs support for byte
SchemaType("boolean", "boolean"),
SchemaType("date", "date-iso8601"),
SchemaType("dateTime", "date-time-iso8601"),
SchemaType("uuid", "uuid")
)
def fromSwagger(
swaggerType: String,
format: Option[String]
): Option[String] = {
format match {
case None => all.find(_.swagger == swaggerType).map(_.apidoc)
case Some(format) => all.find(_.swagger == format).map(_.apidoc)
}
}
def fromSwaggerWithError(
swaggerType: String,
format: Option[String]
): String = {
fromSwagger(swaggerType, format).getOrElse {
sys.error(s"Could not resolve swagger type[$swaggerType] format[${format.getOrElse("")}]")
}
}
}
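// A minimal usage sketch, assuming only the mappings defined above; the example object
// name and the sample inputs are hypothetical illustrations, not part of the original API.
object SchemaTypeExample {
  def main(args: Array[String]): Unit = {
    // When a format is given, it takes precedence over the base swagger type.
    println(SchemaType.fromSwagger("integer", Some("int64"))) // Some(long)
    // Without a format, the base swagger type is looked up directly.
    println(SchemaType.fromSwagger("boolean", None)) // Some(boolean)
    // Unknown types resolve to None; fromSwaggerWithError would raise an error instead.
    println(SchemaType.fromSwagger("object", None)) // None
  }
}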
|
Seanstoppable/apidoc
|
swagger/src/main/scala/me/apidoc/swagger/SchemaType.scala
|
Scala
|
mit
| 1,096
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.math.{BigDecimal, BigInteger}
import java.lang.{Iterable => JIterable}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.TupleTypeInfo
import org.apache.flink.table.functions.AggregateFunction
/** The initial accumulator for Integral Avg aggregate function */
class IntegralAvgAccumulator extends JTuple2[Long, Long] {
f0 = 0L //sum
f1 = 0L //count
}
/**
* Base class for built-in Integral Avg aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class IntegralAvgAggFunction[T] extends AggregateFunction[T, IntegralAvgAccumulator] {
override def createAccumulator(): IntegralAvgAccumulator = {
new IntegralAvgAccumulator
}
def accumulate(acc: IntegralAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].longValue()
acc.f0 += v
acc.f1 += 1L
}
}
def retract(acc: IntegralAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].longValue()
acc.f0 -= v
acc.f1 -= 1L
}
}
override def getValue(acc: IntegralAvgAccumulator): T = {
if (acc.f1 == 0) {
null.asInstanceOf[T]
} else {
resultTypeConvert(acc.f0 / acc.f1)
}
}
def merge(acc: IntegralAvgAccumulator, its: JIterable[IntegralAvgAccumulator]): Unit = {
val iter = its.iterator()
while (iter.hasNext) {
val a = iter.next()
acc.f1 += a.f1
acc.f0 += a.f0
}
}
def resetAccumulator(acc: IntegralAvgAccumulator): Unit = {
acc.f0 = 0L
acc.f1 = 0L
}
override def getAccumulatorType: TypeInformation[IntegralAvgAccumulator] = {
new TupleTypeInfo(
classOf[IntegralAvgAccumulator],
BasicTypeInfo.LONG_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
/**
* Convert the intermediate result to the expected aggregation result type
*
* @param value the intermediate result. We use a Long container to save
* the intermediate result to avoid overflow in the sum operation.
* @return the result value with the expected aggregation result type
*/
def resultTypeConvert(value: Long): T
}
/**
* Built-in Byte Avg aggregate function
*/
class ByteAvgAggFunction extends IntegralAvgAggFunction[Byte] {
override def resultTypeConvert(value: Long): Byte = value.toByte
}
/**
* Built-in Short Avg aggregate function
*/
class ShortAvgAggFunction extends IntegralAvgAggFunction[Short] {
override def resultTypeConvert(value: Long): Short = value.toShort
}
/**
* Built-in Int Avg aggregate function
*/
class IntAvgAggFunction extends IntegralAvgAggFunction[Int] {
override def resultTypeConvert(value: Long): Int = value.toInt
}
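/**
 * A minimal usage sketch (hypothetical example object, not part of the original file):
 * the accumulator keeps a Long sum and count, so the average truncates via Long division
 * before being narrowed to the result type by `resultTypeConvert`.
 */
object IntegralAvgAggFunctionExample {
  def main(args: Array[String]): Unit = {
    val avg = new IntAvgAggFunction
    val acc = avg.createAccumulator()
    Seq(1, 2).foreach(avg.accumulate(acc, _))
    // sum = 3, count = 2, so getValue yields 3 / 2 = 1 under Long division
    println(avg.getValue(acc)) // prints 1
  }
}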
/** The initial accumulator for Big Integral Avg aggregate function */
class BigIntegralAvgAccumulator
extends JTuple2[BigInteger, Long] {
f0 = BigInteger.ZERO //sum
f1 = 0L //count
}
/**
* Base Class for Built-in Big Integral Avg aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class BigIntegralAvgAggFunction[T]
extends AggregateFunction[T, BigIntegralAvgAccumulator] {
override def createAccumulator(): BigIntegralAvgAccumulator = {
new BigIntegralAvgAccumulator
}
def accumulate(acc: BigIntegralAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Long]
acc.f0 = acc.f0.add(BigInteger.valueOf(v))
acc.f1 += 1L
}
}
def retract(acc: BigIntegralAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Long]
acc.f0 = acc.f0.subtract(BigInteger.valueOf(v))
acc.f1 -= 1L
}
}
override def getValue(acc: BigIntegralAvgAccumulator): T = {
if (acc.f1 == 0) {
null.asInstanceOf[T]
} else {
resultTypeConvert(acc.f0.divide(BigInteger.valueOf(acc.f1)))
}
}
def merge(acc: BigIntegralAvgAccumulator, its: JIterable[BigIntegralAvgAccumulator]): Unit = {
val iter = its.iterator()
while (iter.hasNext) {
val a = iter.next()
acc.f1 += a.f1
acc.f0 = acc.f0.add(a.f0)
}
}
def resetAccumulator(acc: BigIntegralAvgAccumulator): Unit = {
acc.f0 = BigInteger.ZERO
acc.f1 = 0
}
override def getAccumulatorType: TypeInformation[BigIntegralAvgAccumulator] = {
new TupleTypeInfo(
classOf[BigIntegralAvgAccumulator],
BasicTypeInfo.BIG_INT_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
/**
* Convert the intermediate result to the expected aggregation result type
*
* @param value the intermediate result. We use a BigInteger container to
* save the intermediate result to avoid overflow in the sum
* operation.
* @return the result value with the expected aggregation result type
*/
def resultTypeConvert(value: BigInteger): T
}
/**
* Built-in Long Avg aggregate function
*/
class LongAvgAggFunction extends BigIntegralAvgAggFunction[Long] {
override def resultTypeConvert(value: BigInteger): Long = value.longValue()
}
/** The initial accumulator for Floating Avg aggregate function */
class FloatingAvgAccumulator extends JTuple2[Double, Long] {
f0 = 0 //sum
f1 = 0L //count
}
/**
* Base class for built-in Floating Avg aggregate function
*
* @tparam T the type for the aggregation result
*/
abstract class FloatingAvgAggFunction[T] extends AggregateFunction[T, FloatingAvgAccumulator] {
override def createAccumulator(): FloatingAvgAccumulator = {
new FloatingAvgAccumulator
}
def accumulate(acc: FloatingAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].doubleValue()
acc.f0 += v
acc.f1 += 1L
}
}
def retract(acc: FloatingAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[Number].doubleValue()
acc.f0 -= v
acc.f1 -= 1L
}
}
override def getValue(acc: FloatingAvgAccumulator): T = {
if (acc.f1 == 0) {
null.asInstanceOf[T]
} else {
resultTypeConvert(acc.f0 / acc.f1)
}
}
def merge(acc: FloatingAvgAccumulator, its: JIterable[FloatingAvgAccumulator]): Unit = {
val iter = its.iterator()
while (iter.hasNext) {
val a = iter.next()
acc.f1 += a.f1
acc.f0 += a.f0
}
}
def resetAccumulator(acc: FloatingAvgAccumulator): Unit = {
acc.f0 = 0
acc.f1 = 0L
}
override def getAccumulatorType: TypeInformation[FloatingAvgAccumulator] = {
new TupleTypeInfo(
classOf[FloatingAvgAccumulator],
BasicTypeInfo.DOUBLE_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
/**
* Convert the intermediate result to the expected aggregation result type
*
* @param value the intermediate result. We use a Double container to save
* the intermediate result to avoid overflow in the sum operation.
* @return the result value with the expected aggregation result type
*/
def resultTypeConvert(value: Double): T
}
/**
* Built-in Float Avg aggregate function
*/
class FloatAvgAggFunction extends FloatingAvgAggFunction[Float] {
override def resultTypeConvert(value: Double): Float = value.toFloat
}
/**
* Built-in Double Avg aggregate function
*/
class DoubleAvgAggFunction extends FloatingAvgAggFunction[Double] {
override def resultTypeConvert(value: Double): Double = value
}
/** The initial accumulator for Big Decimal Avg aggregate function */
class DecimalAvgAccumulator extends JTuple2[BigDecimal, Long] {
f0 = BigDecimal.ZERO //sum
f1 = 0L //count
}
/**
* Base class for built-in Big Decimal Avg aggregate function
*/
class DecimalAvgAggFunction extends AggregateFunction[BigDecimal, DecimalAvgAccumulator] {
override def createAccumulator(): DecimalAvgAccumulator = {
new DecimalAvgAccumulator
}
def accumulate(acc: DecimalAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[BigDecimal]
acc.f0 = acc.f0.add(v)
acc.f1 += 1L
}
}
def retract(acc: DecimalAvgAccumulator, value: Any): Unit = {
if (value != null) {
val v = value.asInstanceOf[BigDecimal]
acc.f0 = acc.f0.subtract(v)
acc.f1 -= 1L
}
}
override def getValue(acc: DecimalAvgAccumulator): BigDecimal = {
if (acc.f1 == 0) {
null.asInstanceOf[BigDecimal]
} else {
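// note: java.math.BigDecimal.divide(BigDecimal) throws ArithmeticException when the exact
// quotient has a non-terminating decimal expansion (e.g. a sum of 1 over a count of 3)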
acc.f0.divide(BigDecimal.valueOf(acc.f1))
}
}
def merge(acc: DecimalAvgAccumulator, its: JIterable[DecimalAvgAccumulator]): Unit = {
val iter = its.iterator()
while (iter.hasNext) {
val a = iter.next()
acc.f0 = acc.f0.add(a.f0)
acc.f1 += a.f1
}
}
def resetAccumulator(acc: DecimalAvgAccumulator): Unit = {
acc.f0 = BigDecimal.ZERO
acc.f1 = 0L
}
override def getAccumulatorType: TypeInformation[DecimalAvgAccumulator] = {
new TupleTypeInfo(
classOf[DecimalAvgAccumulator],
BasicTypeInfo.BIG_DEC_TYPE_INFO,
BasicTypeInfo.LONG_TYPE_INFO)
}
}
|
zohar-mizrahi/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/aggfunctions/AvgAggFunction.scala
|
Scala
|
apache-2.0
| 10,006
|
/*
* Copyright 2015-2016 Snowflake Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.snowflake.spark.snowflake
import org.apache.spark.sql.Row
import Utils.SNOWFLAKE_SOURCE_NAME
/**
* Created by mzukowski on 8/13/16.
*/
class FilterPushdownIntegrationSuite extends IntegrationSuiteBase {
private val test_table: String = s"test_table_$randomSuffix"
// Values used for comparison
private val row1 = Row(null, "Hello")
private val row2 = Row(2, "Snowflake")
private val row3 = Row(3, "Spark")
private val row4 = Row(4, null)
override def beforeAll(): Unit = {
super.beforeAll()
jdbcUpdate(s"create or replace table $test_table(i int, s string)")
jdbcUpdate(
s"insert into $test_table values(null, 'Hello'), (2, 'Snowflake'), (3, 'Spark'), (4, null)"
)
}
test("Test Simple Comparisons") {
testFilter(
"s = 'Hello'",
s"""( S IS NOT NULL) AND S = 'Hello'""",
Seq(row1)
)
testFilter("i > 2", s"""( I IS NOT NULL) AND I > 2""", Seq(row3, row4))
testFilter("i < 3", s"""( I IS NOT NULL) AND I < 3""", Seq(row2))
}
// Doesn't work with Spark 1.4.1
test("Test >= and <=") {
testFilter(
"i >= 2",
s"""( I IS NOT NULL) AND I >= 2""",
Seq(row2, row3, row4)
)
testFilter("i <= 3", s"""( I IS NOT NULL) AND I <= 3""", Seq(row2, row3))
}
test("Test logical operators") {
testFilter(
"i >= 2 AND i <= 3",
s"""( I IS NOT NULL) AND I >= 2 AND I <= 3""",
Seq(row2, row3)
)
testFilter(
"NOT i = 3",
s"""( I IS NOT NULL) AND (NOT ( I = 3 ))""",
Seq(row2, row4)
)
testFilter(
"NOT i = 3 OR i IS NULL",
s"""(( (NOT ( I = 3 )) ) OR ( ( I IS NULL) ))""",
Seq(row1, row2, row4)
)
testFilter(
"i IS NULL OR i > 2 AND s IS NOT NULL",
s"""(( ( I IS NULL) ) OR ( (( I > 2 ) AND ( ( S IS NOT NULL) )) ))""",
Seq(row1, row3)
)
}
test("Test IN") {
testFilter("i IN ( 2, 3)", s"""( I IN ( 2 , 3 ))""", Seq(row2, row3))
}
override def afterAll(): Unit = {
try {
jdbcUpdate(s"drop table if exists $test_table")
} finally {
super.afterAll()
}
}
/**
* Verify that the filter is pushed down by looking at the generated SQL,
* and check the results are as expected
*/
def testFilter(filter: String,
expectedWhere: String,
expectedAnswer: Seq[Row]): Unit = {
val loadedDf = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", s"$test_table")
.option("autopushdown", "off")
.load()
.filter(filter)
.sort("i")
checkAnswer(loadedDf, expectedAnswer)
// Verify the query issued is what we expect
val expectedQuery =
s"""SELECT "I", "S" FROM $test_table WHERE $expectedWhere"""
assert(Utils.getLastSelect == expectedQuery)
}
}
|
snowflakedb/spark-snowflakedb
|
src/it/scala/net/snowflake/spark/snowflake/FilterPushdownIntegrationSuite.scala
|
Scala
|
apache-2.0
| 3,458
|
package es.weso.wiFetcher.dao
import cucumber.api.scala.ScalaDsl
import cucumber.api.scala.EN
import org.scalatest.Matchers
import es.weso.wiFetcher.entities.traits.SubIndex
import es.weso.wiFetcher.entities.traits.Component
import es.weso.wiFetcher.fetchers.SpreadsheetsFetcher
import es.weso.wiFetcher.utils.FileUtils
import java.io.FileInputStream
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import es.weso.wiFetcher.dao.poi.SubIndexDAOImpl
import java.io.File
class SubIndexDAOImplSteps extends ScalaDsl with EN with Matchers{
var subIndexDAO : SubIndexDAO = null
var subIndexes : List[SubIndex] = null
var components : List[Component] = null
var subIndex : SubIndex = null
var component : Component = null
var indicators : Int = 0
Given("""^I want to load all information about subindexes in the WebIndex$""") {() =>
val fetcher = SpreadsheetsFetcher(
new File(FileUtils.getFilePath("files/Structure.xlsx", true)),
new File(FileUtils.getFilePath("files/example.xlsx", true)))
subIndexDAO = new SubIndexDAOImpl(new FileInputStream(
FileUtils.getFilePath("files/Structure.xlsx", true)))(fetcher)
subIndexDAO should not be (null)
}
When("""^I check all subindexes and components are loaded$""") {() =>
subIndexes = subIndexDAO.getSubIndexes
components = subIndexDAO.getComponents
components.size should not be (0)
subIndexes.size should not be (0)
}
When("""^I check the subindex with "([^"]*)" "([^"]*)"$""") { (property : String, value : String) =>
subIndex = property match {
case "name" => subIndexes.find(subindex => subindex.names.get("en").get.equals(value)).getOrElse(throw new IllegalArgumentException("There is no subindex with name " + value))
case "id" => subIndexes.find(subindex => subindex.id.equals(value)).getOrElse(throw new IllegalArgumentException("There is no subindex with id " + value))
case _ => throw new IllegalArgumentException
}
}
When("""^I check the numbers of components of subindex "([^"]*)"$""") {(subindex : String) =>
subIndex = subIndexDAO.getSubIndexes.find(sub => sub.id.equals(subindex)).getOrElse(throw new IllegalArgumentException)
}
When("""^I check the component with "([^"]*)" "([^"]*)"$""") { (property : String, value : String) =>
component = property match {
case "name" => components.find(comp => comp.names.get("en").get.equals(value)).getOrElse(throw new IllegalArgumentException("There is no subindex with name " + value))
case "id" => components.find(comp => comp.id.equals(value)).getOrElse(throw new IllegalArgumentException("There is no subindex with id " + value))
case _ => throw new IllegalArgumentException
}
}
Then("""^the component "([^"]*)" should be "([^"]*)"$""") { (property : String, value : String) =>
property match {
case "name" => component.names.get("en").get should be (value)
case "id" => component.id should be (value)
case "description" => component.descriptions.get("en").get should be (value)
case "weight" => component.weight should be (value.toDouble +- 0.0000001f)
}
}
Then("""^the number of components should be "([^"]*)"$""") {(comps : Int) =>
subIndex.getComponents.size should be (comps)
}
Then("""^There are "([^"]*)" subindexes and "([^"]*)" components$""") { (subindexes : Int, comps : Int) =>
subIndexes.size should be (subindexes)
components.size should be (comps)
}
Then("""^the subindex "([^"]*)" should be "([^"]*)"$""") {(property : String, value : String) =>
property match {
case "name" => subIndex.names.get("es").get should be (value)
case "id" => subIndex.id should be (value)
case "description" => subIndex.descriptions.get("en").get should be (value)
case "weight" => subIndex.weight should be (value.toDouble +- 0.0000001f)
}
}
}
|
weso/wiFetcher
|
test/es/weso/wiFetcher/dao/SubIndexDAOImplSteps.scala
|
Scala
|
apache-2.0
| 3,923
|
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.bigquery.client
import com.google.api.client.googleapis.json.GoogleJsonResponseException
import com.google.api.services.bigquery.model._
import com.google.cloud.bigquery.storage.v1beta1.ReadOptions.TableReadOptions
import com.google.cloud.bigquery.storage.v1beta1.Storage._
import com.google.cloud.bigquery.storage.v1beta1.TableReferenceProto
import com.google.cloud.hadoop.util.ApiErrorExtractor
import com.spotify.scio.bigquery.client.BigQuery.Client
import com.spotify.scio.bigquery.{StorageUtil, Table => STable, TableRow}
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericDatumReader, GenericRecord}
import org.apache.avro.io.{BinaryDecoder, DecoderFactory}
import org.apache.beam.sdk.extensions.gcp.options.GcsOptions
import org.apache.beam.sdk.io.gcp.bigquery.BigQueryIO.Write.{CreateDisposition, WriteDisposition}
import org.apache.beam.sdk.io.gcp.bigquery.{BigQueryAvroUtilsWrapper, BigQueryOptions}
import org.apache.beam.sdk.io.gcp.{bigquery => bq}
import org.apache.beam.sdk.options.PipelineOptionsFactory
import org.joda.time.Instant
import org.joda.time.format.DateTimeFormat
import org.slf4j.LoggerFactory
import scala.jdk.CollectionConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import scala.util.control.NonFatal
private[client] object TableOps {
private val Logger = LoggerFactory.getLogger(this.getClass)
private val TablePrefix = "scio_query"
private val TimeFormatter = DateTimeFormat.forPattern("yyyyMMddHHmmss")
private val StagingDatasetPrefix = "scio_bigquery_staging_"
private val StagingDatasetTableExpirationMs = 86400000L
private val StagingDatasetDescription = "Staging dataset for temporary tables"
}
final private[client] class TableOps(client: Client) {
import TableOps._
/** Get rows from a table. */
def rows(table: STable): Iterator[TableRow] =
storageRows(table, TableReadOptions.getDefaultInstance)
def avroRows(table: STable): Iterator[GenericRecord] =
storageAvroRows(table, TableReadOptions.getDefaultInstance)
def storageRows(table: STable, readOptions: TableReadOptions): Iterator[TableRow] =
withBigQueryService { bqServices =>
val tb = bqServices.getTable(table.ref, readOptions.getSelectedFieldsList)
storageAvroRows(table, readOptions).map { gr =>
BigQueryAvroUtilsWrapper.convertGenericRecordToTableRow(gr, tb.getSchema)
}
}
def storageAvroRows(table: STable, readOptions: TableReadOptions): Iterator[GenericRecord] = {
val tableRefProto = TableReferenceProto.TableReference
.newBuilder()
.setDatasetId(table.ref.getDatasetId)
.setTableId(table.ref.getTableId)
.setProjectId(Option(table.ref.getProjectId).getOrElse(client.project))
val request = CreateReadSessionRequest
.newBuilder()
.setTableReference(tableRefProto)
.setReadOptions(readOptions)
.setParent(s"projects/${client.project}")
.setRequestedStreams(1)
.setFormat(DataFormat.AVRO)
.build()
val session = client.storage.createReadSession(request)
val readRowsRequest = ReadRowsRequest
.newBuilder()
.setReadPosition(
StreamPosition
.newBuilder()
.setStream(session.getStreams(0))
)
.build()
val schema = new Schema.Parser().parse(session.getAvroSchema.getSchema)
val reader = new GenericDatumReader[GenericRecord](schema)
val responses = client.storage.readRowsCallable().call(readRowsRequest).asScala
var decoder: BinaryDecoder = null
responses.iterator.flatMap { resp =>
val bytes = resp.getAvroRows.getSerializedBinaryRows.toByteArray
decoder = DecoderFactory.get().binaryDecoder(bytes, decoder)
val res = ArrayBuffer.empty[GenericRecord]
while (!decoder.isEnd) {
res += reader.read(null, decoder)
}
res.iterator
}
}
/** Get schema from a table. */
def schema(tableSpec: String): TableSchema =
schema(bq.BigQueryHelpers.parseTableSpec(tableSpec))
/** Get schema from a table. */
def schema(tableRef: TableReference): TableSchema =
Cache.getOrElse(bq.BigQueryHelpers.toTableSpec(tableRef), Cache.SchemaCache)(
table(tableRef).getSchema
)
/** Get schema from a table using the storage API. */
def storageReadSchema(
tableSpec: String,
selectedFields: List[String] = Nil,
rowRestriction: Option[String] = None
): Schema =
Cache.getOrElse(
s"""$tableSpec;${selectedFields
.mkString(",")};$rowRestriction""",
Cache.SchemaCache
) {
val tableRef = bq.BigQueryHelpers.parseTableSpec(tableSpec)
val tableRefProto = TableReferenceProto.TableReference
.newBuilder()
.setProjectId(Option(tableRef.getProjectId).getOrElse(client.project))
.setDatasetId(tableRef.getDatasetId)
.setTableId(tableRef.getTableId)
val request = CreateReadSessionRequest
.newBuilder()
.setTableReference(tableRefProto)
.setReadOptions(StorageUtil.tableReadOptions(selectedFields, rowRestriction))
.setParent(s"projects/${client.project}")
.build()
val session = client.storage.createReadSession(request)
new Schema.Parser().parse(session.getAvroSchema.getSchema)
}
/** Get table metadata. */
def table(tableSpec: String): Table =
table(bq.BigQueryHelpers.parseTableSpec(tableSpec))
/** Get table metadata. */
def table(tableRef: TableReference): Table = {
val p = Option(tableRef.getProjectId).getOrElse(client.project)
client.underlying.tables().get(p, tableRef.getDatasetId, tableRef.getTableId).execute()
}
/** Get list of tables in a dataset. */
def tableReferences(projectId: String, datasetId: String): Seq[TableReference] =
tableReferences(Option(projectId), datasetId)
/** Get list of tables in a dataset. */
def tableReferences(projectId: Option[String], datasetId: String): Seq[TableReference] = {
val b = Seq.newBuilder[TableReference]
val req = client.underlying.tables().list(projectId.getOrElse(client.project), datasetId)
var rep = req.execute()
Option(rep.getTables).foreach(_.asScala.foreach(b += _.getTableReference))
while (rep.getNextPageToken != null) {
rep = req.setPageToken(rep.getNextPageToken).execute()
Option(rep.getTables)
.foreach(_.asScala.foreach(b += _.getTableReference))
}
b.result()
}
def create(table: Table): Unit = withBigQueryService(_.createTable(table))
def create(table: TableReference, schema: TableSchema): Unit =
create(new Table().setTableReference(table).setSchema(schema))
def create(tableSpec: String, schema: TableSchema): Unit =
create(bq.BigQueryHelpers.parseTableSpec(tableSpec), schema)
/**
* Check if table exists. Returns `true` if the table exists, `false` if the table definitely
* does not exist, and throws in other cases (BigQuery exception, network issue, etc.).
*/
def exists(tableRef: TableReference): Boolean =
try {
table(tableRef)
true
} catch {
case e: GoogleJsonResponseException
if e.getDetails.getErrors.get(0).getReason == "notFound" =>
false
case e: Throwable => throw e
}
/**
* Check if table exists. Returns `true` if the table exists, `false` if the table definitely
* does not exist, and throws in other cases (BigQuery exception, network issue, etc.).
*/
def exists(tableSpec: String): Boolean =
exists(bq.BigQueryHelpers.parseTableSpec(tableSpec))
/** Write rows to a table. */
def writeRows(
tableReference: TableReference,
rows: List[TableRow],
schema: TableSchema,
writeDisposition: WriteDisposition,
createDisposition: CreateDisposition
): Long = withBigQueryService { service =>
val table = new Table().setTableReference(tableReference).setSchema(schema)
if (createDisposition == CreateDisposition.CREATE_IF_NEEDED) {
service.createTable(table)
}
writeDisposition match {
case WriteDisposition.WRITE_TRUNCATE =>
delete(tableReference)
service.createTable(table)
case WriteDisposition.WRITE_EMPTY =>
require(service.isTableEmpty(tableReference))
case WriteDisposition.WRITE_APPEND =>
}
service.insertAll(tableReference, rows.asJava)
}
/** Write rows to a table. */
def writeRows(
tableSpec: String,
rows: List[TableRow],
schema: TableSchema = null,
writeDisposition: WriteDisposition = WriteDisposition.WRITE_APPEND,
createDisposition: CreateDisposition = CreateDisposition.CREATE_IF_NEEDED
): Long =
writeRows(
bq.BigQueryHelpers.parseTableSpec(tableSpec),
rows,
schema,
writeDisposition,
createDisposition
)
private[bigquery] def withBigQueryService[T](f: bq.BigQueryServicesWrapper => T): T = {
val options = PipelineOptionsFactory
.create()
.as(classOf[BigQueryOptions])
options.setProject(client.project)
options.setGcpCredential(client.credentials)
try {
f(new bq.BigQueryServicesWrapper(options))
} finally {
Option(options.as(classOf[GcsOptions]).getExecutorService)
.foreach(_.shutdown())
}
}
/** Delete table */
private[bigquery] def delete(table: TableReference): Unit = {
client.underlying
.tables()
.delete(
Option(table.getProjectId).getOrElse(client.project),
table.getDatasetId,
table.getTableId
)
.execute()
()
}
/* Create a staging dataset at a specified location, e.g. US */
private[bigquery] def prepareStagingDataset(location: String): Unit = {
val datasetId = stagingDatasetId(location)
try {
client.underlying.datasets().get(client.project, datasetId).execute()
Logger.info(s"Staging dataset ${client.project}:$datasetId already exists")
} catch {
case e: GoogleJsonResponseException if ApiErrorExtractor.INSTANCE.itemNotFound(e) =>
Logger.info(s"Creating staging dataset ${client.project}:$datasetId")
val dsRef = new DatasetReference().setProjectId(client.project).setDatasetId(datasetId)
val ds = new Dataset()
.setDatasetReference(dsRef)
.setDefaultTableExpirationMs(StagingDatasetTableExpirationMs)
.setDescription(StagingDatasetDescription)
.setLocation(location)
client.underlying
.datasets()
.insert(client.project, ds)
.execute()
()
case NonFatal(e) => throw e
}
}
/* Creates a temporary table in the staging dataset */
private[bigquery] def createTemporary(location: String): TableReference = {
val now = Instant.now().toString(TimeFormatter)
val tableId = TablePrefix + "_" + now + "_" + Random.nextInt(Int.MaxValue)
new TableReference()
.setProjectId(client.project)
.setDatasetId(stagingDatasetId(location))
.setTableId(tableId)
}
private def stagingDatasetId(location: String): String =
StagingDatasetPrefix + location.toLowerCase.replaceAll("-", "_")
}
|
spotify/scio
|
scio-google-cloud-platform/src/main/scala/com/spotify/scio/bigquery/client/TableOps.scala
|
Scala
|
apache-2.0
| 11,655
|
package org.scalatra
import test.scalatest.ScalatraFunSuite
class ErrorHandlerTest extends ScalatraFunSuite {
trait TestException extends RuntimeException
case class Exception1() extends TestException
case class Exception2() extends TestException
class BaseServlet extends ScalatraServlet {
get("/1") {
status = 418
throw new Exception1
}
get("/uncaught") { throw new RuntimeException }
error { case e: TestException => "base" }
}
class ChildServlet extends BaseServlet {
get("/2") { throw new Exception2 }
error { case e: Exception2 => "child" }
}
addServlet(new BaseServlet, "/base/*")
addServlet(new ChildServlet, "/child/*")
test("result of error handler should be rendered") {
get("/base/1") {
body should equal ("base")
}
}
test("error handlers are composable") {
get("/child/2") {
body should equal ("child")
}
get("/child/1") {
body should equal ("base")
}
}
test("response status should not be set on error") {
get("/base/1") {
status should equal (418)
}
}
test("rethrows uncaught exceptions") {
get("/base/uncaught") {
status should equal (500)
}
}
}
|
kuochaoyi/scalatra
|
core/src/test/scala/org/scalatra/ErrorHandlerTest.scala
|
Scala
|
bsd-2-clause
| 1,212
|
// Copyright 2016 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scala.test
object ScalaLibResources {
def getGreetings() = get("hellos")
def getFarewells = get("byes")
def getData = get("/test/data/some.txt")
private def get(s: String): List[String] =
scala.io.Source
.fromInputStream(getClass.getResourceAsStream(s))
.getLines
.toList
}
|
sdtwigg/rules_scala
|
test/src/main/scala/scala/test/ScalaLibResources.scala
|
Scala
|
apache-2.0
| 932
|
package edu.rice.habanero.benchmarks.logmap
import edu.rice.habanero.actors.HabaneroActor
import edu.rice.habanero.benchmarks.logmap.LogisticMapConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import edu.rice.hj.Module0._
import edu.rice.hj.api.{HjDataDrivenFuture, HjRunnable, HjSuspendable}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object LogisticMapHabaneroPauseResumeActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new LogisticMapHabaneroPauseResumeActorBenchmark)
}
private final class LogisticMapHabaneroPauseResumeActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
LogisticMapConfig.parseArgs(args)
}
def printArgInfo() {
LogisticMapConfig.printArgs()
}
def runIteration() {
var master: Master = null
finish(new HjSuspendable {
override def run() = {
master = new Master()
master.start()
master.send(StartMessage.ONLY)
}
})
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
}
}
private class Master extends HabaneroActor[AnyRef] {
private final val self = this
private final val numComputers: Int = LogisticMapConfig.numSeries
private final val computers = Array.tabulate[RateComputer](numComputers)(i => {
val rate = LogisticMapConfig.startRate + (i * LogisticMapConfig.increment)
new RateComputer(rate)
})
private final val numWorkers: Int = LogisticMapConfig.numSeries
private final val workers = Array.tabulate[SeriesWorker](numWorkers)(i => {
val rateComputer = computers(i % numComputers)
val startTerm = i * LogisticMapConfig.increment
new SeriesWorker(i, self, rateComputer, startTerm)
})
private var numWorkRequested: Int = 0
private var numWorkReceived: Int = 0
private var termsSum: Double = 0
protected override def onPostStart() {
computers.foreach(loopComputer => {
loopComputer.start()
})
workers.foreach(loopWorker => {
loopWorker.start()
})
}
override def process(theMsg: AnyRef) {
theMsg match {
case _: StartMessage =>
var i: Int = 0
while (i < LogisticMapConfig.numTerms) {
// request each worker to compute the next term
workers.foreach(loopWorker => {
loopWorker.send(NextTermMessage.ONLY)
})
i += 1
}
// workers should stop after all items have been computed
workers.foreach(loopWorker => {
loopWorker.send(GetTermMessage.ONLY)
numWorkRequested += 1
})
case rm: ResultMessage =>
termsSum += rm.term
numWorkReceived += 1
if (numWorkRequested == numWorkReceived) {
println("Terms sum: " + termsSum)
computers.foreach(loopComputer => {
loopComputer.send(StopMessage.ONLY)
})
workers.foreach(loopWorker => {
loopWorker.send(StopMessage.ONLY)
})
exit()
}
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
}
private class SeriesWorker(id: Int, master: Master, computer: RateComputer, startTerm: Double) extends HabaneroActor[AnyRef] {
private final val self = this
private final val curTerm = Array.tabulate[Double](1)(i => startTerm)
override def process(theMsg: AnyRef) {
theMsg match {
case computeMessage: NextTermMessage =>
val sender = newDataDrivenFuture[ResultMessage]()
val newMessage = new ComputeMessage(sender, curTerm(0))
computer.send(newMessage)
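// pause message processing until the RateComputer resolves the data-driven future;
// resume() is invoked in the callback below once the next term is available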
pause()
asyncNbAwait(sender, new HjRunnable {
override def run() = {
val resultMessage = sender.get()
curTerm(0) = resultMessage.term
resume()
}
})
case _: GetTermMessage =>
master.send(new ResultMessage(curTerm(0)))
case _: StopMessage =>
exit()
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
}
private class RateComputer(rate: Double) extends HabaneroActor[AnyRef] {
override def process(theMsg: AnyRef) {
theMsg match {
case computeMessage: ComputeMessage =>
val result = computeNextTerm(computeMessage.term, rate)
val resultMessage = new ResultMessage(result)
val sender = computeMessage.sender.asInstanceOf[HjDataDrivenFuture[ResultMessage]]
sender.put(resultMessage)
case _: StopMessage =>
exit()
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
}
}
|
shamsmahmood/savina
|
src/main/scala/edu/rice/habanero/benchmarks/logmap/LogisticMapHabaneroPauseResumeActorBenchmark.scala
|
Scala
|
gpl-2.0
| 5,106
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.query.adjusters
import com.twitter.zipkin.common._
import com.twitter.zipkin.Constants
import scala.collection.Map
class TimeSkewAdjuster extends Adjuster {
val TimeSkewAddServerRecv = AdjusterWarning("TIME_SKEW_ADD_SERVER_RECV")
val TimeSkewAddServerSend = AdjusterWarning("TIME_SKEW_ADD_SERVER_SEND")
/**
* Adjusts Span timestamps so that each child happens after its parent.
* This counteracts clock skew on servers; we want the Trace to happen in order.
*/
def adjust(trace: Trace): Trace = {
trace.getRootSpan match {
case Some(s) => Trace(adjust(trace.getSpanTree(s, trace.getIdToChildrenMap), None))
case None => trace
}
}
private[this] def adjust(span: SpanTreeEntry, previousSkew: Option[ClockSkew]): AdjusterSpanTreeEntry = {
adjust(AdjusterSpanTreeEntry(span), previousSkew)
}
/**
* Recursively adjust the timestamps on the span tree. Root span is the reference point,
* all children's timestamps gets adjusted based on that span's timestamps.
*/
private[this] def adjust(span: AdjusterSpanTreeEntry, previousSkew: Option[ClockSkew]) : AdjusterSpanTreeEntry = {
val previousAdjustedSpan = previousSkew match {
// adjust skew for particular endpoint brought over from the parent span
case Some(endpointSkew) => adjustTimestamps(span, endpointSkew)
case None => span
}
val validatedSpan = validateSpan(previousAdjustedSpan)
// find out if the server endpoint has any skew compared to the client
getClockSkew(validatedSpan.span) match {
case Some(es) =>
val adjustedSpan = adjustTimestamps(validatedSpan, es)
// adjusting the timestamp of the endpoint with the skew
// both for the current span and the direct children
new AdjusterSpanTreeEntry(
adjustedSpan.span,
adjustedSpan.children.map(adjust(_, Some(es))).toList,
adjustedSpan.messages
)
case None =>
new AdjusterSpanTreeEntry(
validatedSpan.span,
validatedSpan.children.map(adjust(_, None)).toList,
validatedSpan.messages
)
// could not figure out clock skew, return untouched.
}
}
/**
* Misc fixes to make sure the clock skew is adjusted correctly:
* - For spans that only have CLIENT_SEND and CLIENT_RECEIVE annotations,
* create the corresponding SERVER_ annotations and propagate the skew
* adjustment to the children. TimeSkewAdjuster assumes that for a
* particular service, there is no skew between the server and client
   *   annotations; when the server annotations are missing, the children are
   *   effectively cut off from the skew estimate and we can't adjust them accordingly.
* The created SERVER_ annotations have the same timestamp as the CLIENT_
* annotations.
* NOTE: this could also be refactored out into its own adjuster that would
* run before TimeSkewAdjuster, if necessary
*/
private[this] def validateSpan(spanTree: AdjusterSpanTreeEntry): AdjusterSpanTreeEntry = {
// if we have only CS and CR, inject a SR and SS
val span = spanTree.span
val annotationsMap = span.getAnnotationsAsMap
var annotations = span.annotations
var children = spanTree.children
var warnings = spanTree.messages
if (span.isValid &&
spanTree.children.length > 0 &&
containsClientCoreAnnotations(annotationsMap) &&
!containsServerCoreAnnotations(annotationsMap)
) {
      // pick up the endpoint from the first child's client send, if it exists
val endpoint = spanTree.children.head.span.clientSideAnnotations match {
case head :: _ => head.host
case _ => None
}
val serverRecvTs = span.getAnnotation(Constants.ClientSend) match {
case Some(a) =>
annotations = annotations :+ Annotation(a.timestamp, Constants.ServerRecv, endpoint)
warnings = warnings :+ TimeSkewAddServerRecv
a.timestamp
case _ => // This should never actually happen since we checked in the IF
throw new AdjusterException
}
val serverSendTs = span.getAnnotation(Constants.ClientRecv) match {
case Some(a) =>
annotations = annotations :+ Annotation(a.timestamp, Constants.ServerSend, endpoint)
warnings = warnings :+ TimeSkewAddServerSend
a.timestamp
case _ => // This should never actually happen since we checked in the IF
throw new AdjusterException
}
/**
       * We need to manually propagate the clock skew for the children, since
       * the assumption that there is no clock skew within the same endpoint no
       * longer holds if the children have clock skew.
*
* Since we're computing the skew in a particular endpoint, we use the
* span's SERVER_* annotations as the client arguments and the
* child's CLIENT_* annotations as the server arguments
*/
children = children map { c =>
c.span.getAnnotationsAsMap match {
case csa if containsClientCoreAnnotations(csa) =>
val clientSendTs = csa(Constants.ClientSend).timestamp
val clientRecvTs = csa(Constants.ClientRecv).timestamp
endpoint flatMap { getClockSkew(serverRecvTs, serverSendTs, clientSendTs, clientRecvTs, _) } match {
case Some(endpointSkew) => adjustTimestamps(c, endpointSkew)
case _ => c
}
case _ =>
// doesn't have all client core annotations, so don't do anything
c
}
}
}
val newSpan = Span(span.traceId, span.name, span.id, span.parentId, annotations, span.binaryAnnotations)
new AdjusterSpanTreeEntry(newSpan, children, warnings)
}
/**
* Do we have all the annotations we need to calculate clock skew?
*/
private[this] def containsAllCoreAnnotations(annotations: Map[String, Annotation]): Boolean = {
containsClientCoreAnnotations(annotations) && containsServerCoreAnnotations(annotations)
}
private[this] def containsClientCoreAnnotations(annotations: Map[String, Annotation]): Boolean = {
annotations.contains(Constants.ClientSend) && annotations.contains(Constants.ClientRecv)
}
private[this] def containsServerCoreAnnotations(annotations: Map[String, Annotation]): Boolean = {
annotations.contains(Constants.ServerSend) && annotations.contains(Constants.ServerRecv)
}
/**
* Get the endpoint out of the first matching annotation.
*/
private[this] def getEndpoint(
allAnnotations: Map[String, Annotation],
findByAnnotations: List[String]
): Option[Endpoint] = {
findByAnnotations.map(allAnnotations.get(_).flatMap(_.host)).head
}
case class ClockSkew(endpoint: Endpoint, skew: Long)
/**
* Calculate the clock skew between two servers based
* on the annotations in a span.
*/
private[this] def getClockSkew(span: Span): Option[ClockSkew] = {
val annotations = span.getAnnotationsAsMap
if (!containsAllCoreAnnotations(annotations)) None else {
getEndpoint(annotations, List(Constants.ServerRecv, Constants.ServerSend)) flatMap { ep =>
getClockSkew(
getTimestamp(annotations, Constants.ClientSend),
getTimestamp(annotations, Constants.ClientRecv),
getTimestamp(annotations, Constants.ServerRecv),
getTimestamp(annotations, Constants.ServerSend),
ep
)
}
}
}
/**
* Calculate the clock skew between two servers based on annotations in a span
*
* Only adjust for clock skew if the core annotations are not in the following order:
* - Client send
* - Server receive
* - Server send
* - Client receive
*
* Special case: if the server (child) span is longer than the client (parent), then do not
* adjust for clock skew.
*/
private[this] def getClockSkew(
clientSend: Long,
clientRecv: Long,
serverRecv: Long,
serverSend: Long,
endpoint: Endpoint
): Option[ClockSkew] = {
val clientDuration = clientRecv - clientSend
val serverDuration = serverSend - serverRecv
// There is only clock skew if CS is after SR or CR is before SS
val csAhead = clientSend < serverRecv
val crAhead = clientRecv > serverSend
if (serverDuration > clientDuration || (csAhead && crAhead)) None else {
val latency = (clientDuration - serverDuration) / 2
(serverRecv - latency - clientSend) match {
case 0 => None
case _ => Some(ClockSkew(endpoint, serverRecv - latency - clientSend))
}
}
}
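  // Added illustration (not in the original source): a worked example of the formula above.
  // With clientSend = 100, clientRecv = 200, serverRecv = 50, serverSend = 90:
  //   clientDuration = 100, serverDuration = 40, latency = (100 - 40) / 2 = 30,
  //   skew = serverRecv - latency - clientSend = 50 - 30 - 100 = -80.
  // adjustTimestamps subtracts this skew from the server endpoint's annotations,
  // shifting serverRecv to 130 and serverSend to 170 and restoring the
  // CS <= SR <= SS <= CR ordering.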
/**
* Extract timestamp for this particular event value.
*/
private[this] def getTimestamp(annotations: Map[String, Annotation], value: String) : Long = {
annotations.get(value) match {
case Some(a) => a.timestamp
case None => throw new IncompleteTraceDataException("Could not find annotation matching " + value)
}
}
/**
* Adjust the span's annotation timestamps for the endpoint by skew ms.
*/
private[this] def adjustTimestamps(spanTree: AdjusterSpanTreeEntry, clockSkew: ClockSkew) : AdjusterSpanTreeEntry = {
if (clockSkew.skew == 0) spanTree else {
def isHost(ep: Endpoint, value: String): Boolean =
clockSkew.endpoint.ipv4 == ep.ipv4 ||
(value == Constants.ClientRecv || value == Constants.ClientSend) &&
Constants.LocalhostLoopBackIP == ep.ipv4
val span = spanTree.span
val annotations = span.annotations map { a =>
a.host match {
case Some(ep) if isHost(ep, a.value) => a.copy(timestamp = a.timestamp - clockSkew.skew)
case _ => a
}
}
new AdjusterSpanTreeEntry(span.copy(annotations = annotations), spanTree.children, spanTree.messages)
}
}
}
|
jfeltesse-mdsol/zipkin
|
zipkin-query/src/main/scala/com/twitter/zipkin/query/adjusters/TimeSkewAdjuster.scala
|
Scala
|
apache-2.0
| 10,396
|
package edu.rice.habanero.benchmarks.bitonicsort
import java.util.Random
import edu.rice.habanero.actors.{FuncJavaActor, FuncJavaActorState, FuncJavaPool}
import edu.rice.habanero.benchmarks.philosopher.PhilosopherAkkaActorBenchmark.ExitMessage
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import scala.collection.mutable.ListBuffer
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object BitonicSortFuncJavaActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new BitonicSortFuncJavaActorBenchmark)
}
private final class BitonicSortFuncJavaActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
BitonicSortConfig.parseArgs(args)
}
def printArgInfo() {
BitonicSortConfig.printArgs()
}
def runIteration() {
val validationActor = new ValidationActor(BitonicSortConfig.N)
validationActor.start()
val adapterActor = new DataValueAdapterActor(validationActor)
adapterActor.start()
val kernelActor = new BitonicSortKernelActor(BitonicSortConfig.N, true, adapterActor)
kernelActor.start()
val sourceActor = new IntSourceActor(BitonicSortConfig.N, BitonicSortConfig.M, BitonicSortConfig.S, kernelActor)
sourceActor.start()
sourceActor.send(StartMessage())
FuncJavaActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
if (lastIteration) {
FuncJavaPool.shutdown()
}
}
}
private case class NextActorMessage(actor: FuncJavaActor[AnyRef])
private case class ValueMessage(value: Long)
private case class DataMessage(orderId: Int, value: Long)
private case class StartMessage()
private class ValueDataAdapterActor(orderId: Int, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
nextActor.send(new DataMessage(orderId, vm.value))
case dm: DataMessage =>
nextActor.send(dm)
case em: ExitMessage =>
nextActor.send(em)
exit()
}
}
}
private class DataValueAdapterActor(nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
nextActor.send(vm)
case dm: DataMessage =>
nextActor.send(new ValueMessage(dm.value))
case em: ExitMessage =>
nextActor.send(em)
exit()
}
}
}
private class RoundRobinSplitterActor(name: String, length: Int, receivers: Array[FuncJavaActor[AnyRef]]) extends FuncJavaActor[AnyRef] {
private var receiverIndex = 0
private var currentRun = 0
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
receivers(receiverIndex).send(vm)
currentRun += 1
if (currentRun == length) {
receiverIndex = (receiverIndex + 1) % receivers.length
currentRun = 0
}
case em: ExitMessage =>
receivers.foreach(loopActor => loopActor.send(em))
exit()
}
}
}
private class RoundRobinJoinerActor(name: String, length: Int, numJoiners: Int, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
private val receivedData = Array.tabulate[ListBuffer[DataMessage]](numJoiners)(i => new ListBuffer[DataMessage]())
private var forwardIndex = 0
private var currentRun = 0
private var exitsReceived = 0
override def process(msg: AnyRef) {
msg match {
case dm: DataMessage =>
receivedData(dm.orderId).append(dm)
tryForwardMessages(dm)
case em: ExitMessage =>
exitsReceived += 1
if (exitsReceived == numJoiners) {
nextActor.send(em)
exit()
}
}
}
def tryForwardMessages(dm: DataMessage) {
while (receivedData(forwardIndex).nonEmpty) {
val dm = receivedData(forwardIndex).remove(0)
val vm = new ValueMessage(dm.value)
nextActor.send(vm)
currentRun += 1
if (currentRun == length) {
forwardIndex = (forwardIndex + 1) % numJoiners
currentRun = 0
}
}
}
}
/**
* Compares the two input keys and exchanges their order if they are not sorted.
*
* sortDirection determines if the sort is nondecreasing (UP) [true] or nonincreasing (DOWN) [false].
*/
private class CompareExchangeActor(orderId: Int, sortDirection: Boolean, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
private var k1: Long = 0
private var valueAvailable = false
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
if (!valueAvailable) {
valueAvailable = true
k1 = vm.value
} else {
valueAvailable = false
val k2 = vm.value
val (minK, maxK) = if (k1 <= k2) (k1, k2) else (k2, k1)
if (sortDirection) {
// UP sort
nextActor.send(DataMessage(orderId, minK))
nextActor.send(DataMessage(orderId, maxK))
} else {
// DOWN sort
nextActor.send(DataMessage(orderId, maxK))
nextActor.send(DataMessage(orderId, minK))
}
}
case em: ExitMessage =>
nextActor.send(em)
exit()
}
}
}
/**
* Partition the input bitonic sequence of length L into two bitonic sequences of length L/2,
* with all numbers in the first sequence <= all numbers in the second sequence if sortdir is UP (similar case for DOWN sortdir)
*
* Graphically, it is a bunch of CompareExchanges with same sortdir, clustered together in the sort network at a particular step (of some merge stage).
*/
private class PartitionBitonicSequenceActor(orderId: Int, length: Int, sortDir: Boolean, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
val halfLength = length / 2
val forwardActor = {
val actor = new ValueDataAdapterActor(orderId, nextActor)
actor.start()
actor
}
val joinerActor = {
val actor = new RoundRobinJoinerActor("Partition-" + orderId, 1, halfLength, forwardActor)
actor.start()
actor
}
val workerActors = Array.tabulate[FuncJavaActor[AnyRef]](halfLength)(i => {
val actor = new CompareExchangeActor(i, sortDir, joinerActor)
actor.start()
actor
})
val splitterActor = {
val actor = new RoundRobinSplitterActor("Partition-" + orderId, 1, workerActors)
actor.start()
actor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
splitterActor.send(vm)
case em: ExitMessage =>
splitterActor.send(em)
exit()
}
}
}
/**
* One step of a particular merge stage (used by all merge stages except the last)
*
* directionCounter determines which step we are in the current merge stage (which in turn is determined by <L, numSeqPartitions>)
*/
private class StepOfMergeActor(orderId: Int, length: Int, numSeqPartitions: Int, directionCounter: Int, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
val forwardActor = {
val actor = new DataValueAdapterActor(nextActor)
actor.start()
actor
}
val joinerActor = {
val actor = new RoundRobinJoinerActor("StepOfMerge-" + orderId + ":" + length, length, numSeqPartitions, forwardActor)
actor.start()
actor
}
val workerActors = Array.tabulate[FuncJavaActor[AnyRef]](numSeqPartitions)(i => {
// finding out the currentDirection is a bit tricky -
// the direction depends only on the subsequence number during the FIRST step.
// So to determine the FIRST step subsequence to which this sequence belongs, divide this sequence's number j by directionCounter
      // (because 'directionCounter' tells how many subsequences of the current step make up one subsequence of the FIRST step).
// Then, test if that result is even or odd to determine if currentDirection is UP or DOWN respectively.
val currentDirection = (i / directionCounter) % 2 == 0
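      // Added illustration (not in the original source): with directionCounter = 2,
      // workers i = 0,1 get currentDirection = true (UP), i = 2,3 get false (DOWN),
      // i = 4,5 true again, and so on - the direction flips every 'directionCounter' workers.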
// The last step needs special care to avoid split-joins with just one branch.
if (length > 2) {
val actor = new PartitionBitonicSequenceActor(i, length, currentDirection, joinerActor)
actor.start()
actor
} else {
// PartitionBitonicSequence of the last step (L=2) is simply a CompareExchange
val actor = new CompareExchangeActor(i, currentDirection, joinerActor)
actor.start()
actor
}
})
val splitterActor = {
val actor = new RoundRobinSplitterActor("StepOfMerge-" + orderId + ":" + length, length, workerActors)
actor.start()
actor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
splitterActor.send(vm)
case em: ExitMessage =>
splitterActor.send(em)
exit()
}
}
}
/**
* One step of the last merge stage
*
   * The main difference from StepOfMerge is the direction of the sort.
* It is always in the same direction - sortdir.
*/
private class StepOfLastMergeActor(length: Int, numSeqPartitions: Int, sortDirection: Boolean, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
val joinerActor = {
val actor = new RoundRobinJoinerActor("StepOfLastMerge-" + length, length, numSeqPartitions, nextActor)
actor.start()
actor
}
val workerActors = Array.tabulate[FuncJavaActor[AnyRef]](numSeqPartitions)(i => {
// The last step needs special care to avoid split-joins with just one branch.
if (length > 2) {
val actor = new PartitionBitonicSequenceActor(i, length, sortDirection, joinerActor)
actor.start()
actor
} else {
// PartitionBitonicSequence of the last step (L=2) is simply a CompareExchange
val actor = new CompareExchangeActor(i, sortDirection, joinerActor)
actor.start()
actor
}
})
val splitterActor = {
val actor = new RoundRobinSplitterActor("StepOfLastMerge-" + length, length, workerActors)
actor.start()
actor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
splitterActor.send(vm)
case em: ExitMessage =>
splitterActor.send(em)
exit()
}
}
}
/**
* Divide the input sequence of length N into subsequences of length P and sort each of them
   * (either UP or DOWN depending on which subsequence number [0 to N/P-1] they get:
   * all even subsequences are sorted UP and all odd subsequences are sorted DOWN).
* In short, a MergeStage is N/P Bitonic Sorters of order P each.
* But, this MergeStage is implemented *iteratively* as logP STEPS.
*/
private class MergeStageActor(P: Int, N: Int, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
val forwardActor = {
var loopActor: FuncJavaActor[AnyRef] = nextActor
      // for each of the logP steps (except the last step) of this merge stage
var i = P / 2
while (i >= 1) {
// length of each sequence for the current step - goes like P, P/2, ..., 2.
val L = P / i
// numSeqPartitions is the number of PartitionBitonicSequence-rs in this step
val numSeqPartitions = (N / P) * i
val directionCounter = i
val tempActor = new StepOfMergeActor(i, L, numSeqPartitions, directionCounter, loopActor)
tempActor.start()
loopActor = tempActor
i /= 2
}
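      // Added illustration (not in the original source): the chain is built back to front,
      // so for P = 8 and N = 16 the data flows through steps with (L, numSeqPartitions) =
      // (8, 2), then (4, 4), then (2, 8) before reaching nextActor.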
loopActor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
forwardActor.send(vm)
case em: ExitMessage =>
forwardActor.send(em)
exit()
}
}
}
/**
* The LastMergeStage is basically one Bitonic Sorter of order N i.e.,
* it takes the bitonic sequence produced by the previous merge stages
* and applies a bitonic merge on it to produce the final sorted sequence.
*
* This is implemented iteratively as logN steps.
*/
private class LastMergeStageActor(N: Int, sortDirection: Boolean, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
val forwardActor = {
var loopActor: FuncJavaActor[AnyRef] = nextActor
      // for each of the logN steps (except the last step) of this merge stage
var i = N / 2
while (i >= 1) {
// length of each sequence for the current step - goes like N, N/2, ..., 2.
val L = N / i
// numSeqPartitions is the number of PartitionBitonicSequence-rs in this step
val numSeqPartitions = i
val tempActor = new StepOfLastMergeActor(L, numSeqPartitions, sortDirection, loopActor)
tempActor.start()
loopActor = tempActor
i /= 2
}
loopActor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
forwardActor.send(vm)
case em: ExitMessage =>
forwardActor.send(em)
exit()
}
}
}
/**
* The top-level kernel of bitonic-sort (iterative version) -
   * It has logN merge stages, and all merge stages except the last progressively build a bitonic sequence out of the input sequence.
* The last merge stage acts on the resultant bitonic sequence to produce the final sorted sequence (sortdir determines if it is UP or DOWN).
*/
private class BitonicSortKernelActor(N: Int, sortDirection: Boolean, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
val forwardActor = {
var loopActor: FuncJavaActor[AnyRef] = nextActor
{
val tempActor = new LastMergeStageActor(N, sortDirection, loopActor)
tempActor.start()
loopActor = tempActor
}
var i = N / 2
while (i >= 2) {
val tempActor = new MergeStageActor(i, N, loopActor)
tempActor.start()
loopActor = tempActor
i /= 2
}
loopActor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
forwardActor.send(vm)
case em: ExitMessage =>
forwardActor.send(em)
exit()
}
}
}
private class IntSourceActor(numValues: Int, maxValue: Long, seed: Long, nextActor: FuncJavaActor[AnyRef]) extends FuncJavaActor[AnyRef] {
private val random = new Random(seed)
private val sb = new StringBuilder()
override def process(msg: AnyRef) {
msg match {
case nm: StartMessage =>
var i = 0
while (i < numValues) {
val candidate = Math.abs(random.nextLong()) % maxValue
if (BitonicSortConfig.debug) {
sb.append(candidate + " ")
}
val message = new ValueMessage(candidate)
nextActor.send(message)
i += 1
}
if (BitonicSortConfig.debug) {
println(" SOURCE: " + sb)
}
nextActor.send(ExitMessage())
exit()
}
}
}
private class ValidationActor(numValues: Int) extends FuncJavaActor[AnyRef] {
private var sumSoFar = 0.0
private var valuesSoFar = 0
private var prevValue = 0L
private var errorValue = (-1L, -1)
private val sb = new StringBuilder()
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
valuesSoFar += 1
if (BitonicSortConfig.debug) {
sb.append(vm.value + " ")
}
if (vm.value < prevValue && errorValue._1 < 0) {
errorValue = (vm.value, valuesSoFar - 1)
}
prevValue = vm.value
sumSoFar += prevValue
case em: ExitMessage =>
if (valuesSoFar == numValues) {
if (BitonicSortConfig.debug) {
println(" OUTPUT: " + sb)
}
if (errorValue._1 >= 0) {
println(" ERROR: Value out of place: " + errorValue._1 + " at index " + errorValue._2)
} else {
println(" Elements sum: " + sumSoFar)
}
} else {
println(" ERROR: early exit triggered, received only " + valuesSoFar + " values!")
}
exit()
}
}
}
}
|
smarr/savina
|
src/main/scala/edu/rice/habanero/benchmarks/bitonicsort/BitonicSortFuncJavaActorBenchmark.scala
|
Scala
|
gpl-2.0
| 16,684
|
package io.opencensus.scala.akka.http.utils
import akka.NotUsed
import akka.http.scaladsl.model.{HttpEntity, HttpResponse}
import akka.stream.scaladsl.Flow
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
object ExecuteAfterResponse {
private class AfterResponseFlow[Element](
onFinish: () => Unit,
onFailure: Throwable => Unit
) extends GraphStage[FlowShape[Element, Element]] {
private val in = Inlet[Element]("in")
private val out = Outlet[Element]("out")
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) with InHandler with OutHandler {
def onPush(): Unit = push(out, grab(in))
def onPull(): Unit = pull(in)
setHandler(in, this)
setHandler(out, this)
override def onUpstreamFinish(): Unit = {
onFinish()
super.onUpstreamFinish()
}
override def onUpstreamFailure(ex: Throwable): Unit = {
onFailure(ex)
super.onUpstreamFailure(ex)
}
}
override val shape = FlowShape(in, out)
}
private object AfterResponseFlow {
def apply[Element](
onFinish: () => Unit,
onFailure: Throwable => Unit
): Flow[Element, Element, NotUsed] =
Flow.fromGraph(new AfterResponseFlow(onFinish, onFailure))
}
def onComplete(
response: HttpResponse,
onFinish: () => Unit,
onFailure: Throwable => Unit
): HttpResponse = {
response.copy(
entity = if (response.status.allowsEntity) {
response.entity.transformDataBytes(
AfterResponseFlow(onFinish, onFailure)
)
} else {
onFinish()
HttpEntity.Empty
}
)
}
}
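// Added usage sketch (not part of the original file; names and log messages are illustrative
// assumptions): wraps an HttpResponse so that `onFinish` runs once the entity has been fully
// streamed to the client, or `onFailure` if streaming fails.
object ExecuteAfterResponseUsageSketch {
  import akka.http.scaladsl.model.HttpResponse

  def withCompletionLogging(response: HttpResponse): HttpResponse =
    ExecuteAfterResponse.onComplete(
      response,
      () => println("response entity fully streamed"),
      error => println(s"streaming failed: ${error.getMessage}")
    )
}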
|
census-ecosystem/opencensus-scala
|
akka-http/src/main/scala/io/opencensus/scala/akka/http/utils/ExecuteAfterResponse.scala
|
Scala
|
apache-2.0
| 1,805
|
package crakken.actor
import akka.actor._
import crakken.data.repository._
import scala.concurrent.ExecutionContext
import ExecutionContext.Implicits.global
import akka.event.LoggingReceive
import akka.util.ByteString
import reactivemongo.bson.BSONDocument
object CrakkenRepositoryServiceActor {
def props(repository: CrakkenRepository): Props = Props(new CrakkenRepositoryServiceActor(repository))
}
class CrakkenRepositoryServiceActor(val repository: CrakkenRepository) extends Actor with UnboundedStash with ActorLogging {
def receive = LoggingReceive {
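    // Added note (not in the original source): `sender` is captured into a local `replyTo`
    // before each asynchronous onComplete callback, because `sender` may refer to a
    // different actor by the time the repository Future completes.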
case CrawlRequestMessages.create(request) => {
val replyTo = sender
repository.crawlRequestRepository.create(request)
}
case CrawlRequestMessages.getById(id: String) => {
val replyTo = sender
repository.crawlRequestRepository.getById(id) onComplete(tryResponse => replyTo ! CrawlRequestMessages.gotById(tryResponse))
}
case CrawlRequestMessages.getAll => {
val replyTo = sender
repository.crawlRequestRepository.getAll() onComplete(tryResponse => replyTo ! CrawlRequestMessages.gotAll(tryResponse))
}
case GridFsMessages.create(data: ByteString, filename: String, contentType: String, metadata: BSONDocument) => {
val replyTo = sender
repository.gridFsRepository.create(data, filename, contentType, metadata) onComplete(tryResponse => replyTo ! GridFsMessages.created(tryResponse))
}
case GridFsMessages.getById(id: String) => {
val replyTo = sender
repository.gridFsRepository.getById(id) onComplete(tryResponse => replyTo ! GridFsMessages.gotById(tryResponse))
}
case PageFetchRequestMessages.create(request) => {
val replyTo = sender
repository.pageFetchRequestRepository.create(request)
}
case PageFetchRequestMessages.getById(id: String) => {
val replyTo = sender
repository.pageFetchRequestRepository.getById(id) onComplete(tryResponse => replyTo ! PageFetchRequestMessages.gotById(tryResponse))
}
case PageFetchRequestMessages.getByCrId(id: String) => {
val replyTo = sender
repository.pageFetchRequestRepository.getByCrId(id) onComplete(tryResponse => replyTo ! PageFetchRequestMessages.gotByCrId(tryResponse))
}
case PageFetchRequestMessages.getAll => {
val replyTo = sender
repository.pageFetchRequestRepository.getAll onComplete(tryResponse => replyTo ! PageFetchRequestMessages.gotAll(tryResponse))
}
case PageFetchRequestMessages.update(request) => {
val replyTo = sender
repository.pageFetchRequestRepository.update(request)
}
}
}
|
CrakkenCrawler/crakken
|
app/crakken/actor/CrakkenRepositoryServiceActor.scala
|
Scala
|
apache-2.0
| 2,622
|
package mb1102a
import skidbladnir.{Assembly,Base,Compo,Handle,Interface,Mono,Multi},
rs485master.{RS485,IRS485,ISocket},
owen_io.{Address,Command,IOConst,Port,OwenIOException}
object Test {def main(args: Array[String]): Unit = {val a = new TestAssembly}}
class TestAssembly extends Assembly {
visualization
new Main named "main"
new RS485 connected "iRS485" from "main" named "485"
new MB1102A connected "iMB1102A" from "main" named "MB1102A"
"iSocket" from "485" connect "MB1102A"
"iFirstChannel" from "main" connect "MB1102A"
"iSecondChannel" from "main" connect "MB1102A"
gowait()
end
}
class Main extends Base {
//Interfaces
protected val iRS485:IRS485.Pm = jack(new IRS485.Pm)
protected val iMB1102A:IMB1102A.Pm = multijack(new IMB1102A.Pm)
protected val iFirstChannel = plug(new IMB1102AData.Pm)
protected val iSecondChannel = plug(new IMB1102AData.Pm)
//Main
main(()=>{
iRS485.imports.createLine(new Port(IOConst.COM_3, IOConst.spd_9600, IOConst.prty_NONE, IOConst.databits_8, IOConst.stopbit_1, IOConst.RS485CONV_AUTO))
iMB1102A.imports.head._2.setAddress(new Address(16,IOConst.ADRTYPE_8BIT))
    for (i <- 0 to 100) {
      try { println(iFirstChannel.imports.get()) } catch { case e: Exception => println(e) }
      try { println(iSecondChannel.imports.get()) } catch { case e: Exception => println(e) }
      sleep(50)
    }
selfdestruction
})
}
|
AlexCAB/ChartRecorder
|
mb1102a/Test.scala
|
Scala
|
mit
| 1,529
|
/** soar
*
* Copyright (c) 2017 Hugo Firth
* Email: <me@hugofirth.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.ac.ncl.la.soar.glance.web.client.component
import java.time.Instant
import java.util.UUID
import diode.data.Pot
import diode.react.{ModelProxy, ReactConnectProxy}
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import uk.ac.ncl.la.soar.StudentNumber
import uk.ac.ncl.la.soar.glance.eval.{IncompleteResponse, Survey, SurveyResponse}
import uk.ac.ncl.la.soar.glance.web.client.SurveyModel
import uk.ac.ncl.la.soar.glance.web.client.style.Icon
import scala.scalajs.js.Date
/** Description of Class
*
* @author hugofirth
*/
object SurveyResponseForm {
case class Props(proxy: ModelProxy[Pot[SurveyModel]],
submitHandler: Option[IncompleteResponse] => Callback)
case class State(respondent: String)
sealed trait FormField
case object EmailField extends FormField
class Backend(bs: BackendScope[Props, State]) {
private def formValueChange(e: ReactEventFromInput) = {
val text = e.target.value
bs.modState(s => s.copy(respondent = text))
}
private val emailRegex =
"""^[a-zA-Z0-9\\.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$""".r
private def validEmail(e: String): Boolean = {
if (e.trim.isEmpty)
false
else if (emailRegex.findFirstMatchIn(e).isDefined)
true
else
false
}
private def buildResponseFromProps(p: Props, s: State) = {
p.proxy().toOption.map { sm =>
IncompleteResponse(sm.survey,
sm.simpleRanking,
sm.detailedRanking,
s.respondent,
sm.startTime,
UUID.randomUUID)
}
}
def render(p: Props, s: State): VdomElement = {
<.form(
<.div(
^.className := (if (validEmail(s.respondent)) "form-group" else "form-group has-error"),
<.label(^.`for` := "emailInput", "University Email"),
<.input(
^.`type` := "email",
^.className := "form-control",
^.id := "emailInput",
^.placeholder := "Email",
^.onChange ==> formValueChange
)
),
<.button(
^.`type` := "button",
^.className := "btn btn-primary pull-right",
"Submit",
(^.disabled := true).when(!validEmail(s.respondent)),
^.onClick --> p.submitHandler(buildResponseFromProps(p, s))
)
)
}
}
val component = ScalaComponent
.builder[Props]("SurveyResponseForm")
.initialStateFromProps(p => State(""))
.renderBackend[Backend]
.build
}
|
NewcastleComputingScience/student-outcome-accelerator
|
glance-eval/js/src/main/scala/uk/ac/ncl/la/soar/glance/web/client/component/SurveyResponseForm.scala
|
Scala
|
apache-2.0
| 3,356
|
package com.datastax.spark.connector.sql
import com.datastax.spark.connector.SparkCassandraITFlatSpecBase
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.embedded.SparkTemplate._
import org.apache.spark.sql.cassandra.CassandraSQLContext
class CassandraSQLSpec extends SparkCassandraITFlatSpecBase {
useCassandraConfig(Seq("cassandra-default.yaml.template"))
useSparkConf(defaultSparkConf)
val conn = CassandraConnector(defaultConf)
var cc: CassandraSQLContext = null
conn.withSessionDo { session =>
session.execute("DROP KEYSPACE IF EXISTS sql_test")
session.execute("CREATE KEYSPACE IF NOT EXISTS sql_test WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 }")
session.execute("CREATE TABLE IF NOT EXISTS sql_test.test1 (a INT, b INT, c INT, d INT, e INT, f INT, g INT, h INT, PRIMARY KEY ((a, b, c), d , e, f))")
session.execute("USE sql_test")
session.execute("CREATE INDEX test1_g ON test1(g)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 1, 1, 1, 1, 1)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 1, 2, 1, 1, 2)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 2, 1, 1, 2, 1)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 2, 2, 1, 2, 2)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 1, 1, 2, 1, 1)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 1, 2, 2, 1, 2)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 2, 1, 2, 2, 1)")
session.execute("INSERT INTO sql_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 2, 2, 2, 2, 2)")
session.execute("CREATE TABLE IF NOT EXISTS sql_test.test2 (a INT, b INT, c INT, name TEXT, PRIMARY KEY (a, b))")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (1, 1, 1, 'Tom')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (1, 2, 3, 'Larry')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (1, 3, 3, 'Henry')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (2, 1, 3, 'Jerry')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (2, 2, 3, 'Alex')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (2, 3, 3, 'John')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (3, 1, 3, 'Jack')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (3, 2, 3, 'Hank')")
session.execute("INSERT INTO sql_test.test2 (a, b, c, name) VALUES (3, 3, 3, 'Dug')")
session.execute("CREATE TABLE IF NOT EXISTS sql_test.test3 (a INT, b INT, c INT, PRIMARY KEY (a, b))")
session.execute("CREATE KEYSPACE IF NOT EXISTS sql_test2 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 }")
session.execute("CREATE TABLE IF NOT EXISTS sql_test2.test3 (a INT, b INT, c INT, PRIMARY KEY (a, b))")
session.execute("CREATE TABLE IF NOT EXISTS sql_test2.test2 (a INT, b INT, c INT, PRIMARY KEY (a, b))")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (1, 1, 1)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (1, 2, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (1, 3, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (2, 1, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (2, 2, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (2, 3, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (3, 1, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (3, 2, 3)")
session.execute("INSERT INTO sql_test2.test2 (a, b, c) VALUES (3, 3, 3)")
session.execute("CREATE TABLE IF NOT EXISTS sql_test.test_data_type (a ASCII, b INT, c FLOAT, d DOUBLE, e BIGINT, f BOOLEAN, g DECIMAL, " +
" h INET, i TEXT, j TIMESTAMP, k UUID, l VARINT, PRIMARY KEY ((a), b, c))")
session.execute("INSERT INTO sql_test.test_data_type (a, b, c, d, e, f, g, h, i, j, k, l) VALUES (" +
"'ascii', 10, 12.34, 12.3456789, 123344556, true, 12.36, '74.125.239.135', 'text', '2011-02-03 04:05+0000', 123e4567-e89b-12d3-a456-426655440000 ,123456)")
session.execute("CREATE TABLE IF NOT EXISTS sql_test.test_data_type1 (a ASCII, b INT, c FLOAT, d DOUBLE, e BIGINT, f BOOLEAN, g DECIMAL, " +
" h INET, i TEXT, j TIMESTAMP, k UUID, l VARINT, PRIMARY KEY ((a), b, c))")
session.execute(
s"""
|CREATE TABLE IF NOT EXISTS sql_test.test_collection
| (a INT, b SET<TIMESTAMP>, c MAP<TIMESTAMP, TIMESTAMP>, d List<TIMESTAMP>, PRIMARY KEY (a))
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|INSERT INTO sql_test.test_collection (a, b, c, d)
|VALUES (1,
| {'2011-02-03','2011-02-04'},
| {'2011-02-03':'2011-02-04', '2011-02-06':'2011-02-07'},
| ['2011-02-03','2011-02-04'])
""".stripMargin.replaceAll("\\n", " "))
session.execute("CREATE TYPE sql_test.address (street text, city text, zip int, date TIMESTAMP)")
session.execute("CREATE TABLE IF NOT EXISTS sql_test.udts(key INT PRIMARY KEY, name text, addr frozen<address>)")
session.execute("INSERT INTO sql_test.udts(key, name, addr) VALUES (1, 'name', {street: 'Some Street', city: 'Paris', zip: 11120})")
session.execute(
s"""
|CREATE TYPE IF NOT EXISTS sql_test.category_metadata (
| category_id text,
| metric_descriptors list <text>
|)
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|CREATE TYPE IF NOT EXISTS sql_test.object_metadata (
| name text,
| category_metadata frozen<category_metadata>,
| bucket_size int
|)
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|CREATE TYPE IF NOT EXISTS sql_test.relation (
| type text,
| object_type text,
| related_to text,
| obj_id text
|)
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|CREATE TABLE IF NOT EXISTS sql_test.objects (
| obj_id text,
| metadata frozen<object_metadata>,
| relations list<frozen<relation>>,
| ts timestamp, PRIMARY KEY(obj_id)
|)
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|CREATE TABLE IF NOT EXISTS sql_test.objects_copy (
| obj_id text,
| metadata frozen<object_metadata>,
| relations list<frozen<relation>>,
| ts timestamp, PRIMARY KEY(obj_id)
|)
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|INSERT INTO sql_test.objects (obj_id, ts, metadata, relations)
|values (
| '123', '2015-06-16 15:53:23-0400',
| {
| name: 'foo',
| category_metadata: {
| category_id: 'thermostat',
| metric_descriptors: []
| },
| bucket_size: 0
| },
| [
| {
| type: 'a',
| object_type: 'b',
| related_to: 'c',
| obj_id: 'd'
| },
| {
| type: 'a1',
| object_type: 'b1',
| related_to: 'c1',
| obj_id: 'd1'
| }
| ]
|)
""".stripMargin.replaceAll("\\n", " "))
}
override def beforeAll() {
super.beforeAll()
cc = new CassandraSQLContext(sc)
cc.setKeyspace("sql_test")
}
it should "allow to select all rows" in {
val result = cc.sql("SELECT * FROM test1").collect()
result should have length 8
}
it should "allow to select rows with index columns" in {
val result = cc.sql("SELECT * FROM test1 WHERE g = 2").collect()
result should have length 4
}
it should "allow to select rows with >= clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b >= 2").collect()
result should have length 4
}
it should "allow to select rows with > clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b > 2").collect()
result should have length 0
}
it should "allow to select rows with < clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b < 2").collect()
result should have length 4
}
it should "allow to select rows with <= clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b <= 2").collect()
result should have length 8
}
it should "allow to select rows with in clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b in (1,2)").collect()
result should have length 8
}
it should "allow to select rows with in clause pushed down" in {
val query = cc.sql("SELECT * FROM test2 WHERE a in (1,2)")
query.queryExecution.sparkPlan.nodeName should be ("Filter")
val result = query.collect()
result should have length 6
}
it should "allow to select rows with or clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b = 2 or b = 1").collect()
result should have length 8
}
it should "allow to select rows with != clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b != 2").collect()
result should have length 4
}
it should "allow to select rows with <> clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b <> 2").collect()
result should have length 4
}
it should "allow to select rows with not in clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b not in (1,2)").collect()
result should have length 0
}
it should "allow to select rows with is not null clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE b is not null").collect()
result should have length 8
}
it should "allow to select rows with like clause" in {
val result = cc.sql("SELECT * FROM test2 WHERE name LIKE '%om' ").collect()
result should have length 1
}
it should "allow to select rows with between clause" in {
val result = cc.sql("SELECT * FROM test2 WHERE a BETWEEN 1 AND 2 ").collect()
result should have length 6
}
it should "allow to select rows with alias" in {
val result = cc.sql("SELECT a AS a_column, b AS b_column FROM test2").collect()
result should have length 9
}
it should "allow to select rows with distinct column" in {
val result = cc.sql("SELECT DISTINCT a FROM test2").collect()
result should have length 3
}
it should "allow to select rows with limit clause" in {
val result = cc.sql("SELECT * FROM test1 limit 2").collect()
result should have length 2
}
it should "allow to select rows with order by clause" in {
val result = cc.sql("SELECT * FROM test1 order by d").collect()
result should have length 8
}
it should "allow to select rows with group by clause" in {
val result = cc.sql("SELECT count(*) FROM test1 GROUP BY b").collect()
result should have length 2
}
it should "allow to select rows with union clause" in {
val result = cc.sql("SELECT test1.a FROM sql_test.test1 AS test1 UNION DISTINCT SELECT test2.a FROM sql_test.test2 AS test2").collect()
result should have length 3
}
it should "allow to select rows with union distinct clause" in {
val result = cc.sql("SELECT test1.a FROM sql_test.test1 AS test1 UNION DISTINCT SELECT test2.a FROM sql_test.test2 AS test2").collect()
result should have length 3
}
it should "allow to select rows with union all clause" in {
val result = cc.sql("SELECT test1.a FROM sql_test.test1 AS test1 UNION ALL SELECT test2.a FROM sql_test.test2 AS test2").collect()
result should have length 17
}
it should "allow to select rows with having clause" in {
val result = cc.sql("SELECT count(*) FROM test1 GROUP BY b HAVING count(b) > 4").collect()
result should have length 0
}
it should "allow to select rows with partition column clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE a = 1 and b = 1 and c = 1").collect()
result should have length 4
}
it should "allow to select rows with partition column and cluster column clause" in {
val result = cc.sql("SELECT * FROM test1 WHERE a = 1 and b = 1 and c = 1 and d = 1 and e = 1").collect()
result should have length 1
}
it should "allow to insert into another table" in {
val result = cc.sql("INSERT INTO TABLE test3 SELECT a, b, c FROM test2").collect()
val result2 = cc.sql("SELECT a, b, c FROM test3").collect()
result2 should have length 9
}
it should "allow to insert into another table in different keyspace" in {
val result = cc.sql("INSERT INTO TABLE sql_test2.test3 SELECT test2.a, test2.b, test2.c FROM sql_test.test2 as test2").collect()
val result2 = cc.sql("SELECT test3.a, test3.b, test3.c FROM sql_test2.test3 as test3").collect()
result2 should have length 9
}
it should "allow to join two tables" in {
val result = cc.sql("SELECT test1.a, test1.b, test1.c, test2.a FROM sql_test.test1 AS test1 " +
"JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 4
}
it should "allow to join two tables from different keyspaces" in {
val result = cc.sql("SELECT test1.a, test1.b, test1.c, test2.a FROM sql_test.test1 AS test1 " +
"JOIN sql_test2.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 4
}
it should "allow to inner join two tables" in {
val result = cc.sql("SELECT test1.a, test1.b, test1.c, test2.a FROM sql_test.test1 AS test1 " +
"INNER JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 4
}
it should "allow to left join two tables" in {
val result = cc.sql("SELECT test1.a, test1.b, test1.c, test1.d, test1.e, test1.f FROM sql_test.test1 AS test1 " +
"LEFT JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 8
}
it should "allow to left outer join two tables" in {
val result = cc.sql("SELECT test1.a, test1.b, test1.c, test1.d, test1.e, test1.f FROM sql_test.test1 AS test1 " +
"LEFT OUTER JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 8
}
it should "allow to right join two tables" in {
val result = cc.sql("SELECT test2.a, test2.b, test2.c FROM sql_test.test1 AS test1 " +
"RIGHT JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 12
}
it should "allow to right outer join two tables" in {
val result = cc.sql("SELECT test2.a, test2.b, test2.c FROM sql_test.test1 AS test1 " +
"RIGHT OUTER JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 12
}
it should "allow to full join two tables" in {
val result = cc.sql("SELECT test2.a, test2.b, test2.c FROM sql_test.test1 AS test1 " +
"FULL JOIN sql_test.test2 AS test2 ON test1.a = test2.a AND test1.b = test2.b AND test1.c = test2.c").collect()
result should have length 16
}
it should "allow to select rows for collection columns" in {
val result = cc.sql("SELECT * FROM test_collection").collect()
result should have length 1
}
it should "allow to select rows for data types of ASCII, INT, FLOAT, DOUBLE, BIGINT, BOOLEAN, DECIMAL, INET, TEXT, TIMESTAMP, UUID, VARINT" in {
val result = cc.sql("SELECT * FROM test_data_type").collect()
result should have length 1
}
it should "allow to insert rows for data types of ASCII, INT, FLOAT, DOUBLE, BIGINT, BOOLEAN, DECIMAL, INET, TEXT, TIMESTAMP, UUID, VARINT" in {
val result = cc.sql("INSERT INTO TABLE test_data_type1 SELECT * FROM test_data_type").collect()
val result1 = cc.sql("SELECT * FROM test_data_type1").collect()
result1 should have length 1
}
it should "allow to select specified non-UDT columns from a table containing some UDT columns" in {
val result = cc.sql("SELECT key, name FROM udts").collect()
result should have length 1
val row = result.head
row.getInt(0) should be(1)
row.getString(1) should be ("name")
}
  //TODO: SPARK-9269 is open to address the Set matching issue. The Set data type is changed to List for now
it should "allow to select UDT collection column and nested UDT column" in {
val cc = new CassandraSQLContext(sc)
cc.setKeyspace("sql_test")
val result = cc
.read
.format("org.apache.spark.sql.cassandra")
.options(
Map(
"table" -> "objects",
"keyspace" -> "sql_test"
)
)
.load()
.collect()
result should have length 1
}
it should "allow writing UDTs to C* tables" in {
val cc = new CassandraSQLContext(sc)
cc.setKeyspace("sql_test")
val result = cc
.read
.format("org.apache.spark.sql.cassandra")
.options(
Map(
"table" -> "objects",
"keyspace" -> "sql_test"
)
)
.load()
.write
.format("org.apache.spark.sql.cassandra")
.options(
Map(
"table" -> "objects_copy",
"keyspace" -> "sql_test"
)
).save()
}
// Regression test for #454: java.util.NoSuchElementException thrown when accessing timestamp field using CassandraSQLContext
it should "allow to restrict a clustering timestamp column value" in {
conn.withSessionDo { session =>
session.execute("create table sql_test.export_table(objectid int, utcstamp timestamp, service_location_id int, " +
"service_location_name text, meterid int, primary key(meterid, utcstamp))")
}
val cc = new CassandraSQLContext(sc)
cc.setKeyspace("sql_test")
cc.cassandraSql("select objectid, meterid, utcstamp from export_table where meterid = 4317 and utcstamp > '2013-07-26 20:30:00-0700'").collect()
}
it should "allow to min/max timestamp column" in {
conn.withSessionDo { session =>
session.execute("create table sql_test.timestamp_conversion_bug (k int, v int, d timestamp, primary key(k,v))")
session.execute("insert into sql_test.timestamp_conversion_bug (k, v, d) values (1, 1, '2015-01-03 15:13')")
session.execute("insert into sql_test.timestamp_conversion_bug (k, v, d) values (1, 2, '2015-01-03 16:13')")
session.execute("insert into sql_test.timestamp_conversion_bug (k, v, d) values (1, 3, '2015-01-03 17:13')")
session.execute("insert into sql_test.timestamp_conversion_bug (k, v, d) values (1, 4, '2015-01-03 18:13')")
}
val cc = new CassandraSQLContext(sc)
cc.setKeyspace("sql_test")
cc.cassandraSql("select k, min(d), max(d) from timestamp_conversion_bug group by k").collect()
}
it should "be able to push down filter on UUID and Inet columns" in {
conn.withSessionDo { session =>
session.execute("create table sql_test.uuid_inet_type (a UUID, b INET, c INT, primary key(a,b))")
session.execute("insert into sql_test.uuid_inet_type (a, b, c) " +
"values (123e4567-e89b-12d3-a456-426655440000,'74.125.239.135', 1)")
session.execute("insert into sql_test.uuid_inet_type (a, b, c) " +
"values (067e6162-3b6f-4ae2-a171-2470b63dff00, '74.125.239.136', 2)")
}
val cc = new CassandraSQLContext(sc)
cc.setKeyspace("sql_test")
val result = cc.cassandraSql(
"select * " +
"from uuid_inet_type " +
"where b > '74.125.239.135'").collect()
result should have length 1
val result1 = cc.cassandraSql(
"select * " +
"from uuid_inet_type " +
"where a < '123e4567-e89b-12d3-a456-426655440000'").collect()
result1 should have length 1
val result2 = cc.cassandraSql(
"select * " +
"from uuid_inet_type " +
"where a = '123e4567-e89b-12d3-a456-426655440000' and b = '74.125.239.135'").collect()
result2 should have length 1
}
it should "be able to push down filter on varint columns" in {
conn.withSessionDo { session =>
session.execute(
s"""
|CREATE TABLE sql_test.varint_test(
| id varint,
| series varint,
| rollup_minutes varint,
| event text,
| PRIMARY KEY ((id, series, rollup_minutes), event)
|)
""".stripMargin.replaceAll("\\n", " "))
session.execute(
s"""
|INSERT INTO sql_test.varint_test(id, series, rollup_minutes, event)
|VALUES(1234567891234, 1234567891235, 1234567891236, 'event')
""".stripMargin.replaceAll("\\n", " "))
}
val cc = new CassandraSQLContext(sc)
cc.sql(
s"""
|SELECT * FROM sql_test.varint_test
|WHERE id = 1234567891234
| AND series = 1234567891235
| AND rollup_minutes = 1234567891236
""".stripMargin.replaceAll("\\n", " ")
).collect() should have length 1
}
}
|
viirya/spark-cassandra-connector
|
spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/sql/CassandraSQLSpec.scala
|
Scala
|
apache-2.0
| 21,463
|
/*******************************************************************************
* Copyright 2010 Maxime Lévesque
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************** */
package org.squeryl.dsl.fsm
import org.squeryl.dsl.{BinaryAMSOp, NonNumericalCoalesce, NonNumericalExpression, NumericalExpression}
import org.squeryl.dsl.ast.{ConstantExpressionNode, TypedExpressionNode}
class CaseOfNumericalExpressionMatchStart[A](toMatch: NumericalExpression[A]) {
def when[B,C,D](m: NumericalExpression[B], r: NumericalExpression[D])
(implicit ev1: NonNumericalCoalesce[A,B] => NonNumericalExpression[C]) =
new CaseOfNumericalExpressionMatchYieldingNumerical[C,D](m.asInstanceOf[NumericalExpression[C]], r, None, Some(toMatch))
def when[B,C,D](m: NumericalExpression[B], r: NonNumericalExpression[D])
(implicit ev1: NonNumericalCoalesce[A,B] => NonNumericalExpression[C]) =
new CaseOfNumericalExpressionMatchYieldingNonNumerical[C,D](m.asInstanceOf[NumericalExpression[C]], r, None, Some(toMatch))
}
class CaseOfNumericalExpressionMatchYieldingNumerical[A,T](
val whenArg: TypedExpressionNode[A],
val thenArg: NumericalExpression[T],
val previous: Option[CaseOfChain],
val expressionToMatch: Option[TypedExpressionNode[_]] = None)
extends CaseOfChain {
def when[B,C,U,V](m: NumericalExpression[B], r: NumericalExpression[U])
(implicit ev1: BinaryAMSOp[A,B] => NumericalExpression[C],
ev2: BinaryAMSOp[T,U] => NumericalExpression[V]) =
new CaseOfNumericalExpressionMatchYieldingNumerical[C,V](m.asInstanceOf[NumericalExpression[C]], r.asInstanceOf[NumericalExpression[V]], Some(this))
def otherwise[U,V](r: NumericalExpression[U])
(implicit ev1: BinaryAMSOp[T,U] => NumericalExpression[V]) = {
val o = new BinaryAMSOp(thenArg, r, "!CaseOfNumericalExpressionMatchYieldingNumerical") : NumericalExpression[V]
new CaseOfChainNumericalTermination[V](o.mapper, r, this)
}
}
class CaseOfNumericalExpressionMatchYieldingNonNumerical[A,T](
val whenArg: TypedExpressionNode[A],
val thenArg: NonNumericalExpression[T],
val previous: Option[CaseOfChain],
val expressionToMatch: Option[TypedExpressionNode[_]] = None)
extends CaseOfChain {
def when[B,C,U,V](m: NumericalExpression[B], r: NonNumericalExpression[U])
(implicit ev1: BinaryAMSOp[A,B] => NumericalExpression[C],
ev2: NonNumericalCoalesce[T,U] => NonNumericalExpression[V]) =
new CaseOfNumericalExpressionMatchYieldingNonNumerical[C,V](m.asInstanceOf[NumericalExpression[C]], r.asInstanceOf[NonNumericalExpression[V]], Some(this), None)
def otherwise[U,V](r: NonNumericalExpression[U])
(implicit ev2: NonNumericalCoalesce[T,U] => NonNumericalExpression[V]) = {
val c = new NonNumericalCoalesce[T,U](thenArg, r, "!CaseOfNumericalExpressionMatchYieldingNonNumerical") : NonNumericalExpression[V]
new CaseOfChainNonNumericalTermination[V](c.mapper, r, this)
}
}
|
takezoux2/squeryl-experimental
|
src/main/scala/org/squeryl/dsl/fsm/CaseOfNumericalExpressionMatchStart.scala
|
Scala
|
apache-2.0
| 3,637
|
package kidstravel.client.logger
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
/**
* Facade for functions in log4javascript that we need
*/
@js.native
private[logger] trait Log4JavaScript extends js.Object {
def getLogger(name:js.UndefOr[String]):JSLogger = js.native
def setEnabled(enabled:Boolean):Unit = js.native
def isEnabled:Boolean = js.native
}
@js.native
@JSName("log4javascript.Level")
private[logger] trait Level extends js.Object {
val ALL:Level = js.native
val TRACE:Level = js.native
val DEBUG:Level = js.native
val INFO:Level = js.native
val WARN:Level = js.native
val ERROR:Level = js.native
val FATAL:Level = js.native
}
@js.native
@JSName("log4javascript.Logger")
private[logger] trait JSLogger extends js.Object {
def addAppender(appender:Appender):Unit = js.native
def removeAppender(appender:Appender):Unit = js.native
def removeAllAppenders(appender:Appender):Unit = js.native
def setLevel(level:Level):Unit = js.native
def getLevel:Level = js.native
def trace(msg:String, error:js.UndefOr[js.Error]):Unit = js.native
def debug(msg:String, error:js.UndefOr[js.Error]):Unit = js.native
def info(msg:String, error:js.UndefOr[js.Error]):Unit = js.native
def warn(msg:String, error:js.UndefOr[js.Error]):Unit = js.native
def error(msg:String, error:js.UndefOr[js.Error]):Unit = js.native
def fatal(msg:String, error:js.UndefOr[js.Error]):Unit = js.native
def trace(msg:String):Unit = js.native
def debug(msg:String):Unit = js.native
def info(msg:String):Unit = js.native
def warn(msg:String):Unit = js.native
def error(msg:String):Unit = js.native
def fatal(msg:String):Unit = js.native
}
@js.native
@JSName("log4javascript.Layout")
private[logger] trait Layout extends js.Object
@js.native
@JSName("log4javascript.JsonLayout")
private[logger] class JsonLayout extends Layout
@js.native
@JSName("log4javascript.Appender")
private[logger] trait Appender extends js.Object {
def setLayout(layout:Layout):Unit = js.native
def setThreshold(level:Level):Unit = js.native
}
@js.native
@JSName("log4javascript.BrowserConsoleAppender")
private[logger] class BrowserConsoleAppender extends Appender
@js.native
@JSName("log4javascript.PopUpAppender")
private[logger] class PopUpAppender extends Appender
@js.native
@JSName("log4javascript.AjaxAppender")
private[logger] class AjaxAppender(url:String) extends Appender {
def addHeader(header:String, value:String):Unit = js.native
}
@js.native
private[logger] object Log4JavaScript extends js.GlobalScope {
val log4javascript:Log4JavaScript = js.native
}
class L4JSLogger(jsLogger:JSLogger) extends Logger {
private var ajaxAppender:AjaxAppender = null
private def undefOrError(e:Exception):js.UndefOr[js.Error] = {
if(e == null)
js.undefined
else
e.asInstanceOf[js.Error]
}
override def trace(msg: String, e: Exception): Unit = jsLogger.trace(msg, undefOrError(e))
override def trace(msg: String): Unit = jsLogger.trace(msg)
override def debug(msg: String, e: Exception): Unit = jsLogger.debug(msg, undefOrError(e))
override def debug(msg: String): Unit = jsLogger.debug(msg)
override def info(msg: String, e: Exception): Unit = jsLogger.info(msg, undefOrError(e))
override def info(msg: String): Unit = jsLogger.info(msg)
override def warn(msg: String, e: Exception): Unit = jsLogger.warn(msg, undefOrError(e))
override def warn(msg: String): Unit = jsLogger.warn(msg)
override def error(msg: String, e: Exception): Unit = jsLogger.error(msg, undefOrError(e))
override def error(msg: String): Unit = jsLogger.error(msg)
override def fatal(msg: String, e: Exception): Unit = jsLogger.fatal(msg, undefOrError(e))
override def fatal(msg: String): Unit = jsLogger.fatal(msg)
override def enableServerLogging(url: String): Unit = {
if(ajaxAppender == null) {
ajaxAppender = new AjaxAppender(url)
ajaxAppender.addHeader("Content-Type", "application/json")
ajaxAppender.setLayout(new JsonLayout)
jsLogger.addAppender(ajaxAppender)
}
}
override def disableServerLogging():Unit = {
if(ajaxAppender != null) {
jsLogger.removeAppender(ajaxAppender)
ajaxAppender = null
}
}
}
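// Added usage sketch (not part of the original file; the logger name is an illustrative
// assumption): obtains a log4javascript logger through the facade above and wraps it
// in L4JSLogger.
object L4JSLoggerUsageSketch {
  def loggerFor(name: String): L4JSLogger =
    new L4JSLogger(Log4JavaScript.log4javascript.getLogger(name))
}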
|
devkat/kidstravel
|
client/src/main/scala/kidstravel/client/logger/Log4JavaScript.scala
|
Scala
|
apache-2.0
| 4,235
|
package com.toolkit.util.spark.hbase
/**
* Created by rahul on 08/02/15.
*/
object packageinfo {
//TODO : will implement later
}
|
rahulkumar-aws/logManagementToolkit
|
src/main/scala/com/toolkit/util/spark/hbase/packageinfo.scala
|
Scala
|
apache-2.0
| 132
|
package com.lljv.analytics.hadoopengine
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{StreamingContext, Seconds}
import scala.util.control.NonFatal
class SparkStreamEngine(val settings: SparkStreamSettings) extends Serializable {
val sparkConfig: Option[SparkConf] = try {
Some(new SparkConf()
.setAppName(settings.appName)
.setMaster(settings.masterNode)
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
// .registerKryoClasses(Array(classOf[AnalyzerEngine]))
)
} catch {
case NonFatal(exc) => {
// printf(exc.getMessage())
// TODO: logging
None
}
}
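  // Added note (not in the original source): `sparkConfig.get` below throws if the SparkConf
  // above failed to build; that exception is non-fatal, so the catch clause yields None for
  // the streaming context as well.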
val streamingContext: Option[StreamingContext] = try {
Some(new StreamingContext(sparkConfig.get, Seconds(settings.updateIntervalSeconds)))
} catch {
case NonFatal(exc) => {
// printf(exc.getMessage())
// TODO: logging
None
}
}
}
|
dotdeb/Science-Finder
|
Analytics/HadoopEngine/src/main/scala/com/lljv/analytics/hadoopengine/SparkStreamEngine.scala
|
Scala
|
apache-2.0
| 933
|
package com.datastax.spark.connector.writer
import java.io.{OutputStream, ObjectOutputStream}
import java.nio.ByteBuffer
import scala.collection.JavaConversions._
import com.datastax.spark.connector.util.ByteBufferUtil
/** Estimates the amount of memory required to serialize Java/Scala objects */
object ObjectSizeEstimator {
private def makeSerializable(obj: Any): AnyRef = {
obj match {
case bb: ByteBuffer => ByteBufferUtil.toArray(bb)
case list: java.util.List[_] => list.map(makeSerializable)
case list: List[_] => list.map(makeSerializable)
case set: java.util.Set[_] => set.map(makeSerializable)
case set: Set[_] => set.map(makeSerializable)
case map: java.util.Map[_, _] => map.map { case (k, v) => (makeSerializable(k), makeSerializable(v)) }
case map: Map[_, _] => map.map { case (k, v) => (makeSerializable(k), makeSerializable(v)) }
case other => other.asInstanceOf[AnyRef]
}
}
  /** Records only how many bytes were written; the actual data is discarded */
private class CountingOutputStream extends OutputStream {
private var _length = 0
override def write(b: Int) = _length += 1
override def write(b: Array[Byte]) = _length += b.length
override def write(b: Array[Byte], off: Int, len: Int) = _length += len
def length = _length
}
/** Serializes passed objects and reports their total size */
def measureSerializedSize(objects: Seq[Any]): Int = {
val countingStream = new CountingOutputStream
val objectStream = new ObjectOutputStream(countingStream)
for (obj <- objects)
objectStream.writeObject(makeSerializable(obj))
objectStream.close()
countingStream.length
}
}
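// A minimal usage sketch (the sample values are hypothetical): feeds a few plain objects to
// ObjectSizeEstimator.measureSerializedSize above and records the estimated byte count.
private object ObjectSizeEstimatorUsageSketch {
  val sampleObjects: Seq[Any] = Seq("hello", 42, Map("a" -> 1), List(1, 2, 3))
  val estimatedBytes: Int = ObjectSizeEstimator.measureSerializedSize(sampleObjects)
}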
|
Stratio/spark-cassandra-connector
|
spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/writer/ObjectSizeEstimator.scala
|
Scala
|
apache-2.0
| 1,707
|
package lila.app
package templating
import play.twirl.api.Html
import lila.api.Context
import lila.forum.Post
trait ForumHelper { self: UserHelper with StringHelper =>
private object Granter extends lila.forum.Granter {
protected def userBelongsToTeam(teamId: String, userId: String): Boolean =
Env.team.api.belongsTo(teamId, userId)
protected def userOwnsTeam(teamId: String, userId: String): Fu[Boolean] =
Env.team.api.owns(teamId, userId)
}
def isGrantedRead(categSlug: String)(implicit ctx: Context) =
Granter isGrantedRead categSlug
def isGrantedWrite(categSlug: String)(implicit ctx: Context) =
Granter isGrantedWrite categSlug
def isGrantedMod(categSlug: String)(implicit ctx: Context) =
Granter.isGrantedMod(categSlug).await
def authorName(post: Post) = post.userId match {
case Some(userId) => userIdSpanMini(userId, withOnline = true)
case None => Html(lila.user.User.anonymous)
}
def authorLink(
post: Post,
cssClass: Option[String] = None,
withOnline: Boolean = true) = post.userId.fold(Html(lila.user.User.anonymous)) { userId =>
userIdLink(userId.some, cssClass = cssClass, withOnline = withOnline)
}
}
|
clarkerubber/lila
|
app/templating/ForumHelper.scala
|
Scala
|
agpl-3.0
| 1,210
|
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
object laird extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template1[String,play.api.templates.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(message: String):play.api.templates.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.19*/("""
"""),_display_(Seq[Any](/*3.2*/Main("Laird Hamilton")/*3.24*/ {_display_(Seq[Any](format.raw/*3.26*/("""
<div class="container">
<div class="row">
<div class="col-md-4 pagecontent">
<div class="well">
<div class="laird-img"></div>
</div>
</div>
<div class="col-md-8">
<div class="well">
<h3>Laird Hamilton</h3>
<p>Laird was born Laird John Zerfas in San Francisco on March 2, 1964, in an experimental salt-water sphere
at UCSF Medical Center designed to ease the mother's labor. His Greek birth father, L. G. Zerfas, left the
family before his first birthday. While he was an infant, Laird and his mother, Joann, moved to Hawaii. While
still a young boy living on Oahu, Laird met with 1960s surfer Bill Hamilton, a bachelor at the time, on
Pūpūkea beach on the North Shore. Bill Hamilton was a surfboard shaper and glasser on Oahu in the 1960s and
1970s and owned a small business handmaking custom, high-performance surfboards for the Oahu North Shore big
wave riders of the era. The two became immediate companions. The young Laird invited Bill Hamilton home to
meet his mother. Bill Hamilton married Laird's then-single mother, becoming Laird's adoptive father.</p>
<p>The family later moved to a remote valley on Kauaʻi island. Joann and Bill had a second son, Lyon,
Laird's half-brother, who also became a surfer. Laird's mother died of a brain aneurysm in 1997.</p>
<p>Hamilton had a reputation for an aggressive demeanor around others of his age. This hostile attitude was
in part due to Laird and his brother Lyon being bigger than their classmates, fair-skinned, and blonde:
unusual in their predominantly Hawaiian-populated neighborhood. The role of the outsider profoundly affected
Laird through to his teen years and early adult life. He became used to this role and was uncomfortable being
in the center of anything. He was also known for his physical and mental toughness. Young Laird is shown in
early video footage jumping off a 60-foot cliff into deep water at 7 years of age.</p>
</div>
</div>
</div>
</div>
""")))})),format.raw/*38.2*/("""
"""))}
}
def render(message:String): play.api.templates.HtmlFormat.Appendable = apply(message)
def f:((String) => play.api.templates.HtmlFormat.Appendable) = (message) => apply(message)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Wed Oct 09 10:52:49 HST 2013
SOURCE: C:/Users/Diana/Desktop/surferpedia/app/views/laird.scala.html
HASH: ca88c47dff5ec27590cd10932c647b8c87c3bb0a
MATRIX: 774->1|885->18|925->24|955->46|994->48|3201->2224
LINES: 26->1|29->1|31->3|31->3|31->3|66->38
-- GENERATED --
*/
|
MattCCieslak/surferpedia
|
target/scala-2.10/src_managed/main/views/html/laird.template.scala
|
Scala
|
mit
| 3,894
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server.epoch
import java.io.File
import java.util.concurrent.atomic.AtomicBoolean
import kafka.cluster.Replica
import kafka.server._
import kafka.utils.{MockTime, TestUtils}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.EpochEndOffset
import org.apache.kafka.common.requests.EpochEndOffset._
import org.easymock.EasyMock._
import org.junit.Assert._
import org.junit.Test
class OffsetsForLeaderEpochTest {
private val config = TestUtils.createBrokerConfigs(1, TestUtils.MockZkConnect).map(KafkaConfig.fromProps).head
private val time = new MockTime
private val metrics = new Metrics
private val tp = new TopicPartition("topic", 1)
@Test
def shouldGetEpochsFromReplica(): Unit = {
//Given
val offset = 42
val epochRequested: Integer = 5
val request = Map(tp -> epochRequested)
//Stubs
val mockLog = createNiceMock(classOf[kafka.log.Log])
val mockCache = createNiceMock(classOf[kafka.server.epoch.LeaderEpochCache])
val logManager = createNiceMock(classOf[kafka.log.LogManager])
expect(mockCache.endOffsetFor(epochRequested)).andReturn(offset)
expect(mockLog.leaderEpochCache).andReturn(mockCache).anyTimes()
expect(logManager.liveLogDirs).andReturn(Array.empty[File]).anyTimes()
replay(mockCache, mockLog, logManager)
// create a replica manager with 1 partition that has 1 replica
val replicaManager = new ReplicaManager(config, metrics, time, null, null, logManager, new AtomicBoolean(false),
QuotaFactory.instantiate(config, metrics, time, ""), new BrokerTopicStats,
new MetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size))
val partition = replicaManager.getOrCreatePartition(tp)
val leaderReplica = new Replica(config.brokerId, partition.topicPartition, time, 0, Some(mockLog))
partition.addReplicaIfNotExists(leaderReplica)
partition.leaderReplicaIdOpt = Some(config.brokerId)
//When
val response = replicaManager.lastOffsetForLeaderEpoch(request)
//Then
assertEquals(new EpochEndOffset(Errors.NONE, offset), response(tp))
}
@Test
def shouldReturnNoLeaderForPartitionIfThrown(): Unit = {
val logManager = createNiceMock(classOf[kafka.log.LogManager])
expect(logManager.liveLogDirs).andReturn(Array.empty[File]).anyTimes()
replay(logManager)
//create a replica manager with 1 partition that has 0 replica
val replicaManager = new ReplicaManager(config, metrics, time, null, null, logManager, new AtomicBoolean(false),
QuotaFactory.instantiate(config, metrics, time, ""), new BrokerTopicStats,
new MetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size))
replicaManager.getOrCreatePartition(tp)
//Given
val epochRequested: Integer = 5
val request = Map(tp -> epochRequested)
//When
val response = replicaManager.lastOffsetForLeaderEpoch(request)
//Then
assertEquals(new EpochEndOffset(Errors.NOT_LEADER_FOR_PARTITION, UNDEFINED_EPOCH_OFFSET), response(tp))
}
@Test
def shouldReturnUnknownTopicOrPartitionIfThrown(): Unit = {
val logManager = createNiceMock(classOf[kafka.log.LogManager])
expect(logManager.liveLogDirs).andReturn(Array.empty[File]).anyTimes()
replay(logManager)
//create a replica manager with 0 partition
val replicaManager = new ReplicaManager(config, metrics, time, null, null, logManager, new AtomicBoolean(false),
QuotaFactory.instantiate(config, metrics, time, ""), new BrokerTopicStats,
new MetadataCache(config.brokerId), new LogDirFailureChannel(config.logDirs.size))
//Given
val epochRequested: Integer = 5
val request = Map(tp -> epochRequested)
//When
val response = replicaManager.lastOffsetForLeaderEpoch(request)
//Then
assertEquals(new EpochEndOffset(Errors.UNKNOWN_TOPIC_OR_PARTITION, UNDEFINED_EPOCH_OFFSET), response(tp))
}
}
|
themarkypantz/kafka
|
core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala
|
Scala
|
apache-2.0
| 4,840
|
package notebook.client
import java.io.File
import akka.actor.{Actor, ActorRef, Props}
import notebook.OutputTypes._
import notebook.PresentationCompiler
import notebook.kernel._
import notebook.JobTracking
import notebook.kernel.repl.common.ReplT
import notebook.util.{CustomResolvers, Deps}
import org.joda.time.LocalDateTime
import sbt._
import scala.collection.immutable.Queue
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure => TFailure, Success => TSuccess}
/**
* @param _initScripts List of scala source strings to be executed during REPL startup.
* @param customSparkConf Map configuring the notebook (spark configuration).
* @param compilerArgs Command line arguments to pass to the REPL compiler
*/
class ReplCalculator(
notebookName:String,
customLocalRepo: Option[String],
customRepos: Option[List[String]],
customDeps: Option[List[String]],
customImports: Option[List[String]],
customArgs: Option[List[String]],
customSparkConf: Option[Map[String, String]],
remoteActor:ActorRef,
_initScripts: List[(String, String)],
compilerArgs: List[String]
) extends Actor with akka.actor.ActorLogging {
private val remoteLogger = context.actorSelection("/user/remote-logger")
remoteLogger ! remoteActor
private val repoRegex = "(?s)^:local-repo\\\\s*(.+)\\\\s*$".r
private val resolverRegex = "(?s)^:remote-repo\\\\s*(.+)\\\\s*$".r
private val authRegex = """(?s)^\\s*\\(([^\\)]+)\\)\\s*$""".r
private val credRegex = """"([^"]+)"\\s*,\\s*"([^"]+)"""".r //"
private def outputTypesRegex(ctx: String, outputType: String) = {
s"(?s)^:$ctx\\\\s*\\n(.+)\\\\s*$$".r → outputType
}
private val htmlContext = outputTypesRegex("html", `text/html`)
private val plainContext = outputTypesRegex("plain", `text/plain`)
private val markdownContext = outputTypesRegex("markdown", `text/markdown`)
private val latexContext = outputTypesRegex("latex", `text/latex`)
private val svgContext = outputTypesRegex("svg", `image/svg+xml`)
private val pngContext = outputTypesRegex("png", `image/png`)
private val jpegContext = outputTypesRegex("jpeg", `image/jpeg`)
private val pdfContext = outputTypesRegex("pdf", `application/pdf`)
private val javascriptContext = outputTypesRegex("javascript", `application/javascript`)
private val cpRegex = "(?s)^:cp\\\\s*(.+)\\\\s*$".r
private val dpRegex = "(?s)^:(l?)dp\\\\s*(.+)\\\\s*$".r
private val sqlRegex = "(?s)^:sql(?:\\\\[([a-zA-Z0-9][a-zA-Z0-9]*)\\\\])?\\\\s*(.+)\\\\s*$".r
private val shRegex = "(?s)^:sh\\\\s*(.+)\\\\s*$".r
// note: the resolver list is a superset of Spark's list in o.a.spark.deploy.SparkSubmit
// except that the local ivy repo isn't included
private var resolvers: List[Resolver] = {
val mavenLocal = Resolver.mavenLocal
val defaultLocal = Resolver.defaultLocal
val local = {
val pats = List(
sys.props("user.home") + "/.ivy2/" + "/local/" + Resolver.localBasePattern,
sys.props("user.home") + "/.ivy2/" + "/cache/" + Resolver.localBasePattern
)
FileRepository("snb-local", Resolver.defaultFileConfiguration, Patterns(pats, pats, false))
}
val defaultShared = Resolver.defaultShared
val mavenReleases = sbt.DefaultMavenRepository
val typesafeReleases = Resolver.typesafeIvyRepo("releases")
val jCenterReleases = Resolver.jcenterRepo
val sonatypeReleases = Resolver.sonatypeRepo("releases")
val spReleases = new MavenRepository("spark-packages", "http://dl.bintray.com/spark-packages/maven/")
val defaults = defaultLocal :: local :: mavenLocal :: defaultShared :: mavenReleases :: spReleases :: typesafeReleases :: jCenterReleases :: sonatypeReleases :: Nil
customRepos.getOrElse(List.empty[String]).map(CustomResolvers.fromString).map(_._2) ::: defaults
}
private var repo: File = customLocalRepo.map { x =>
new File(notebook.util.StringUtils.updateWithVarEnv(x))
}.getOrElse {
val tmp = new File(System.getProperty("java.io.tmpdir"))
val snb = new File(tmp, "spark-notebook")
if (!snb.exists) snb.mkdirs
val repo = new File(snb, "repo")
if (!repo.exists) repo.mkdirs
val r = new File(repo, java.util.UUID.randomUUID.toString)
if (!r.exists) r.mkdirs
r
}
def codeRepo = new File(repo, "code")
val (depsJars, depsScript): (List[String], (String, () => String)) = customDeps.map { d =>
val customDeps = d.mkString("\\n")
val deps = Deps.script(customDeps, resolvers, repo).toOption.getOrElse(List.empty[String])
(deps, ("deps", () => {
s"""
|val CustomJars = ${deps.mkString("Array(\\"", "\\",\\"", "\\")").replace("\\\\","\\\\\\\\")}
""".stripMargin
}))
}.getOrElse((List.empty[String], ("deps", () => "val CustomJars = Array.empty[String]\\n")))
val ImportsScripts = ("imports", () => customImports.map(_.mkString("\\n") + "\\n").getOrElse("\\n"))
private var _repl: Option[ReplT] = None
private def repl: ReplT = _repl getOrElse {
val r = new Repl(compilerArgs, depsJars)
_repl = Some(r)
r
}
private var _presentationCompiler: Option[PresentationCompiler] = None
private def presentationCompiler: PresentationCompiler = _presentationCompiler getOrElse {
val r = new PresentationCompiler(depsJars)
_presentationCompiler = Some(r)
r
}
val chat = new notebook.front.gadgets.Chat()
// +/- copied of https://github.com/scala/scala/blob/v2.11.4/src%2Flibrary%2Fscala%2Fconcurrent%2Fduration%2FDuration.scala
final def toCoarsest(d: FiniteDuration): String = {
def loop(length: Long, unit: TimeUnit, acc: String): String = {
def coarserOrThis(coarser: TimeUnit, divider: Int) = {
if (length == divider)
loop(1, coarser, acc)
else if (length < divider)
FiniteDuration(length, unit).toString + " " + acc
else {
val _acc = if (length % divider == 0) {
acc
} else {
FiniteDuration(length % divider, unit).toString + " " + acc
}
loop(length / divider, coarser, _acc)
}
}
unit match {
case DAYS => d.toString + " " + acc
case HOURS => coarserOrThis(DAYS, 24)
case MINUTES => coarserOrThis(HOURS, 60)
case SECONDS => coarserOrThis(MINUTES, 60)
case MILLISECONDS => coarserOrThis(SECONDS, 1000)
case MICROSECONDS => coarserOrThis(MILLISECONDS, 1000)
case NANOSECONDS => coarserOrThis(MICROSECONDS, 1000)
}
}
if (d.unit == DAYS || d.length == 0) d.toString()
else loop(d.length, d.unit, "").trim
}
// Make a child actor so we don't block the execution on the main thread, so that interruption can work
private val executor = context.actorOf(Props(new Actor {
implicit val ec = context.dispatcher
private var queue: Queue[(ActorRef, ExecuteRequest)] = Queue.empty
private var currentlyExecutingTask: Option[Future[(String, EvaluationResult)]] = None
def eval(b: => String, notify: Boolean = true)(success: => String = "",
failure: String => String = (s: String) => "Error evaluating " + b + ": " + s) {
repl.evaluate(b)._1 match {
case Failure(str) =>
if (notify) {
eval( """""", notify = false)()
}
log.error(failure(str))
case _ =>
if (notify) {
eval( """""", notify = false)()
}
log.info(success)
}
}
def receive = {
case "process-next" =>
log.debug(s"Processing next asked, queue is ${queue.size} length now")
currentlyExecutingTask = None
if (queue.nonEmpty) { //queue could be empty if InterruptRequest was asked!
log.debug("Dequeuing execute request current size: " + queue.size)
val (executeRequest, queueTail) = queue.dequeue
queue = queueTail
val (ref, er) = executeRequest
log.debug("About to execute request from the queue")
execute(ref, er)
}
case er@ExecuteRequest(_, _, code) =>
log.debug("Enqueuing execute request at: " + queue.size)
queue = queue.enqueue((sender(), er))
// if queue contains only the new task, and no task is currently executing, execute it straight away
// otherwise the execution will start once the evaluation of earlier cell(s) finishes
if (currentlyExecutingTask.isEmpty && queue.size == 1) {
self ! "process-next"
}
case InterruptCellRequest(killCellId) =>
// kill job(s) still waiting for execution to start, if any
val (jobsInQueueToKill, nonAffectedJobs) = queue.partition { case (_, ExecuteRequest(cellIdInQueue, _, _)) =>
cellIdInQueue == killCellId
}
log.debug(s"Canceling $killCellId jobs still in queue (if any):\\n $jobsInQueueToKill")
queue = nonAffectedJobs
log.debug(s"Interrupting the cell: $killCellId")
val jobGroupId = JobTracking.jobGroupId(killCellId)
// make sure sparkContext is already available!
if (jobsInQueueToKill.isEmpty && repl.sparkContextAvailable) {
log.info(s"Killing job Group $jobGroupId")
val thisSender = sender()
repl.evaluate(
s"""sparkContext.cancelJobGroup("${jobGroupId}")""",
msg => thisSender ! StreamResponse(msg, "stdout")
)
}
// StreamResponse shows error msg
sender() ! StreamResponse("The cell was cancelled.\\n", "stderr")
        // ErrorResponse marks the cell as ended
sender() ! ErrorResponse("The cell was cancelled.\\n", incomplete = false)
case InterruptRequest =>
log.debug("Interrupting the spark context")
val thisSender = sender()
log.debug("Clearing the queue of size " + queue.size)
queue = scala.collection.immutable.Queue.empty
repl.evaluate(
"globalScope.sparkContext.cancelAllJobs()",
msg => {
thisSender ! StreamResponse(msg, "stdout")
}
)
}
def execute(sender: ActorRef, er: ExecuteRequest): Unit = {
val (outputType, newCode) = er.code match {
case resolverRegex(r) =>
log.debug("Adding resolver: " + r)
val (logR, resolver) = CustomResolvers.fromString(r)
resolvers = resolver :: resolvers
(`text/plain`, s""" "Resolver added: $logR!" """)
case repoRegex(r) =>
log.debug("Updating local repo: " + r)
repo = new File(r.trim)
repo.mkdirs
(`text/plain`, s""" "Repo changed to ${repo.getAbsolutePath}!" """)
case dpRegex(local, cp) =>
log.debug(s"Fetching ${if(local == "l") "locally" else ""} deps using repos: " + resolvers.mkString(" -- "))
val tryDeps = Deps.script(cp, resolvers, repo)
tryDeps match {
case TSuccess(deps) =>
eval( """sparkContext.stop() """)("CP reload processed successfully",
(str: String) => "Error in :dp: \\n%s".format(str)
)
val (_r, replay) = repl.addCp(deps)
_repl = Some(_r)
preStartLogic()
replay()
val newJarList = if (local == "l") {
"Nil"
} else {
deps.map(x => x.replaceAll("\\\\\\\\", "\\\\\\\\\\\\\\\\")).mkString("List(\\"", "\\",\\"", "\\")")
}
(`text/html`,
s"""
|//updating deps
|globalScope.jars = ($newJarList ::: globalScope.jars.toList).distinct.toArray
|//restarting spark
|reset()
|globalScope.jars.toList
""".stripMargin
)
case TFailure(ex) =>
log.error(ex, "Cannot add dependencies")
(`text/html`, s""" <p style="color:red">${ex.getMessage}</p> """)
}
case cpRegex(cp) =>
val jars = cp.trim().split("\\n").toList.map(_.trim()).filter(_.length > 0)
repl.evaluate( """sparkContext.stop()""")._1 match {
case Failure(str) =>
log.error("Error in :cp: \\n%s".format(str))
case _ =>
log.info("CP reload processed successfully")
}
val (_r, replay) = repl.addCp(jars)
_repl = Some(_r)
preStartLogic()
replay()
val newJarList = jars.map(x => x.replaceAll("\\\\\\\\", "\\\\\\\\\\\\\\\\")).mkString("List(\\"", "\\",\\"", "\\")")
(`text/html`,
s"""
|//updating deps
|globalScope.jars = ($newJarList ::: globalScope.jars.toList).distinct.toArray
|//restarting spark
|reset()
|globalScope.jars.toList
""".stripMargin
)
case shRegex(sh) =>
val ps = "s\\"\\"\\"" + sh.replaceAll("\\\\s*\\\\|\\\\s*", "\\" #\\\\| \\"").replaceAll("\\\\s*&&\\\\s*", "\\" #&& \\"") + "\\"\\"\\""
val shCode =
s"""|import sys.process._
|println($ps.!!(ProcessLogger(out => (), err => println(err))))
|()
|""".stripMargin.trim
log.debug(s"Generated SH code: $shCode")
(`text/plain`, shCode)
case sqlRegex(n, sql) =>
log.debug(s"Received sql code: [$n] $sql")
val qs = "\\"\\"\\""
val name = Option(n).map(nm => s"@transient val $nm = ").getOrElse("")
(`text/html`,
s"""
import notebook.front.widgets.Sql
import notebook.front.widgets.Sql._
${name}new Sql(sqlContext, s$qs$sql$qs)
"""
)
case htmlContext._1(content) =>
val ctx = htmlContext._2
        val c = content.toString.replaceAll("\\"", "&quot;")
(ctx, " scala.xml.XML.loadString(s\\"\\"\\"" + c + "\\"\\"\\") ")
case plainContext._1(content) =>
val ctx = plainContext._2
val c = content.toString.replaceAll("\\"", "\\\\\\\\\\\\\\"")
(ctx, " s\\"\\"\\"" + c + "\\"\\"\\" ")
case markdownContext._1(content) =>
val ctx = markdownContext._2
val c = content.toString.replaceAll("\\\\\\"", "\\"")
(ctx, " s\\"\\"\\"" + c + "\\"\\"\\" ")
case latexContext._1(content) =>
val ctx = latexContext._2
val c = content.toString.replaceAll("\\\\\\"", "\\"")
(ctx, " s\\"\\"\\"" + c + "\\"\\"\\" ")
case svgContext._1(content) =>
val ctx = svgContext._2
        val c = content.toString.replaceAll("\\"", "&quot;")
(ctx, " scala.xml.XML.loadString(s\\"\\"\\"" + c + "\\"\\"\\") ")
case pngContext._1(content) =>
val ctx = pngContext._2
(ctx, content.toString)
case jpegContext._1(content) =>
val ctx = jpegContext._2
(ctx, content.toString)
case pdfContext._1(content) =>
val ctx = pdfContext._2
(ctx, content.toString)
case javascriptContext._1(content) =>
val ctx = javascriptContext._2
val c = content.toString //.replaceAll("\\"", "\\\\\\"")
(ctx, " s\\"\\"\\"" + c + "\\"\\"\\" ")
case whatever => (`text/html`, whatever)
}
val start = System.currentTimeMillis
val thisSelf = self
val thisSender = sender
val result = scala.concurrent.Future {
      // this future is required to allow InterruptRequest messages to be received and processed,
      // so that Spark jobs can be killed and control handed back to the user to refine their tasks
val cellId = er.cellId
def replEvaluate(code:String, cellId:String) = {
val cellResult = try {
repl.evaluate(s"""
|sparkContext.setJobGroup("${JobTracking.jobGroupId(cellId)}", "${JobTracking.jobDescription(code, start)}")
|$code
""".stripMargin,
msg => thisSender ! StreamResponse(msg, "stdout"),
nameDefinition => thisSender ! nameDefinition
)
}
finally {
repl.evaluate("sparkContext.clearJobGroup()")
}
cellResult
}
val result = replEvaluate(newCode, cellId)
val d = toCoarsest(Duration(System.currentTimeMillis - start, MILLISECONDS))
(d, result._1)
}
currentlyExecutingTask = Some(result)
result foreach {
case (timeToEval, Success(result)) =>
val evalTimeStats = s"Took: $timeToEval, at ${new LocalDateTime().toString("Y-M-d H:m")}"
thisSender ! ExecuteResponse(outputType, result.toString(), evalTimeStats)
case (timeToEval, Failure(stackTrace)) =>
thisSender ! ErrorResponse(stackTrace, incomplete = false)
case (timeToEval, notebook.kernel.Incomplete) =>
thisSender ! ErrorResponse("Incomplete (hint: check the parenthesis)", incomplete = true)
}
result onComplete {
_ => thisSelf ! "process-next"
}
}
}))
def preStartLogic() {
log.info("ReplCalculator preStart")
val dummyScript = ("dummy", () => s"""val dummy = ();\\n""")
val SparkHookScript = (
"class server",
() => s"""@transient val _5C4L4_N0T3800K_5P4RK_HOOK = "${repl.classServerUri.get}";\\n"""
)
// Must escape last remaining '\\', which could be for windows paths.
val nbName = notebookName.replaceAll("\\"", "").replace("\\\\", "\\\\\\\\")
val SparkConfScript = {
val m = customSparkConf .getOrElse(Map.empty[String, String])
m .map { case (k, v) =>
"( \\"" + k + "\\" → \\"" + v + "\\" )"
}.mkString(",")
}
val CustomSparkConfFromNotebookMD = ("custom conf", () => s"""
|@transient val notebookName = "$nbName"
|@transient val _5C4L4_N0T3800K_5P4RK_C0NF:Map[String, String] = Map(
| $SparkConfScript
|)\\n
""".stripMargin
)
def eval(script: () => String): Option[String] = {
val sc = script()
log.debug("script is :\\n" + sc)
if (sc.trim.length > 0) {
val (result, _) = repl.evaluate(sc)
result match {
case Failure(str) =>
log.error("Error in init script: \\n%s".format(str))
None
case _ =>
if (log.isDebugEnabled) log.debug("\\n" + sc)
log.info("Init script processed successfully")
Some(sc)
}
} else None
}
val allInitScrips: List[(String, () => String)] = dummyScript ::
SparkHookScript ::
depsScript ::
ImportsScripts ::
CustomSparkConfFromNotebookMD ::
( _initScripts ::: repl.endInitCommand ).map(x => (x._1, () => x._2))
for ((name, script) <- allInitScrips) {
log.info(s" INIT SCRIPT: $name")
eval(script).map { sc =>
presentationCompiler.addScripts(sc)
}
}
repl.setInitFinished()
}
override def preStart() {
preStartLogic()
super.preStart()
}
override def postStop() {
log.info("ReplCalculator postStop")
presentationCompiler.stop()
super.postStop()
}
override def preRestart(reason: Throwable, message: Option[Any]) {
log.info("ReplCalculator preRestart " + message)
reason.printStackTrace()
super.preRestart(reason, message)
}
override def postRestart(reason: Throwable) {
log.info("ReplCalculator postRestart")
reason.printStackTrace()
super.postRestart(reason)
}
def receive = {
case msgThatShouldBeFromTheKernel =>
msgThatShouldBeFromTheKernel match {
case req @ InterruptCellRequest(_) =>
executor.forward(req)
case InterruptRequest => executor.forward(InterruptRequest)
case req@ExecuteRequest(_, _, code) => executor.forward(req)
case CompletionRequest(line, cursorPosition) =>
val (matched, candidates) = presentationCompiler.complete(line, cursorPosition)
sender ! CompletionResponse(cursorPosition, candidates, matched)
case ObjectInfoRequest(code, position) =>
val completions = repl.objectInfo(code, position)
val resp = if (completions.length == 0) {
ObjectInfoResponse(found = false, code, "", "")
} else {
ObjectInfoResponse(found = true, code, completions.mkString("\\n"), "")
}
sender ! resp
}
}
}
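// A minimal construction sketch: every value below is hypothetical; the real notebook server
// derives them from the notebook metadata. ReplCalculator is an Actor, so it is instantiated
// through Props rather than with `new` directly.
private object ReplCalculatorUsageSketch {
  def props(remote: ActorRef): Props = Props(new ReplCalculator(
    notebookName = "demo-notebook",
    customLocalRepo = None,
    customRepos = None,
    customDeps = None,
    customImports = None,
    customArgs = None,
    customSparkConf = None,
    remoteActor = remote,
    _initScripts = Nil,
    compilerArgs = Nil
  ))
}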
|
radek1st/spark-notebook
|
modules/kernel/src/main/scala-2.10/notebook/ReplCalculator.scala
|
Scala
|
apache-2.0
| 20,737
|
//
// Codex - a multi-language code indexer and grokker
// http://github.com/samskivert/codex
package codex.data
/** Defines a fully qualified project id. This is rooted in the Maven style and contains:
* `groupId`, `artifactId` and `version`.
*/
case class FqId (groupId :String, artifactId :String, version :String) {
/** Creates a copy of this FqId with `suff` tacked onto its version. */
def dedupe (suff :Int) = copy(version = s"$version-$suff")
/** Returns a path fragment that's useful for embedding this id in a URL. */
def path = s"${groupId}/${artifactId}/${version}"
override def toString = groupId + ":" + artifactId + ":" + version
}
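// A minimal usage sketch with hypothetical Maven-style coordinates, exercising the `path`
// and `dedupe` helpers defined on FqId above.
private object FqIdUsageSketch {
  val id = FqId("com.example", "demo", "1.0")
  val asPath = id.path        // "com.example/demo/1.0"
  val deduped = id.dedupe(2)  // version becomes "1.0-2"
}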
|
samskivert/codex
|
src/main/scala/codex/data/FqId.scala
|
Scala
|
bsd-3-clause
| 666
|
// Copyright (c) 2011 Paul Butcher
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.borachio
import com.borachio.scalatest.MockFactory
import org.scalatest.WordSpec
class MockFunctionTest extends WordSpec with MockFactory {
autoVerify = false
def repeat(n: Int)(what: => Unit) {
for (i <- 0 until n)
what
}
case class TestException() extends RuntimeException
"A mock function" should {
"return null unless told otherwise" in {
val m = mockFunction[String]
m expects ()
expect(null) { m() }
verifyExpectations
}
"return what it's told to" in {
val m = mockFunction[String]
m returns "foo"
expect("foo") { m() }
verifyExpectations
}
"throw what it's told to" in {
val m = mockFunction[String]
m throws new TestException
intercept[TestException] { m() }
verifyExpectations
}
"match arguments" in {
val m = mockFunction[Int, String, Double]
m expects (42, "foo") returning 1.23
expect(1.23) { m(42, "foo") }
verifyExpectations
}
"match single element arguments" in {
val m = mockFunction[Int, Int]
m expects (42) returning 43
expect(43) { m(42) }
verifyExpectations
}
"fail if there are no matching arguments" in {
val m = mockFunction[Int, String, Double]
m expects (42, "foo") returning 1.23
intercept[ExpectationException] { m(42, "bar") }
}
"allow any arguments if none are specified" in {
val m = mockFunction[Int, String, Double]
m returns 1.23
expect(1.23) { m(1, "foo") }
expect(1.23) { m(2, "bar") }
expect(1.23) { m(-1, null) }
verifyExpectations
}
"match multiple expectations in any order" in {
val m1 = mockFunction[Int, String, Double]
val m2 = mockFunction[String, String]
m1 expects (42, "foo") returning 1.23
m2 expects ("foo") returning "bar"
m1 expects (0, "baz") returning 3.45
expect(3.45) { m1(0, "baz") }
expect(1.23) { m1(42, "foo") }
expect("bar") { m2("foo") }
verifyExpectations
}
"fail if an expectation is not met" in {
val m = mockFunction[Int]
m expects ()
intercept[ExpectationException] { verifyExpectations }
}
"allow multiple calls if no range is set" in {
val m = mockFunction[Int]
m expects ()
repeat(3) { m() }
verifyExpectations
}
"succeed with the minimum number of calls in a range" in {
val m = mockFunction[Int]
m expects () repeat (3 to 7)
repeat(3) { m() }
verifyExpectations
}
"succeed with the maximum number of calls in a range" in {
val m = mockFunction[Int]
m expects () repeat (3 to 7)
repeat(7) { m() }
verifyExpectations
}
"fail if the minimum number if calls isn't satisfied" in {
val m = mockFunction[Int]
m expects () repeat (3 to 7)
repeat(2) { m() }
intercept[ExpectationException] { verifyExpectations }
}
"fail if the maximum number if calls is exceeded" in {
val m = mockFunction[Int]
m expects () repeat (3 to 7)
intercept[ExpectationException] { repeat(8) { m() } }
}
"handle a degenerate sequence" in {
val m = mockFunction[Int, Int]
inSequence {
m expects (42) returning 10
}
expect(10) { m(42) }
verifyExpectations
}
"handle a sequence of calls" in {
val m = mockFunction[Int, Int]
inSequence {
m expects (42) returning 10 repeat (3 to 7)
m expects (43) returning 11 repeat 1
m expects (44) returning 12 twice
}
repeat(5) { expect(10) { m(42) } }
repeat(1) { expect(11) { m(43) } }
repeat(2) { expect(12) { m(44) } }
verifyExpectations
}
"fail if functions are called out of sequence" in {
val m = mockFunction[Int, Int]
inSequence {
m expects (42) returning 10 repeat (3 to 7)
m expects (43) returning 11 repeat 1
m expects (44) returning 12 twice
}
repeat(5) { m(42) }
intercept[ExpectationException] { m(44) }
}
"fail if the entire sequence isn't called" in {
val m = mockFunction[Int, Int]
inSequence {
m expects (42) returning 10 repeat (3 to 7)
m expects (43) returning 11 once;
m expects (44) returning 12 twice
}
repeat(5) { m(42) }
repeat(1) { m(43) }
intercept[ExpectationException] { verifyExpectations }
}
"handle a combination of ordered and unordered expectations" in {
val m = mockFunction[Int, Unit]
m expects (1)
inSequence {
m expects (11)
m expects (12)
m expects (13)
}
m expects (21)
inSequence {
m expects (31)
m expects (32)
}
m expects (41)
m(21)
m(31)
m(11)
m(12)
m(1)
m(32)
m(41)
m(13)
verifyExpectations
}
"handle a sequence in which functions are called zero times" in {
val m = mockFunction[Int, Unit]
inSequence {
m expects (1) once;
m expects (2) never;
m expects (3) anyNumberOfTimes;
m expects (4) once
}
m(1)
m(4)
verifyExpectations
}
"handle valid deeply nested expectation contexts" in {
val m = mockFunction[String, Unit]
m expects ("1")
inSequence {
m expects ("2.1")
inAnyOrder {
m expects ("2.2.1")
inSequence {
m expects ("2.2.2.1")
m expects ("2.2.2.2")
}
m expects ("2.2.3")
}
m expects ("2.3")
}
m expects ("3")
m("2.1")
m("1")
m("2.2.3")
m("2.2.2.1")
m("2.2.2.2")
m("2.2.1")
m("3")
m("2.2.3")
m("2.3")
verifyExpectations
}
"handle invalid deeply nested expectation contexts" in {
val m = mockFunction[String, Unit]
m expects ("1")
inSequence {
m expects ("2.1")
inAnyOrder {
m expects ("2.2.1")
inSequence {
m expects ("2.2.2.1")
m expects ("2.2.2.2")
}
m expects ("2.2.3")
}
m expects ("2.3")
}
m expects ("3")
m("2.1")
m("1")
m("2.2.3")
intercept[ExpectationException] { m("2.2.2.2") }
}
"match wildcard arguments" in {
val m = mockFunction[Int, String, Unit]
m expects (42, "foo")
m expects (*, "bar")
m expects (0, *)
m(42, "foo")
m(1, "bar")
m(2, "bar")
m(0, "something")
m(0, null)
intercept[ExpectationException] { m(1, "something") }
}
"match epsilon arguments" in {
val m = mockFunction[Double, Double]
m expects (~42.0) returning 1.0
m expects (~0.0)
m(42.0001)
m(-0.0001)
intercept[ExpectationException] { m(42.1) }
}
"cope with a SUT that swallows exceptions" in {
val m = mockFunction[Unit]
try {
m()
} catch {
case _ => // do nothing
}
intercept[ExpectationException] { verifyExpectations }
}
"match a simple predicate" in {
val m = mockFunction[Int, Double, String]
m expectsWhere { (x: Int, y: Double) => x < y } returning "predicate matched"
expect("predicate matched") { m(10, 12.0) }
verifyExpectations
}
"fail if a predicate does not match" in {
val m = mockFunction[Int, Double, String]
m expectsWhere { (x: Int, y: Double) => x < y } returning "predicate matched"
intercept[ExpectationException] { m(12, 10.0) }
}
"allow return values to be computed" in {
val m = mockFunction[Int, Int]
m expects (*) onCall { x: Int => x + 1 } twice
expect(2) { m(1) }
expect(10) { m(9) }
}
}
}
|
paulbutcher/borachio
|
core_tests/src/test/scala/MockFunctionTest.scala
|
Scala
|
mit
| 9,192
|
package es.weso.computex.profile
import java.net.URI
import scala.io.Source
import com.hp.hpl.jena.query.QueryFactory
import com.hp.hpl.jena.rdf.model.Model
import com.hp.hpl.jena.rdf.model.ModelFactory
import com.hp.hpl.jena.rdf.model.RDFList
import com.hp.hpl.jena.rdf.model.RDFNode
import com.hp.hpl.jena.rdf.model.Resource
import es.weso.computex.profile._
import es.weso.computex.PREFIXES
import es.weso.utils.JenaUtils
import com.hp.hpl.jena.vocabulary.RDF
import com.hp.hpl.jena.update.UpdateFactory
import scala.collection.immutable.VectorBuilder
case class ProfileParser(profile : Model) {
val rdf_type = profile.createProperty(PREFIXES.rdf + "type")
val cex_ValidationProfile = profile.createProperty(PREFIXES.cex + "ValidationProfile")
val cex_ontologyBase = profile.createProperty(PREFIXES.cex + "ontologyBase")
val cex_import = profile.createProperty(PREFIXES.cex + "import")
val cex_integrityQuery = profile.createProperty(PREFIXES.cex + "integrityQuery")
val cex_expandQuery = profile.createProperty(PREFIXES.cex + "expandQuery")
val cex_expandSteps = profile.createProperty(PREFIXES.cex + "expandSteps")
val cex_computeSteps = profile.createProperty(PREFIXES.cex + "computeSteps")
val cex_name = profile.createProperty(PREFIXES.cex + "name")
val cex_uri = profile.createProperty(PREFIXES.cex + "uri")
def validators(resource: Resource): Seq[Validator] = {
val vals = Vector.newBuilder[Validator]
val iter = profile.listStatements(resource,cex_integrityQuery,null)
while (iter.hasNext) {
val s = iter.next
val r = s.getObject()
val name = JenaUtils.getLiteral(r,cex_name)
val uri = JenaUtils.getObjectURI(r,cex_uri)
val query = QueryFactory.read(uri.toString)
vals += Validator(query,name,uri)
}
vals.result
}
def computeSteps(resource : Resource): Seq[ComputeStep] = {
val seq = Vector.newBuilder[ComputeStep]
val iter = profile.listStatements(resource,cex_computeSteps,null)
if (iter.hasNext) {
val s = iter.next
val nodeList : RDFNode = s.getObject()
var current : Resource = nodeList.asResource()
while (!RDF.nil.equals(current)) {
val r = current.getRequiredProperty(RDF.first).getObject.asResource
val name = JenaUtils.getLiteral(r,cex_name)
val uri = JenaUtils.getObjectURI(r,cex_uri)
val contents = Source.fromURI(uri).mkString
val query = QueryFactory.read(uri.toString)
seq += ComputeStep(query,name,uri)
current = current.getRequiredProperty(RDF.rest).getObject.asResource
}
}
seq.result
}
/*
def expanders(resource : Resource): Seq[Expander] = {
val seq = Vector.newBuilder[Expander]
val iter = profile.listStatements(resource,cex_expandSteps,null)
if (iter.hasNext) {
val s = iter.next
val nodeList : RDFNode = s.getObject()
var current : Resource = nodeList.asResource()
while (!RDF.nil.equals(current)) {
val r = current.getRequiredProperty(RDF.first).getObject.asResource
val name = JenaUtils.getLiteral(r,cex_name)
val uri = JenaUtils.getObjectURI(r,cex_uri)
val contents = Source.fromURI(uri).mkString
val updateReq = UpdateFactory.create(contents)
seq += Expander(updateReq,name,uri)
current = current.getRequiredProperty(RDF.rest).getObject.asResource
}
}
seq.result
}
*/
def name(resource: Resource): String = {
JenaUtils.getLiteral(resource, cex_name)
}
def base(resource: Resource): URI = {
JenaUtils.getObjectURI(resource, cex_ontologyBase)
}
def uri(resource: Resource): String = {
resource.getURI
}
def imports(resource: Resource, visited: Seq[URI]): Seq[(URI,Profile)] = {
val seq = Vector.newBuilder[(URI,Profile)]
val iter = profile.listStatements(resource,cex_import,null)
while (iter.hasNext) {
val s = iter.next
val uri = JenaUtils.getURI(s.getObject)
if (visited.contains(uri)) {
throw new Exception("imports: URI " + uri + " already visited. List of visited uris = " + visited)
} else {
val contents = Source.fromURI(uri).mkString
val model = JenaUtils.parseFromString(contents)
val profiles = ProfileParser.fromModel(model,uri +: visited)
seq ++= profiles.map(p => (uri,p))
}
}
seq.result
}
def getProfiles(visited: Seq[URI] = Seq()): Seq[Profile] = {
val seq = Vector.newBuilder[Profile]
val iter = profile.listStatements(null,rdf_type,cex_ValidationProfile)
while (iter.hasNext) {
val s = iter.next
val r = s.getSubject()
val n = name(r)
val u = uri(r)
val b = base(r)
val vals = validators(r)
// val exps = expanders(r)
val comps = computeSteps(r)
val imps = imports(r, new URI(r.getURI) +: visited)
seq += Profile(b,vals,comps,imps,n,u)
}
seq.result
}
}
object ProfileParser {
/**
* Retrieves the list of profiles that appear in a Model
* @param model the model to parse
* @param visited the sequence of URIs that have already been visited
* Empty by default
*
*/
def fromModel(model:Model,
visited: Seq[URI] = Seq()
) : Seq[Profile] = ProfileParser(model).getProfiles(visited)
/**
* Generates a model from a profile
*/
def toModel(profile: Profile) : Model = {
val m= ModelFactory.createDefaultModel
m.setNsPrefixes(PREFIXES.cexMapping)
val root = m.createResource(profile.uri)
val rdf_type = m.createProperty(PREFIXES.rdf + "type")
val rdf_List = m.createProperty(PREFIXES.rdf + "List")
val rdf_first = m.createProperty(PREFIXES.rdf + "first")
val rdf_rest = m.createProperty(PREFIXES.rdf + "rest")
val cex_ontologyBase = m.createProperty(PREFIXES.cex + "ontologyBase")
val cex_ValidationProfile = m.createProperty(PREFIXES.cex + "ValidationProfile")
val cex_import = m.createProperty(PREFIXES.cex + "import")
val cex_integrityQuery = m.createProperty(PREFIXES.cex + "integrityQuery")
val cex_expandQuery = m.createProperty(PREFIXES.cex + "expandQuery")
val cex_expandSteps = m.createProperty(PREFIXES.cex + "expandSteps")
val cex_computeSteps = m.createProperty(PREFIXES.cex + "computeSteps")
val cex_name = m.createProperty(PREFIXES.cex + "name")
val cex_uri = m.createProperty(PREFIXES.cex + "uri")
m.add(root,rdf_type,cex_ValidationProfile)
m.add(root,cex_name,profile.name)
val uriBase = m.createResource(profile.ontologyBase.toString)
m.add(root,cex_ontologyBase,uriBase)
for (i <- profile.imports) {
val uri = m.createResource(i._1.toString)
m.add(root,cex_import,uri)
}
/* if (profile.expanders.length > 0) {
// Generate RDF Collection list of expanders
val lsNodes = Vector.newBuilder[RDFNode]
for (e <- profile.expanders) {
val currentNode = m.createResource
m.add(currentNode,cex_name,e.name)
val uriExpander = m.createResource(e.uri.toString)
m.add(currentNode,cex_uri,uriExpander)
lsNodes += currentNode
}
val listExpanders = m.createList(lsNodes.result.toArray)
m.add(root,cex_expandSteps,listExpanders)
}
*/
if (profile.computeSteps.length > 0) {
// Generate RDF Collection list of computeSteps
val lsComps = Vector.newBuilder[RDFNode]
for (e <- profile.computeSteps) {
val currentNode = m.createResource
m.add(currentNode,cex_name,e.name)
val uriComputeStep = m.createResource(e.uri.toString)
m.add(currentNode,cex_uri,uriComputeStep)
lsComps += currentNode
}
val listComputeStep = m.createList(lsComps.result.toArray)
m.add(root,cex_computeSteps,listComputeStep)
}
for (v <- profile.validators) {
val resourceValidator = m.createResource
m.add(root,cex_integrityQuery,resourceValidator)
m.add(resourceValidator,cex_name,v.name)
val uriValidator = m.createResource(v.uri.toString)
m.add(resourceValidator,cex_uri,uriValidator)
}
m
}
}
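// A minimal usage sketch: assumes `model` is a Jena Model that already holds at least one
// cex:ValidationProfile description whose imports are reachable. It parses the profiles with
// ProfileParser.fromModel and serializes the first one back via toModel.
private object ProfileParserUsageSketch {
  def roundTripFirstProfile(model: Model): Model = {
    val profiles: Seq[Profile] = ProfileParser.fromModel(model)
    ProfileParser.toModel(profiles.head)
  }
}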
|
weso/computex
|
app/es/weso/computex/profile/ProfileParser.scala
|
Scala
|
apache-2.0
| 8,161
|
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.chart
import java.io.OutputStream
import com.fasterxml.jackson.core.JsonFactory
import com.netflix.atlas.chart.model.GraphDef
object GraphEngine {
val jsonFactory = new JsonFactory
}
trait GraphEngine {
def name: String
def contentType: String
def write(config: GraphDef, output: OutputStream): Unit
}
|
brharrington/atlas
|
atlas-chart/src/main/scala/com/netflix/atlas/chart/GraphEngine.scala
|
Scala
|
apache-2.0
| 945
|
package org.brijest.storm
package engine
package impl.local
import scala.util.parsing.combinator._
import scala.concurrent.SyncVar
import collection._
import com.weiglewilczek.slf4s._
import model._
class LocalEngine(config: Config, val player: Player, w: World) extends Engine with Engine.State with Logging {
engine =>
private val playeruis = mutable.ArrayBuffer[UI]()
private val commands = mutable.ArrayBuffer[Command]()
@volatile private var running = true
@volatile private var paused = false
def isPaused = paused
/* simulation thread */
class SimulationThread extends Thread("Local simulator") {
val area = w.area(w.position(player)).get.acquire()
val sim = new Simulator(area)
val pc = w.pc(player)
override def run() {
sim.init()
while (running) {
/* collect inputs */
processCommands()
/* step through simulation */
val (_, actions) = sim.step()
/* wait */
Thread.sleep(10)
engine.synchronized {
while (paused) {
processCommands()
if (paused) engine.wait()
}
}
}
}
private def processCommands() {
engine.synchronized {
for (comm <- commands) comm match {
case OrderCommand(plid, o) => pc.order.:=(o)(area)
case ScriptCommand(s) => script(s)
case EmptyCommand => // do nothing
}
commands.clear()
}
}
}
val simthr = new SimulationThread
def start() = simthr.start()
def awaitTermination() = simthr.join()
def listen(ui: UI) = {
playeruis += ui
}
def push(comm: Command) = engine.synchronized {
commands += comm
engine.notify()
}
/* scripting */
object dsl extends syntactical.StandardTokenParsers {
val global = mutable.HashMap[String, List[Any] => Any](
"end" -> {
xs => engine.synchronized {
paused = false
running = false
engine.notify()
}
},
"pause" -> {
xs => engine.synchronized {
if (running) {
paused = true
engine.notify()
}
}
},
"resume" -> {
xs => engine.synchronized {
paused = false
engine.notify()
}
},
"togglePause" -> {
xs => engine.synchronized {
if (running) {
paused = !paused
engine.notify()
}
}
}
)
def interpret(m: String) = script(new lexical.Scanner(m)) match {
case Success(obj, _) => obj
case Failure(msg, _) => sys.error(msg)
case Error(msg, _) => sys.error(msg)
}
/* language */
lexical.delimiters ++= List("(", ")", ",")
def script: Parser[Any] = expression
def expression: Parser[Any] = functioncall
def functioncall: Parser[Any] = ident ~ argslist ^^ {
case func ~ args => global(func)(args)
}
def argslist = "(" ~> repsep(expression, ",") <~ ")" ^^ {
case os: List[_] => os
}
}
def script(m: String) = dsl.interpret(m)
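  // Usage sketch (hypothetical call): the DSL above parses simple call expressions against the
  // registered globals, so pausing can be driven with plain strings, e.g. script("togglePause()").
  private def togglePauseViaScript(): Any = script("togglePause()")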
}
|
axel22/scala-2d-game-editor
|
src/main/scala/org/brijest/storm/engine/impl/local/LocalEngine.scala
|
Scala
|
bsd-3-clause
| 3,157
|
/**
* Copyright 2016 Confluent Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.confluent.examples.streams
import java.util.Properties
import org.apache.kafka.common.serialization._
import org.apache.kafka.streams._
import org.apache.kafka.streams.kstream.{KStream, KStreamBuilder}
/**
* Demonstrates how to perform simple, state-less transformations via map functions.
* Same as [[MapFunctionLambdaExample]] but in Scala.
*
* Use cases include e.g. basic data sanitization, data anonymization by obfuscating sensitive data
* fields (such as personally identifiable information aka PII). This specific example reads
* incoming text lines and converts each text line to all-uppercase.
*
* Requires a version of Scala that supports Java 8 and SAM / Java lambda (e.g. Scala 2.11 with
* `-Xexperimental` compiler flag, or 2.12).
*
* HOW TO RUN THIS EXAMPLE
*
* 1) Start Zookeeper and Kafka.
* Please refer to <a href='http://docs.confluent.io/current/quickstart.html#quickstart'>QuickStart</a>.
*
* 2) Create the input and output topics used by this example.
*
* {{{
* $ bin/kafka-topics --create --topic TextLinesTopic --zookeeper localhost:2181 --partitions 1 --replication-factor 1
* $ bin/kafka-topics --create --topic UppercasedTextLinesTopic --zookeeper localhost:2181 --partitions 1 --replication-factor 1
* $ bin/kafka-topics --create --topic OriginalAndUppercasedTopic --zookeeper localhost:2181 --partitions 1 --replication-factor 1
* }}}
*
* Note: The above commands are for the Confluent Platform. For Apache Kafka it should be
* `bin/kafka-topics.sh ...`.
*
* 3) Start this example application either in your IDE or on the command line.
*
* If via the command line please refer to
* <a href='https://github.com/confluentinc/examples/tree/master/kafka-streams#packaging-and-running'>Packaging</a>.
* Once packaged you can then run:
*
* {{{
* $ java -cp target/streams-examples-3.2.0-standalone.jar io.confluent.examples.streams.MapFunctionScalaExample
 * }}}
*
 * 4) Write some input data to the source topics (e.g. via `kafka-console-producer`). The already
* running example application (step 3) will automatically process this input data and write the
* results to the output topics.
*
* {{{
* # Start the console producer. You can then enter input data by writing some line of text,
* # followed by ENTER:
* #
* # hello kafka streams<ENTER>
* # all streams lead to kafka<ENTER>
* #
* # Every line you enter will become the value of a single Kafka message.
* $ bin/kafka-console-producer --broker-list localhost:9092 --topic TextLinesTopic
* }}}
*
* 5) Inspect the resulting data in the output topics, e.g. via `kafka-console-consumer`.
*
* {{{
* $ bin/kafka-console-consumer --zookeeper localhost:2181 --topic UppercasedTextLinesTopic --from-beginning
* $ bin/kafka-console-consumer --zookeeper localhost:2181 --topic OriginalAndUppercasedTopic --from-beginning
* }}}
*
* You should see output data similar to:
* {{{
* HELLO KAFKA STREAMS
* ALL STREAMS LEAD TO KAFKA
* }}}
*
* 6) Once you're done with your experiments, you can stop this example via `Ctrl-C`. If needed,
* also stop the Kafka broker (`Ctrl-C`), and only then stop the ZooKeeper instance (`Ctrl-C`).
*/
object MapFunctionScalaExample {
def main(args: Array[String]) {
val builder: KStreamBuilder = new KStreamBuilder
val streamingConfig = {
val settings = new Properties
settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "map-function-scala-example")
settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
// Specify default (de)serializers for record keys and for record values.
settings.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.ByteArray.getClass.getName)
settings.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String.getClass.getName)
settings
}
val stringSerde: Serde[String] = Serdes.String()
// Read the input Kafka topic into a KStream instance.
val textLines: KStream[Array[Byte], String] = builder.stream("TextLinesTopic")
// Variant 1: using `mapValues`
val uppercasedWithMapValues: KStream[Array[Byte], String] = textLines.mapValues(_.toUpperCase())
// Write (i.e. persist) the results to a new Kafka topic called "UppercasedTextLinesTopic".
//
// In this case we can rely on the default serializers for keys and values because their data
// types did not change, i.e. we only need to provide the name of the output topic.
uppercasedWithMapValues.to("UppercasedTextLinesTopic")
// We are using implicit conversions to convert Scala's `Tuple2` into Kafka Streams' `KeyValue`.
// This allows us to write streams transformations as, for example:
//
  //    map((key, value) => (key, value.toUpperCase()))
//
// instead of the more verbose
//
  //    map((key, value) => new KeyValue(key, value.toUpperCase()))
//
import KeyValueImplicits._
// Variant 2: using `map`, modify value only (equivalent to variant 1)
val uppercasedWithMap: KStream[Array[Byte], String] = textLines.map((key, value) => (key, value.toUpperCase()))
// Variant 3: using `map`, modify both key and value
//
// Note: Whether, in general, you should follow this artificial example and store the original
// value in the key field is debatable and depends on your use case. If in doubt, don't
// do it.
val originalAndUppercased: KStream[String, String] = textLines.map((key, value) => (value, value.toUpperCase()))
// Write the results to a new Kafka topic "OriginalAndUppercasedTopic".
//
// In this case we must explicitly set the correct serializers because the default serializers
// (cf. streaming configuration) do not match the type of this particular KStream instance.
originalAndUppercased.to(stringSerde, stringSerde, "OriginalAndUppercasedTopic")
val stream: KafkaStreams = new KafkaStreams(builder, streamingConfig)
stream.start()
}
}
|
randyzingle/tools
|
gradle-streams/src/main/scala/io/confluent/examples/streams/MapFunctionScalaExample.scala
|
Scala
|
apache-2.0
| 6,637
|
package com.github.xiaodongw.swagger.finatra
import com.twitter.finagle.http.Request
import com.twitter.finatra.http.Controller
import com.twitter.finatra.response.Mustache
import io.swagger.models.Swagger
import io.swagger.util.Json
@Mustache("index")
case class SwaggerView(title: String, path: String)
class SwaggerController(docPath: String = "/api-docs", swagger: Swagger) extends Controller {
get(s"${docPath}/model") { request: Request =>
response.ok.body(Json.mapper.writeValueAsString(swagger))
.contentType("application/json").toFuture
}
get(s"${docPath}/ui") { request: Request =>
response.temporaryRedirect
.location("/webjars/swagger-ui/2.2.6/index.html?url=/api-docs/model")
}
}
|
forthy/swagger-finatra
|
src/main/scala/com/github/xiaodongw/swagger/finatra/SwaggerController.scala
|
Scala
|
apache-2.0
| 727
|
package dragon
import java.awt.{BasicStroke, Color}
object Config {
val edgeLength = 2
val generations = 20
val startPosition = Pos(Image.width * 3 / 4, Image.height * 3 / 4)
val fileName: String = "D:\\\\tmp\\\\dragon-turtle.png"
object Image {
val width: Int = 10000
val height: Int = 10000
val penColor: Color = Color.CYAN
val lineStyle: BasicStroke = new BasicStroke()
}
}
object Main extends App {
private val startTime: Long = System.nanoTime
Dragon.draw(Config.generations)
println((System.nanoTime() - startTime) / 1000000.0)
}
object Painter {
import java.awt.geom._
import java.awt.image.BufferedImage
import java.io.File
import javax.imageio.ImageIO
val imageSize = (Config.Image.width, Config.Image.height)
val canvas = new BufferedImage(imageSize._1, imageSize._2, BufferedImage.TYPE_INT_RGB)
val g = canvas.createGraphics()
g.setStroke(Config.Image.lineStyle)
g.setColor(Config.Image.penColor)
def line(start: Pos, end: Pos) = g.draw(new Line2D.Double(start.x, start.y, end.x, end.y))
def out(fileName: String) = {
ImageIO.write(canvas, "png", new File(fileName))
}
}
abstract class Turtle(var currentPos: Pos = Pos(0, 0)) {
val painter = Painter
var currentDirection: Direction = Up
def right() = currentDirection = currentDirection.right
def left() = currentDirection = currentDirection.left
def forward(n: Int) = {
val newPosition: Pos = currentDirection.forward(currentPos, n)
drawLine(currentPos, newPosition)
currentPos = newPosition
}
def drawLine(start: Pos, end: Pos) = painter.line(start, end)
def saveImage() = painter.out(Config.fileName)
}
object Dragon {
type Curve = Seq[Move]
val startTurn: Curve = Seq(Right)
object turtle extends Turtle(Config.startPosition)
trait Move {
def turn()
def draw() = {
turn(); turtle.forward(Config.edgeLength)
}
def invert: Move
}
case object Right extends Move {
def turn() = turtle.right()
def invert: Move = Left
}
case object Left extends Move {
def turn() = turtle.left()
def invert: Move = Right
}
def iteration(n: Int): Curve = {
def iterAcc(n: Int, acc: Curve): Curve =
if (n == 0) acc
else iterAcc(n - 1, acc ++ startTurn ++ acc.reverse.map(_.invert))
iterAcc(n, startTurn)
}
def draw(generations: Int) {
iteration(generations) foreach (_.draw())
turtle.saveImage()
}
}
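// Worked example of iteration(), derived from iterAcc above (not present in the source):
//   iteration(0) == Seq(Right)
//   iteration(1) == Seq(Right, Right, Left)
//   iteration(2) == Seq(Right, Right, Left, Right, Right, Left, Left)
// Each generation is the previous curve, a Right turn, then the reversed previous curve
// with every turn inverted (the classic dragon-curve construction).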
|
Nelosh/dragon-curve
|
src/main/scala/dragon/Dragon.scala
|
Scala
|
unlicense
| 2,629
|
package controllers
import play.api.libs.json._
import java.io._
import java.util.regex._
import scala.collection.mutable.HashMap
case class Annotation(motifNum: Int, reverse: Boolean, position: Int, gene: String, pvalue: Double)
case class GeneAnnotations(gene: String, annotations: Seq[Annotation])
case class MotifInfo(motifNum: Int, evalue: Double, pssm: Array[Array[Float]], annotations: Array[Annotation])
case class Snapshot(rows: Map[Int, List[String]], columns: Map[Int, List[String]],
residuals: Map[Int, Double],
motifs: Map[String, Map[Int, Array[MotifInfo]]]) {
}
object SnapshotReader {
val BaseResultsFileName = "%d-results.json"
val JsonFilePattern = Pattern.compile("(\\d+)-results.json")
}
class SnapshotReader(OutDirectory: File, Synonyms: SynonymsMap) {
import SnapshotReader._
implicit object SnapshotFormat extends Format[Snapshot] {
private def readMotifInfos(stMotifs: Seq[JsValue]) = {
val motifInfos = new java.util.ArrayList[MotifInfo]
for (motif <- stMotifs) {
val motifObj = motif.asInstanceOf[JsObject]
val pssm = motifObj.value("pssm").as[Array[Array[Float]]]
val evalue = if (motifObj.keys.contains("evalue")) (motifObj \ "evalue").as[Double]
else 0.0
val motifNum = (motifObj \ "motif_num").as[Int]
val annotations = if (motifObj.keys.contains("annotations")) {
val annots = (motifObj \ "annotations").asInstanceOf[JsArray]
val annotArr = new Array[Annotation](annots.value.length)
for (i <- 0 until annotArr.length) {
val current = annots(i)
annotArr(i) = Annotation(motifNum,
(current \ "reverse").as[Boolean],
(current \ "position").as[Int],
(current \ "gene").as[String],
(current \ "pvalue").as[Double])
}
annotArr
} else Array[Annotation]()
motifInfos.add(MotifInfo(motifNum, evalue, pssm, annotations))
}
motifInfos.toArray(new Array[MotifInfo](0))
}
def reads(json: JsValue): Snapshot = {
val rows = (json \ "rows").as[JsObject]
val cols = (json \ "columns").as[JsObject]
val residuals = (json \ "residuals").as[JsObject]
val motifsVal = (json \ "motifs")
val clusterRows = new HashMap[Int, List[String]]
for (field <- rows.fields) {
clusterRows(field._1.toInt) = field._2.as[List[String]].map(str => Synonyms(str))
}
val clusterCols = new HashMap[Int, List[String]]
for (field <- cols.fields) {
clusterCols(field._1.toInt) = field._2.as[List[String]]
}
val clusterResiduals = new HashMap[Int, Double]
for (field <- residuals.fields) {
clusterResiduals(field._1.toInt) = field._2.asInstanceOf[JsNumber].value.doubleValue
}
//val clusterMotifs = new HashMap[Int, Map[String, Array[MotifInfo]]]
val seqTypeMotifs = new HashMap[String, Map[Int, Array[MotifInfo]]]
try {
motifsVal match {
case motifs:JsObject =>
for (field <- motifs.fields) {
val seqType = field._1
val clusterObj = field._2.as[JsObject] // Map[Int, Array[MotifInfo]]
//val seqTypeObj = field._2.as[JsObject]
//val seqTypeMotifs = new HashMap[String, Array[MotifInfo]]
val clusterMotifs = new HashMap[Int, Array[MotifInfo]]
// iterate over the clusters, which are the keys
for (cluster <- clusterObj.keys) {
// an array of motif objects (motif_num, evalue, annotations, pssm)
// annotations are tuples of (gene, position, pvalue, reverse)
val stResult = clusterObj \ cluster
val motifInfos = (stResult \ "motif-info")
//seqTypeMotifs(seqType) = if (motifInfos.isInstanceOf[JsArray]) {
// readMotifInfos(motifInfos.asInstanceOf[JsArray].value)
clusterMotifs(cluster.toInt) = if (motifInfos.isInstanceOf[JsArray]) {
readMotifInfos(motifInfos.asInstanceOf[JsArray].value)
} else {
Array.ofDim[MotifInfo](0)
}
}
//clusterMotifs(field._1.toInt) = seqTypeMotifs.toMap
seqTypeMotifs(seqType) = clusterMotifs.toMap
}
case _ =>
println("no motif values found")
}
} catch {
case e: Throwable =>
e.printStackTrace
println("\nNo motifs found !!!")
}
Snapshot(clusterRows.toMap, clusterCols.toMap, clusterResiduals.toMap, seqTypeMotifs.toMap)
}
def writes(snapshot: Snapshot): JsValue = JsUndefined("TODO")
}
def readSnapshot(iteration: Int) : Option[Snapshot] = {
val pathname = (OutDirectory + "/" + BaseResultsFileName).format(iteration)
printf("Reading snapshot: %s\\n", pathname)
val infile = new File(pathname)
if (infile.exists) {
val in = new BufferedReader(new FileReader(infile))
val buffer = new StringBuilder
var line = in.readLine
while (line != null) {
buffer.append(line)
line = in.readLine
}
in.close
Some(play.api.libs.json.Json.parse(buffer.toString).as[Snapshot])
} else {
printf("File '%s' does not exist !\\n", infile.getName)
None
}
}
}
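// Hedged usage sketch (the SynonymsMap value is assumed; its construction is not shown here):
//   val reader = new SnapshotReader(new File("out"), synonyms)
//   reader.readSnapshot(42) match {
//     case Some(snapshot) => printf("iteration 42 has %d clusters\n", snapshot.rows.size)
//     case None           => println("no snapshot written for iteration 42")
//   }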
|
jashworth-isb/cmonkey-python
|
cluster_viewer/app/controllers/SnapshotFormat.scala
|
Scala
|
lgpl-3.0
| 5,514
|
package demo
import chandu0101.scalajs.react.components._
import demo.components._
import demo.components.materialui._
import demo.components.{InfoTemplate, LeftNav, LeftNavPage, ScalaCSSTutorial}
import scalacss.Defaults._
import scalacss.ScalaCssReact._
import scalacss.mutable.GlobalRegistry
object AppCSS {
def load() = {
GlobalRegistry.register(LeftNav.Style,
LeftNavPage.Style,
MuiButtonsDemo.Style,
MuiPaperDemo.Style,
MuiSwitchesDemo.Style,
MobileTearSheet.Style,
ReactTable.DefaultStyle,
ReactListView.DefaultStyle,
ReactSearchBox.DefaultStyle,
Pager.DefaultStyle,
ScalaCSSTutorial.Style,
InfoTemplate.Style,
ReactInfiniteDemo.styles,
ReactDraggable.Style,
MuiTabsDemo.Style)
GlobalRegistry.addToDocumentOnRegistration()
}
}
|
elacin/scalajs-react-components
|
demo/src/main/scala/demo/AppCSS.scala
|
Scala
|
apache-2.0
| 833
|
package com.brkyvz.spark.linalg
import org.apache.spark.mllib.linalg.{Matrix, DenseMatrix, SparseMatrix}
trait MatrixLike extends Serializable {
/** Number of rows. */
def numRows: Int
/** Number of columns. */
def numCols: Int
def size: Int = numRows * numCols
def apply(i: Int): Double
import funcs._
def +(y: MatrixLike): LazyMatrix = add(this, y)
def -(y: MatrixLike): LazyMatrix = sub(this, y)
def :*(y: MatrixLike): LazyMatrix = emul(this, y)
def *(y: MatrixLike): LazyMatrix
def /(y: MatrixLike): LazyMatrix = div(this, y)
}
/** Dense and Sparse Matrices can be mutated. Lazy matrices are immutable. */
sealed trait MutableMatrix extends MatrixLike {
override def *(y: MatrixLike): LazyMatrix = {
require(this.numCols == y.numRows || y.isInstanceOf[Scalar],
s"numCols of left side doesn't match numRows of right. ${this.numCols} vs. ${y.numRows}")
y match {
case mm: MutableMatrix => new LazyMM_MMultOp(this, mm)
case lzy: LazyMatrix => new LazyML_MMultOp(this, lzy)
case scalar: Scalar => funcs.emul(this, scalar)
}
}
def *(y: VectorLike): LazyVector = {
require(this.numCols == y.size,
s"numCols of left side doesn't match numRows of right. ${this.numCols} vs. ${y.size}")
y match {
case dn: DenseVectorWrapper => new LazyMM_MV_MultOp(this, dn)
case sp: SparseVectorWrapper => new LazyMM_MV_MultOp(this, sp)
case lzy: LazyVector => new LazyMM_LV_MultOp(this, lzy)
}
}
}
class DenseMatrixWrapper(
override val numRows: Int,
override val numCols: Int,
override val values: Array[Double],
override val isTransposed: Boolean)
extends DenseMatrix(numRows, numCols, values, isTransposed) with MutableMatrix {
def this(numRows: Int, numCols: Int, values: Array[Double]) =
this(numRows, numCols, values, isTransposed = false)
override def apply(i: Int): Double = values(i)
def +=(y: MatrixLike): this.type = {
require(y.numRows == this.numRows || y.isInstanceOf[Scalar],
s"Rows don't match for in-place addition. ${this.numRows} vs. ${y.numRows}")
require(y.numCols == this.numCols || y.isInstanceOf[Scalar],
s"Cols don't match for in-place addition. ${this.numCols} vs. ${y.numCols}")
y match {
case dd: LazyMM_MMultOp =>
new LazyMM_MMultOp(dd.left, dd.right, Option(this.values), 1.0).compute()
case dl: LazyML_MMultOp =>
new LazyML_MMultOp(dl.left, dl.right, Option(this.values), 1.0).compute()
case ld: LazyLM_MMultOp =>
new LazyLM_MMultOp(ld.left, ld.right, Option(this.values), 1.0).compute()
case ll: LazyLL_MMultOp =>
new LazyLL_MMultOp(ll.left, ll.right, Option(this.values), 1.0).compute()
case _ => new LazyImDenseMMOp(this, y, _ + _).compute(Option(this.values))
}
this
}
def :=(y: LazyMatrix): MatrixLike = {
require(y.numRows == this.numRows,
s"Rows don't match for in-place evaluation. ${this.numRows} vs. ${y.numRows}")
require(y.numCols == this.numCols,
s"Cols don't match for in-place evaluation. ${this.numCols} vs. ${y.numCols}")
y match {
case dd: LazyMM_MMultOp =>
new LazyMM_MMultOp(dd.left, dd.right, Option(this.values), 0.0).compute()
case dl: LazyML_MMultOp =>
new LazyML_MMultOp(dl.left, dl.right, Option(this.values), 0.0).compute()
case ld: LazyLM_MMultOp =>
new LazyLM_MMultOp(ld.left, ld.right, Option(this.values), 0.0).compute()
case ll: LazyLL_MMultOp =>
new LazyLL_MMultOp(ll.left, ll.right, Option(this.values), 0.0).compute()
case _ => y.compute(Option(this.values))
}
this
}
}
object DenseMatrixWrapper {
def apply(mat: DenseMatrix): DenseMatrixWrapper =
new DenseMatrixWrapper(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
}
class SparseMatrixWrapper(
override val numRows: Int,
override val numCols: Int,
override val colPtrs: Array[Int],
override val rowIndices: Array[Int],
override val values: Array[Double],
override val isTransposed: Boolean)
extends SparseMatrix(numRows, numCols, colPtrs, rowIndices, values, isTransposed)
with MutableMatrix {
def this(
numRows: Int,
numCols: Int,
colPtrs: Array[Int],
rowIndices: Array[Int],
values: Array[Double]) =
this(numRows, numCols, colPtrs, rowIndices, values, isTransposed = false)
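// Linear index i is interpreted column-major: row i % numRows, column i / numRows.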
override def apply(i: Int): Double = this(i % numRows, i / numRows)
}
object SparseMatrixWrapper {
def apply(mat: SparseMatrix): SparseMatrixWrapper = new SparseMatrixWrapper(mat.numRows,
mat.numCols, mat.colPtrs, mat.rowIndices, mat.values, mat.isTransposed)
}
sealed trait LazyMatrix extends MatrixLike {
def compute(into: Option[Array[Double]] = None): MatrixLike = {
val values = into.getOrElse(new Array[Double](size))
require(values.length == size,
s"Size of buffer (${values.length}) not equal to size of matrix ($size).")
var i = 0
while (i < size) {
values(i) = this(i)
i += 1
}
new DenseMatrixWrapper(numRows, numCols, values)
}
override def *(y: MatrixLike): LazyMatrix = {
require(this.numCols == y.numRows || y.isInstanceOf[Scalar],
s"numCols of left side doesn't match numRows of right. ${this.numCols} vs. ${y.numRows}")
y match {
case mm: MutableMatrix => new LazyLM_MMultOp(this, mm)
case lzy: LazyMatrix => new LazyLL_MMultOp(this, lzy)
case scalar: Scalar => funcs.emul(this, scalar)
}
}
def *(y: VectorLike): LazyVector = {
require(this.numCols == y.size,
s"numCols of left side doesn't match numRows of right. ${this.numCols} vs. ${y.size}")
y match {
case dn: DenseVectorWrapper => new LazyLM_MV_MultOp(this, dn)
case sp: SparseVectorWrapper => new LazyLM_MV_MultOp(this, sp)
case lzy: LazyVector => new LazyLM_LV_MultOp(this, lzy)
}
}
}
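// Hedged usage sketch (values chosen purely for illustration): the arithmetic operators build
// lazy expression trees, and nothing is evaluated until compute() or an in-place := / += call.
//   val a = new DenseMatrixWrapper(2, 2, Array(1.0, 2.0, 3.0, 4.0))
//   val b = new DenseMatrixWrapper(2, 2, Array(4.0, 3.0, 2.0, 1.0))
//   val lazySum = a + b          // LazyMatrix, no work done yet
//   val sum = lazySum.compute()  // materialises into a fresh DenseMatrixWrapper
//   a := a * b                   // evaluates the product directly into a's value buffer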
private[linalg] abstract class LazyMMOp(
left: MatrixLike,
right: MatrixLike,
operation: (Double, Double) => Double) extends LazyMatrix {
require(left.numRows == right.numRows || left.isInstanceOf[Scalar] || right.isInstanceOf[Scalar],
s"Rows don't match for in-place addition. ${left.numRows} vs. ${right.numRows}")
require(left.numCols == right.numCols || left.isInstanceOf[Scalar] || right.isInstanceOf[Scalar],
s"Cols don't match for in-place addition. ${left.numCols} vs. ${right.numCols}")
override def numRows = math.max(left.numRows, right.numRows)
override def numCols = math.max(left.numCols, right.numCols)
}
private[linalg] class LazyImDenseMMOp(
left: MatrixLike,
right: MatrixLike,
operation: (Double, Double) => Double) extends LazyMMOp(left, right, operation) {
override def apply(i: Int): Double = operation(left(i), right(i))
}
private[linalg] case class LazyImDenseScaleOp(
left: Scalar,
right: MatrixLike) extends LazyImDenseMMOp(left, right, _ * _)
private[linalg] class LazyMatrixMapOp(
parent: MatrixLike,
operation: Double => Double) extends LazyMatrix {
override def numRows = parent.numRows
override def numCols = parent.numCols
override def apply(i: Int): Double = operation(parent(i))
}
private[linalg] abstract class LazyMMultOp(
left: MatrixLike,
right: MatrixLike,
into: Option[Array[Double]] = None,
beta: Double = 1.0) extends LazyMatrix {
override def numRows = left.numRows
override def numCols = right.numCols
}
private[linalg] class LazyLL_MMultOp(
val left: LazyMatrix,
val right: LazyMatrix,
into: Option[Array[Double]] = None,
beta: Double = 1.0) extends LazyMMultOp(left, right, into, beta) {
override def apply(i: Int): Double = result(i)
private var buffer: Option[Array[Double]] = into
lazy val result: DenseMatrixWrapper = {
var leftScale = 1.0
val (effLeft: DenseMatrixWrapper, leftRes) = left match {
case scaled: LazyImDenseScaleOp =>
leftScale = scaled.left.value
(scaled.right, None)
case ll: LazyLL_MMultOp =>
if (ll.size < ll.right.size) {
(ll.compute(), None)
} else {
(ll.right.compute(), Option(ll.left))
}
case ld: LazyLM_MMultOp =>
if (ld.size < ld.right.size) {
(ld.compute(), None)
} else {
(ld.right, Option(ld.left))
}
case dl: LazyML_MMultOp =>
if (dl.size < dl.right.size) {
(dl.compute(), None)
} else {
(dl.right.compute(), Option(dl.left))
}
case dd: LazyMM_MMultOp =>
if (dd.size < dd.right.size) {
(dd.compute(), None)
} else {
(dd.right, Option(dd.left))
}
case _ => (left.compute(), None)
}
var rightScale = 1.0
val (effRight: DenseMatrixWrapper, rightRes) = right match {
case scaled: LazyImDenseScaleOp =>
rightScale = scaled.left.value
(scaled.right, None)
case ll: LazyLL_MMultOp =>
if (ll.size < ll.right.size) {
(ll.compute(), None)
} else {
(ll.right.compute(), Option(ll.left))
}
case ld: LazyLM_MMultOp =>
if (ld.size < ld.right.size) {
(ld.compute(), None)
} else {
(ld.right, Option(ld.left))
}
case dl: LazyML_MMultOp =>
if (dl.size < dl.right.size) {
(dl.compute(), None)
} else {
(dl.right.compute(), Option(dl.left))
}
case dd: LazyMM_MMultOp =>
if (dd.size < dd.right.size) {
(dd.compute(), None)
} else {
(dd.right, Option(dd.left))
}
case _ => (right.compute(), None)
}
val middle =
if (leftRes.isEmpty && rightRes.isEmpty) {
val inside = new DenseMatrixWrapper(effLeft.numRows, effRight.numCols,
buffer.getOrElse(new Array[Double](effLeft.numRows * effRight.numCols)))
BLASUtils.gemm(leftScale * rightScale, effLeft, effRight, beta, inside)
inside
} else {
val inside = DenseMatrix.zeros(effLeft.numRows, effRight.numCols)
BLASUtils.gemm(leftScale * rightScale, effLeft, effRight, 1.0, inside)
inside
}
val rebuildRight = rightRes.getOrElse(None) match {
case l: LazyMatrix => new LazyML_MMultOp(middle, l)
case d: DenseMatrixWrapper => new LazyMM_MMultOp(middle, d)
case None => middle
}
leftRes.getOrElse(None) match {
case l: LazyMatrix =>
rebuildRight match {
case r: LazyMatrix => new LazyLL_MMultOp(l, r, buffer, beta).compute()
case d: DenseMatrixWrapper => new LazyLM_MMultOp(l, d, buffer, beta).compute()
}
case ld: DenseMatrixWrapper =>
rebuildRight match {
case r: LazyMatrix => new LazyML_MMultOp(ld, r, buffer, beta).compute()
case d: DenseMatrixWrapper => new LazyMM_MMultOp(ld, d, buffer, beta).compute()
}
case None =>
rebuildRight match {
case r: LazyMM_MMultOp => new LazyMM_MMultOp(r.left, r.right, buffer, beta).compute()
case l: LazyML_MMultOp => new LazyML_MMultOp(l.left, l.right, buffer, beta).compute()
case d: DenseMatrixWrapper => d
}
}
}
override def compute(into: Option[Array[Double]] = None): DenseMatrixWrapper = {
into.foreach(b => buffer = Option(b))
result
}
}
private[linalg] class LazyLM_MMultOp(
val left: LazyMatrix,
val right: MutableMatrix,
into: Option[Array[Double]] = None,
beta: Double = 1.0) extends LazyMMultOp(left, right, into, beta) {
override def apply(i: Int): Double = result(i)
private var buffer: Option[Array[Double]] = into
lazy val result: DenseMatrixWrapper = {
var leftScale = 1.0
val (effLeft: DenseMatrixWrapper, leftRes) = left match {
case scaled: LazyImDenseScaleOp =>
leftScale = scaled.left.value
(scaled.right, None)
case ll: LazyLL_MMultOp =>
if (ll.size < ll.right.size) {
(ll.compute(), None)
} else {
(ll.right.compute(), Option(ll.left))
}
case ld: LazyLM_MMultOp =>
if (ld.size < ld.right.size) {
(ld.compute(), None)
} else {
(ld.right, Option(ld.left))
}
case dl: LazyML_MMultOp =>
if (dl.size < dl.right.size) {
(dl.compute(), None)
} else {
(dl.right.compute(), Option(dl.left))
}
case dd: LazyMM_MMultOp =>
if (dd.size < dd.right.size) {
(dd.compute(), None)
} else {
(dd.right, Option(dd.left))
}
case _ => (left.compute(), None)
}
val middle =
if (leftRes.isEmpty) {
val inside = new DenseMatrixWrapper(effLeft.numRows, right.numCols,
buffer.getOrElse(new Array[Double](effLeft.numRows * right.numCols)))
BLASUtils.gemm(leftScale, effLeft, right, beta, inside)
inside
} else {
val inside = DenseMatrix.zeros(effLeft.numRows, right.numCols)
BLASUtils.gemm(leftScale, effLeft, right, 1.0, inside)
inside
}
leftRes.getOrElse(None) match {
case l: LazyMatrix => new LazyLM_MMultOp(l, middle, buffer, beta).compute()
case ld: DenseMatrixWrapper => new LazyMM_MMultOp(ld, middle, buffer, beta).compute()
case None => middle
}
}
override def compute(into: Option[Array[Double]] = None): DenseMatrixWrapper = {
into.foreach(b => buffer = Option(b))
result
}
}
private[linalg] class LazyML_MMultOp(
val left: MutableMatrix,
val right: LazyMatrix,
into: Option[Array[Double]] = None,
beta: Double = 1.0) extends LazyMMultOp(left, right, into, beta) {
override def apply(i: Int): Double = result(i)
private var buffer: Option[Array[Double]] = into
lazy val result: DenseMatrixWrapper = {
var rightScale = 1.0
val (effRight: DenseMatrixWrapper, rightRes) = right match {
case scaled: LazyImDenseScaleOp =>
rightScale = scaled.left.value
(scaled.right, None)
case ll: LazyLL_MMultOp =>
if (ll.size < ll.right.size) {
(ll.compute(), None)
} else {
(ll.right.compute(), Option(ll.left))
}
case ld: LazyLM_MMultOp =>
if (ld.size < ld.right.size) {
(ld.compute(), None)
} else {
(ld.right, Option(ld.left))
}
case dl: LazyML_MMultOp =>
if (dl.size < dl.right.size) {
(dl.compute(), None)
} else {
(dl.right.compute(), Option(dl.left))
}
case dd: LazyMM_MMultOp =>
if (dd.size < dd.right.size) {
(dd.compute(), None)
} else {
(dd.right, Option(dd.left))
}
case _ => (right.compute(), None)
}
val middle =
if (rightRes.isEmpty) {
val inside = new DenseMatrixWrapper(left.numRows, effRight.numCols,
buffer.getOrElse(new Array[Double](left.numRows * effRight.numCols)))
BLASUtils.gemm(rightScale, left, effRight, beta, inside)
inside
} else {
val inside = DenseMatrix.zeros(left.numRows, effRight.numCols)
BLASUtils.gemm(rightScale, left, effRight, 0.0, inside)
inside
}
rightRes.getOrElse(None) match {
case l: LazyMatrix => new LazyML_MMultOp(middle, l, buffer, beta).compute()
case d: DenseMatrixWrapper => new LazyMM_MMultOp(middle, d, buffer, beta).compute()
case None => middle
}
}
override def compute(into: Option[Array[Double]] = None): DenseMatrixWrapper = {
into.foreach(b => buffer = Option(b))
result
}
}
private[linalg] class LazyMM_MMultOp(
val left: MutableMatrix,
val right: MutableMatrix,
into: Option[Array[Double]] = None,
beta: Double = 1.0) extends LazyMMultOp(left, right, into, beta) {
override def apply(i: Int): Double = result(i)
private var buffer: Option[Array[Double]] = into
lazy val result: DenseMatrixWrapper = {
val inside = new DenseMatrixWrapper(left.numRows, right.numCols,
buffer.getOrElse(new Array[Double](left.numRows * right.numCols)))
BLASUtils.gemm(1.0, left, right, beta, inside)
inside
}
override def compute(into: Option[Array[Double]] = None): DenseMatrixWrapper = {
into.foreach(b => buffer = Option(b))
result
}
}
|
brkyvz/lazy-linalg
|
src/main/scala/com/brkyvz/spark/linalg/MatrixLike.scala
|
Scala
|
apache-2.0
| 16,299
|
package com.twitter.finagle.thrift
import com.twitter.finagle._
import com.twitter.finagle.tracing._
import com.twitter.finagle.util.ByteArrays
import com.twitter.util.Future
import java.net.InetSocketAddress
import org.apache.thrift.protocol.{TMessage, TMessageType, TProtocolFactory, TBinaryProtocol}
import org.apache.thrift.{TApplicationException, TException}
import org.jboss.netty.channel.{
ChannelHandlerContext,
SimpleChannelDownstreamHandler, MessageEvent, Channels,
ChannelPipelineFactory}
import org.jboss.netty.buffer.ChannelBuffers
object ThriftServerFramedCodec {
def apply() = new ThriftServerFramedCodecFactory
def apply(protocolFactory: TProtocolFactory) =
new ThriftServerFramedCodecFactory(protocolFactory)
def get() = apply()
}
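// Hedged usage sketch (old ServerBuilder-style wiring; the address, name and byte-array
// service are assumptions for illustration):
//   val server = ServerBuilder()
//     .codec(ThriftServerFramedCodec())
//     .bindTo(new InetSocketAddress(9090))
//     .name("thrift-server")
//     .build(byteArrayService)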
class ThriftServerFramedCodecFactory(protocolFactory: TProtocolFactory)
extends CodecFactory[Array[Byte], Array[Byte]]#Server
{
def this() = this(new TBinaryProtocol.Factory())
def apply(config: ServerCodecConfig) =
new ThriftServerFramedCodec(config, protocolFactory)
}
class ThriftServerFramedCodec(
config: ServerCodecConfig,
protocolFactory: TProtocolFactory = new TBinaryProtocol.Factory()
) extends Codec[Array[Byte], Array[Byte]] {
def pipelineFactory =
new ChannelPipelineFactory {
def getPipeline() = {
val pipeline = Channels.pipeline()
pipeline.addLast("thriftFrameCodec", new ThriftFrameCodec)
pipeline.addLast("byteEncoder", new ThriftServerChannelBufferEncoder)
pipeline.addLast("byteDecoder", new ThriftChannelBufferDecoder)
pipeline
}
}
private[this] val boundAddress = config.boundAddress match {
case ia: InetSocketAddress => ia
case _ => new InetSocketAddress(0)
}
private[this] val preparer = ThriftServerPreparer(
protocolFactory, config.serviceName, boundAddress)
override def prepareConnFactory(factory: ServiceFactory[Array[Byte], Array[Byte]]) =
preparer.prepare(factory)
}
private case class ThriftServerPreparer(
protocolFactory: TProtocolFactory,
serviceName: String,
boundAddress: InetSocketAddress) {
private[this] val uncaughtExceptionsFilter =
new HandleUncaughtApplicationExceptions(protocolFactory)
def prepare(
factory: ServiceFactory[Array[Byte], Array[Byte]]
): ServiceFactory[Array[Byte], Array[Byte]] = factory map { service =>
val trace = new ThriftServerTracingFilter(
serviceName, boundAddress, protocolFactory)
trace andThen uncaughtExceptionsFilter andThen service
}
}
private[thrift] class ThriftServerChannelBufferEncoder
extends SimpleChannelDownstreamHandler
{
override def writeRequested(ctx: ChannelHandlerContext, e: MessageEvent) = {
e.getMessage match {
// A non-empty array is a normal reply; an empty array indicates a oneway reply (handled in the next case).
case array: Array[Byte] if (!array.isEmpty) =>
val buffer = ChannelBuffers.wrappedBuffer(array)
Channels.write(ctx, e.getFuture, buffer)
case array: Array[Byte] =>
e.getFuture.setSuccess()
case _ => throw new IllegalArgumentException("no byte array")
}
}
}
private[finagle]
class HandleUncaughtApplicationExceptions(protocolFactory: TProtocolFactory)
extends SimpleFilter[Array[Byte], Array[Byte]]
{
def apply(request: Array[Byte], service: Service[Array[Byte], Array[Byte]]) =
service(request) handle {
case e if !e.isInstanceOf[TException] =>
// NB! This is technically incorrect for one-way calls,
// but we have no way of knowing it here. We may
// consider simply not supporting one-way calls at all.
val msg = InputBuffer.readMessageBegin(request, protocolFactory)
val name = msg.name
val buffer = new OutputBuffer(protocolFactory)
buffer().writeMessageBegin(
new TMessage(name, TMessageType.EXCEPTION, msg.seqid))
// Note: The wire contents of the exception message differ from Apache's Thrift in that here,
// e.toString is appended to the error message.
val x = new TApplicationException(
TApplicationException.INTERNAL_ERROR,
"Internal error processing " + name + ": '" + e + "'")
x.write(buffer())
buffer().writeMessageEnd()
buffer.toArray
}
}
private[thrift] class ThriftServerTracingFilter(
serviceName: String,
boundAddress: InetSocketAddress,
protocolFactory: TProtocolFactory
) extends SimpleFilter[Array[Byte], Array[Byte]]
{
// Concurrency is not an issue here since we have an instance per
// channel, and receive only one request at a time (thrift does no
// pipelining). Furthermore, finagle will guarantee this by
// serializing requests.
private[this] var isUpgraded = false
private[this] lazy val successfulUpgradeReply = Future {
val buffer = new OutputBuffer(protocolFactory)
buffer().writeMessageBegin(
new TMessage(ThriftTracing.CanTraceMethodName, TMessageType.REPLY, 0))
val upgradeReply = new thrift.UpgradeReply
upgradeReply.write(buffer())
buffer().writeMessageEnd()
// Note: currently there are no options, so there's no need
// to parse them out.
buffer.toArray
}
def apply(request: Array[Byte], service: Service[Array[Byte], Array[Byte]]) = Trace.unwind {
// What to do on exceptions here?
if (isUpgraded) {
val header = new thrift.RequestHeader
val request_ = InputBuffer.peelMessage(request, header, protocolFactory)
val msg = new InputBuffer(request_, protocolFactory)().readMessageBegin()
val sampled = if (header.isSetSampled) Some(header.isSampled) else None
// Some(true): trace this request. None: the client made no sampling decision, so we get to decide.
val traceId = TraceId(
if (header.isSetTrace_id) Some(SpanId(header.getTrace_id)) else None,
if (header.isSetParent_span_id) Some(SpanId(header.getParent_span_id)) else None,
SpanId(header.getSpan_id),
sampled,
if (header.isSetFlags) Flags(header.getFlags) else Flags())
Trace.setId(traceId)
Trace.recordRpcname(serviceName, msg.name)
Trace.record(Annotation.ServerRecv())
try {
ClientId.set(extractClientId(header))
service(request_) map {
case response if response.isEmpty => response
case response =>
Trace.record(Annotation.ServerSend())
val responseHeader = new thrift.ResponseHeader
ByteArrays.concat(OutputBuffer.messageToArray(responseHeader, protocolFactory), response)
}
} finally {
ClientId.clear()
}
} else {
val buffer = new InputBuffer(request, protocolFactory)
val msg = buffer().readMessageBegin()
// TODO: only try once?
if (msg.`type` == TMessageType.CALL &&
msg.name == ThriftTracing.CanTraceMethodName) {
val connectionOptions = new thrift.ConnectionOptions
connectionOptions.read(buffer())
// upgrade & reply.
isUpgraded = true
successfulUpgradeReply
} else {
// request from client without tracing support
Trace.recordRpcname(serviceName, msg.name)
Trace.record(Annotation.ServerRecv())
Trace.record("finagle.thrift.noUpgrade")
service(request) map { response =>
Trace.record(Annotation.ServerSend())
response
}
}
}
}
private[this] def extractClientId(header: thrift.RequestHeader) = {
Option(header.client_id) map { clientId => ClientId(clientId.name) }
}
}
|
firebase/finagle
|
finagle-thrift/src/main/scala/com/twitter/finagle/thrift/ThriftServerFramedCodec.scala
|
Scala
|
apache-2.0
| 7,517
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.{StreamTableTestUtil, TableTestBase}
import org.junit.Test
class SortTest extends TableTestBase {
private val streamUtil: StreamTableTestUtil = streamTestUtil()
private val table = streamUtil.addTable[(Int, String, Long)]("MyTable", 'a, 'b, 'c,
'proctime.proctime, 'rowtime.rowtime)
@Test
def testSortProcessingTime(): Unit = {
val sqlQuery = "SELECT a FROM MyTable ORDER BY proctime, c"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode("DataStreamSort",
streamTableNode(table),
term("orderBy", "proctime ASC", "c ASC")),
term("select", "a", "PROCTIME(proctime) AS proctime", "c"))
streamUtil.verifySql(sqlQuery, expected)
}
@Test
def testSortRowTime(): Unit = {
val sqlQuery = "SELECT a FROM MyTable ORDER BY rowtime, c"
val expected =
unaryNode(
"DataStreamCalc",
unaryNode("DataStreamSort",
streamTableNode(table),
term("orderBy", "rowtime ASC, c ASC")),
term("select", "a", "rowtime", "c"))
streamUtil.verifySql(sqlQuery, expected)
}
}
|
tzulitai/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/sql/SortTest.scala
|
Scala
|
apache-2.0
| 2,179
|
//package io.skysail.core.app.resources
//
//import io.skysail.core.restlet.SkysailServerResource
//import io.skysail.core.restlet.resources.RedirectResourceOld
//import io.skysail.core.um.domain.Credentials
//
//class LoginResource extends RedirectResourceOld[Credentials] {
// def getEntity(): Credentials = null
//
// def redirectToResource(): SkysailServerResource[_] = {
// null
// }
//
//// override def redirectTo(): String = {
//// val app = getSkysailApplication()
//// if (app.isAuthenticated(getRequest())) {
//// return "/";
//// }
//// return app.getSkysailApplication().getAuthenticationService().getLoginPath();
//// }
//
//}
|
evandor/skysail-core
|
skysail.core/src/io/skysail/core/app/resources/LoginResource.scala
|
Scala
|
apache-2.0
| 665
|
package jp.seraphr.collection.wrapper
import java.util.List
import java.lang.Iterable
import jp.seraphr.collection.builder.ListWrapperBuilder
import jp.seraphr.common.Converter
import jp.seraphr.common.Tuple2
class ListWrapper[_Elem](aBase: List[_Elem]) extends Wrapper[_Elem, ListWrapper[_Elem]] {
type _Container[X] = List[X]
type _Base = List[_Elem]
type _This = ListWrapper[_Elem]
def map[_ToElem](aConverter: Converter[_Elem, _ToElem]): ListWrapper[_ToElem] = {
mapInner(aConverter)(new ListWrapperBuilder[_ToElem])
}
def zip[_ThatElem](aThat: Iterable[_ThatElem]): ListWrapper[Tuple2[_Elem, _ThatElem]] = {
zipInner(aThat)(new ListWrapperBuilder[Tuple2[_Elem, _ThatElem]])
}
override protected def myBuilder = new ListWrapperBuilder[_Elem]
override val unwrap = aBase
override protected def toIterable(aBase: _Base): _Container[_Elem] = {
// require(aBase != null)
aBase
}
}
|
seraphr/collection-wrapper
|
src/main/scala/jp/seraphr/collection/wrapper/ListWrapper.scala
|
Scala
|
bsd-2-clause
| 925
|
package astrac.springy
case class Book(isbn: String, title: String, author: String)
object Fixtures {
val sirensOfTitan = Book("0000000000000", "The Sirens Of Titan", "Kurt Vonnegut")
val protocolsOfTralfamadore = Book("1111111111111", "The Protocols of the Elders of Tralfamadore", "Kilgore Trout")
val gospelFromOuterSpace = Book("2222222222222", "The Gospel From Outer Space", "Kilgore Trout")
val slaughterhouseFive = Book("3333333333333", "Slaughterhouse Five", "Kurt Vonnegut")
}
|
Astrac/springy
|
src/test/scala/astrac/springy/fixtures.scala
|
Scala
|
mit
| 495
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.plans.physical.SinglePartition
import org.apache.spark.sql.test.SharedSQLContext
class ExchangeSuite extends SparkPlanTest with SharedSQLContext {
test("shuffling UnsafeRows in exchange") {//在Shuffle交换中的不安全行
val input = (1 to 1000).map(Tuple1.apply)
checkAnswer(
input.toDF(),
plan => ConvertToSafe(Exchange(SinglePartition, ConvertToUnsafe(plan))),
input.map(Row.fromTuple)
)
}
}
|
tophua/spark1.52
|
sql/core/src/test/scala/org/apache/spark/sql/execution/ExchangeSuite.scala
|
Scala
|
apache-2.0
| 1,345
|
package com.twitter.scrooge
import com.twitter.scrooge.thrift_validation.ThriftConstraintValidator
object LongAnnotationValueConstraintValidator extends ThriftConstraintValidator[Long, Long] {
/**
* The IDL annotation for this constraint validator is validation.longEquals = "7L"
* where the annotation value is an integer.
*/
override def annotationClazz: Class[Long] = classOf[Long]
override def fieldClazz: Class[Long] = classOf[Long]
override def violationMessage(
obj: Long,
annotation: Long
): String = s"$obj does not equal to $annotation."
/** return true if the value of `obj` == the value of `annotation`. */
override def isValid(
obj: Long,
annotation: Long
): Boolean = obj == annotation
}
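// Behaviour sketch (values chosen for illustration only):
//   LongAnnotationValueConstraintValidator.isValid(7L, 7L)            // true
//   LongAnnotationValueConstraintValidator.isValid(6L, 7L)            // false
//   LongAnnotationValueConstraintValidator.violationMessage(6L, 7L)   // "6 does not equal to 7."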
|
twitter/scrooge
|
scrooge-core/src/test/scala/com/twitter/scrooge/LongAnnotationValueConstraintValidator.scala
|
Scala
|
apache-2.0
| 749
|