code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package com.ubirch.chain.core.actor.util

import com.ubirch.chain.config.ChainConfig
import akka.routing.RoundRobinPool

/**
  * Shared helpers for chain-service actors: round-robin router pools and
  * Camel SQS endpoint URI construction.
  *
  * author: cvandrei
  * since: 2017-06-26
  */
trait ActorTools {

  /** Round-robin router pool sized by `ChainConfig.akkaNumberOfWorkers`. */
  def roundRobin(): RoundRobinPool = new RoundRobinPool(ChainConfig.akkaNumberOfWorkers)

  /**
    * Camel endpoint URI for the given SQS queue; region, queue owner and
    * credentials are all taken from `ChainConfig`.
    */
  def sqsEndpoint(sqsQueueName: String): String =
    s"aws-sqs://$sqsQueueName?region=${ChainConfig.awsRegion}&queueOwnerAWSAccountId=${ChainConfig.awsQueueOwnerId}&accessKey=${ChainConfig.awsAccessKey}&secretKey=${ChainConfig.awsSecretAccessKey}"

  /** Consumer variant of [[sqsEndpoint]] that additionally caps messages fetched per poll. */
  def sqsEndpointConsumer(queue: String): String =
    s"${sqsEndpoint(queue)}&maxMessagesPerPoll=${ChainConfig.awsSqsMaxMessagesPerPoll}"
}
ubirch/ubirch-chain-service
core/src/main/scala/com/ubirch/chain/core/actor/util/ActorTools.scala
Scala
apache-2.0
688
// NOTE(review): Scala.js port of org.joda.time.LocalTime — an immutable
// time-of-day value (hour/minute/second/millis) without date or time zone,
// backed by millis-of-day in a UTC chronology.
// NOTE(review): the original line breaks appear to have been lost in this copy
// of the file — consecutive `import` statements and member definitions sit on
// single physical lines, which does not parse as Scala. Tokens below are left
// byte-identical; restore formatting from upstream before compiling.
// NOTE(review): the primary constructor normalizes the chronology to UTC and
// recomputes iLocalMillis after an initial non-normalized computation — looks
// redundant but mirrors the Joda original; confirm before simplifying.
package org.joda.time import java.io.ObjectInputStream import java.io.ObjectOutputStream import java.io.Serializable import java.util.Calendar import java.util.Date import java.util.Locale import org.joda.convert.{ToString, FromString} import org.joda.time.base.BaseLocal import org.joda.time.chrono.ISOChronology import org.joda.time.convert.ConverterManager import org.joda.time.field.AbstractReadableInstantFieldProperty import org.joda.time.format.DateTimeFormat import org.joda.time.format.DateTimeFormatter import org.joda.time.format.ISODateTimeFormat import LocalTime._ object LocalTime { val MIDNIGHT = new LocalTime(0, 0, 0, 0) private val HOUR_OF_DAY = 0 private val MINUTE_OF_HOUR = 1 private val SECOND_OF_MINUTE = 2 private val MILLIS_OF_SECOND = 3 private val TIME_DURATION_TYPES = new collection.mutable.HashSet[DurationFieldType]() TIME_DURATION_TYPES.add(DurationFieldType.millis()) TIME_DURATION_TYPES.add(DurationFieldType.seconds()) TIME_DURATION_TYPES.add(DurationFieldType.minutes()) TIME_DURATION_TYPES.add(DurationFieldType.hours()) def now(): LocalTime = new LocalTime() def now(zone: DateTimeZone): LocalTime = { if (zone == null) { throw new NullPointerException("Zone must not be null") } new LocalTime(zone) } def now(chronology: Chronology): LocalTime = { if (chronology == null) { throw new NullPointerException("Chronology must not be null") } new LocalTime(chronology) } @FromString def parse(str: String): LocalTime = { parse(str, ISODateTimeFormat.localTimeParser()) } def parse(str: String, formatter: DateTimeFormatter): LocalTime = formatter.parseLocalTime(str) def fromMillisOfDay(millisOfDay: Long): LocalTime = fromMillisOfDay(millisOfDay, null) def fromMillisOfDay(millisOfDay: Long, chrono: Chronology): LocalTime = { var _chrono: Chronology = chrono _chrono = DateTimeUtils.getChronology(_chrono).withUTC() new LocalTime(millisOfDay, _chrono) } def fromCalendarFields(calendar: Calendar): LocalTime = { if (calendar == null) { throw new 
IllegalArgumentException("The calendar must not be null") } new LocalTime(calendar.get(Calendar.HOUR_OF_DAY), calendar.get(Calendar.MINUTE), calendar.get(Calendar.SECOND), calendar.get(Calendar.MILLISECOND)) } def fromDateFields(date: Date): LocalTime = { if (date == null) { throw new IllegalArgumentException("The date must not be null") } new LocalTime(date.getHours, date.getMinutes, date.getSeconds, ((date.getTime % 1000).toInt + 1000) % 1000) } @SerialVersionUID(-325842547277223L) class Property(@transient private var iInstant: LocalTime, @transient private var iField: DateTimeField) extends AbstractReadableInstantFieldProperty() { private def writeObject(oos: ObjectOutputStream) { oos.writeObject(iInstant) oos.writeObject(iField.getType) } private def readObject(oos: ObjectInputStream) { iInstant = oos.readObject().asInstanceOf[LocalTime] val `type` = oos.readObject().asInstanceOf[DateTimeFieldType] iField = `type`.getField(iInstant.getChronology) } def getField(): DateTimeField = iField protected def getMillis(): Long = iInstant.getLocalMillis override protected def getChronology(): Chronology = iInstant.getChronology def getLocalTime(): LocalTime = iInstant def addCopy(value: Int): LocalTime = { iInstant.withLocalMillis(iField.add(iInstant.getLocalMillis, value)) } def addCopy(value: Long): LocalTime = { iInstant.withLocalMillis(iField.add(iInstant.getLocalMillis, value)) } def addNoWrapToCopy(value: Int): LocalTime = { val millis = iField.add(iInstant.getLocalMillis, value) val rounded = iInstant.getChronology.millisOfDay().get(millis) if (rounded != millis) { throw new IllegalArgumentException( "The addition exceeded the boundaries of LocalTime") } iInstant.withLocalMillis(millis) } def addWrapFieldToCopy(value: Int): LocalTime = { iInstant.withLocalMillis( iField.addWrapField(iInstant.getLocalMillis, value)) } def setCopy(value: Int): LocalTime = { iInstant.withLocalMillis(iField.set(iInstant.getLocalMillis, value)) } def setCopy(text: String, locale: 
Locale): LocalTime = { iInstant.withLocalMillis( iField.set(iInstant.getLocalMillis, text, locale)) } def setCopy(text: String): LocalTime = setCopy(text, null) def withMaximumValue(): LocalTime = setCopy(getMaximumValue) def withMinimumValue(): LocalTime = setCopy(getMinimumValue) def roundFloorCopy(): LocalTime = { iInstant.withLocalMillis(iField.roundFloor(iInstant.getLocalMillis)) } def roundCeilingCopy(): LocalTime = { iInstant.withLocalMillis(iField.roundCeiling(iInstant.getLocalMillis)) } def roundHalfFloorCopy(): LocalTime = { iInstant.withLocalMillis(iField.roundHalfFloor(iInstant.getLocalMillis)) } def roundHalfCeilingCopy(): LocalTime = { iInstant.withLocalMillis( iField.roundHalfCeiling(iInstant.getLocalMillis)) } def roundHalfEvenCopy(): LocalTime = { iInstant.withLocalMillis(iField.roundHalfEven(iInstant.getLocalMillis)) } } } @SerialVersionUID(-12873158713873L) class LocalTime(instant: Long, private var chronology: Chronology) extends BaseLocal with ReadablePartial with Serializable { private val localMillis = chronology.getZone.getMillisKeepLocal(DateTimeZone.UTC, instant) private var iChronology: Chronology = null private var iLocalMillis: Long = chronology.millisOfDay().get(localMillis) chronology = DateTimeUtils.getChronology(chronology) chronology = chronology.withUTC() iLocalMillis = chronology.millisOfDay.get(localMillis) iChronology = chronology def this() { this(DateTimeUtils.currentTimeMillis(), ISOChronology.getInstance) } def this(zone: DateTimeZone) { this(DateTimeUtils.currentTimeMillis(), ISOChronology.getInstance(zone)) } def this(chronology: Chronology) { this(DateTimeUtils.currentTimeMillis(), chronology) } def this(instant: Long) { this(instant, ISOChronology.getInstance) } def this(instant: Long, zone: DateTimeZone) { this(instant, ISOChronology.getInstance(zone)) } def this(instant: AnyRef, zone: DateTimeZone) { this() val converter = ConverterManager.getInstance.getPartialConverter(instant) var chronology = 
converter.getChronology(instant, zone) chronology = DateTimeUtils.getChronology(chronology) iChronology = chronology.withUTC() val values = converter.getPartialValues( this, instant, chronology, ISODateTimeFormat.localTimeParser()) iLocalMillis = iChronology .getDateTimeMillis(0L, values(0), values(1), values(2), values(3)) } def this(instant: AnyRef, chronology: Chronology) { this() var _chronology: Chronology = chronology val converter = ConverterManager.getInstance.getPartialConverter(instant) _chronology = converter.getChronology(instant, _chronology) _chronology = DateTimeUtils.getChronology(_chronology) iChronology = _chronology.withUTC() val values = converter.getPartialValues( this, instant, _chronology, ISODateTimeFormat.localTimeParser()) iLocalMillis = iChronology .getDateTimeMillis(0L, values(0), values(1), values(2), values(3)) } def this(instant: AnyRef) { this(instant, null.asInstanceOf[Chronology]) } def this(hourOfDay: Int, minuteOfHour: Int, secondOfMinute: Int, millisOfSecond: Int, chronology: Chronology) { this() var _chronology: Chronology = chronology _chronology = DateTimeUtils.getChronology(_chronology).withUTC() val instant = _chronology.getDateTimeMillis(0L, hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond) iChronology = _chronology iLocalMillis = instant } def this(hourOfDay: Int, minuteOfHour: Int) { this(hourOfDay, minuteOfHour, 0, 0, ISOChronology.getInstanceUTC) } def this(hourOfDay: Int, minuteOfHour: Int, secondOfMinute: Int) { this(hourOfDay, minuteOfHour, secondOfMinute, 0, ISOChronology.getInstanceUTC) } def this(hourOfDay: Int, minuteOfHour: Int, secondOfMinute: Int, millisOfSecond: Int) { this(hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond, ISOChronology.getInstanceUTC) } private def readResolve(): AnyRef = { if (iChronology == null) { return new LocalTime(iLocalMillis, ISOChronology.getInstanceUTC) } if (!(DateTimeZone.UTC == iChronology.getZone)) { return new LocalTime(iLocalMillis, iChronology.withUTC()) } 
this } def size(): Int = 4 protected def getField(index: Int, chrono: Chronology): DateTimeField = index match { case HOUR_OF_DAY => chrono.hourOfDay() case MINUTE_OF_HOUR => chrono.minuteOfHour() case SECOND_OF_MINUTE => chrono.secondOfMinute() case MILLIS_OF_SECOND => chrono.millisOfSecond() case _ => throw new IndexOutOfBoundsException("Invalid index: " + index) } def getValue(index: Int): Int = index match { case HOUR_OF_DAY => getChronology.hourOfDay().get(getLocalMillis) case MINUTE_OF_HOUR => getChronology.minuteOfHour().get(getLocalMillis) case SECOND_OF_MINUTE => getChronology.secondOfMinute().get(getLocalMillis) case MILLIS_OF_SECOND => getChronology.millisOfSecond().get(getLocalMillis) case _ => throw new IndexOutOfBoundsException("Invalid index: " + index) } override def get(fieldType: DateTimeFieldType): Int = { if (fieldType == null) { throw new IllegalArgumentException( "The DateTimeFieldType must not be null") } if (!isSupported(fieldType)) { throw new IllegalArgumentException( "Field '" + fieldType + "' is not supported") } fieldType.getField(getChronology).get(getLocalMillis) } override def isSupported(`type`: DateTimeFieldType): Boolean = { if (`type` == null) { return false } if (isSupported(`type`.getDurationType) == false) { return false } val range = `type`.getRangeDurationType isSupported(range) || range == DurationFieldType.days() } def isSupported(`type`: DurationFieldType): Boolean = { if (`type` == null) { return false } val field = `type`.getField(getChronology) if (TIME_DURATION_TYPES.contains(`type`) || field.getUnitMillis < getChronology.days().getUnitMillis) { return field.isSupported } false } def getLocalMillis(): Long = iLocalMillis def getChronology(): Chronology = iChronology override def equals(partial: Any): Boolean = { partial match { case other: LocalTime => if (this eq other) { return true } if (iChronology == other.iChronology) { return iLocalMillis == other.iLocalMillis } case _ => } super.equals(partial) } override def 
compareTo(partial: ReadablePartial): Int = { if (this == partial) { return 0 } if (partial.isInstanceOf[LocalTime]) { val other = partial.asInstanceOf[LocalTime] if (iChronology == other.iChronology) { return if (iLocalMillis < other.iLocalMillis) -1 else if (iLocalMillis == other.iLocalMillis) 0 else 1 } } super.compareTo(partial) } def withLocalMillis(newMillis: Long): LocalTime = { if (newMillis == getLocalMillis) this else new LocalTime(newMillis, getChronology) } def withFields(partial: ReadablePartial): LocalTime = { if (partial == null) { return this } withLocalMillis(getChronology.set(partial, getLocalMillis)) } def withField(fieldType: DateTimeFieldType, value: Int): LocalTime = { if (fieldType == null) { throw new IllegalArgumentException("Field must not be null") } if (!isSupported(fieldType)) { throw new IllegalArgumentException( "Field '" + fieldType + "' is not supported") } val instant = fieldType.getField(getChronology).set(getLocalMillis, value) withLocalMillis(instant) } def withFieldAdded(fieldType: DurationFieldType, amount: Int): LocalTime = { if (fieldType == null) { throw new IllegalArgumentException("Field must not be null") } if (!isSupported(fieldType)) { throw new IllegalArgumentException( "Field '" + fieldType + "' is not supported") } if (amount == 0) { return this } val instant = fieldType.getField(getChronology).add(getLocalMillis, amount) withLocalMillis(instant) } def withPeriodAdded(period: ReadablePeriod, scalar: Int): LocalTime = { if (period == null || scalar == 0) { return this } val instant = getChronology.add(period, getLocalMillis, scalar) withLocalMillis(instant) } def plus(period: ReadablePeriod): LocalTime = withPeriodAdded(period, 1) def plusHours(hours: Int): LocalTime = { if (hours == 0) { return this } val instant = getChronology.hours().add(getLocalMillis, hours) withLocalMillis(instant) } def plusMinutes(minutes: Int): LocalTime = { if (minutes == 0) { return this } val instant = 
getChronology.minutes().add(getLocalMillis, minutes) withLocalMillis(instant) } def plusSeconds(seconds: Int): LocalTime = { if (seconds == 0) { return this } val instant = getChronology.seconds().add(getLocalMillis, seconds) withLocalMillis(instant) } def plusMillis(millis: Int): LocalTime = { if (millis == 0) { return this } val instant = getChronology.millis().add(getLocalMillis, millis) withLocalMillis(instant) } def minus(period: ReadablePeriod): LocalTime = withPeriodAdded(period, -1) def minusHours(hours: Int): LocalTime = { if (hours == 0) { return this } val instant = getChronology.hours().subtract(getLocalMillis, hours) withLocalMillis(instant) } def minusMinutes(minutes: Int): LocalTime = { if (minutes == 0) { return this } val instant = getChronology.minutes().subtract(getLocalMillis, minutes) withLocalMillis(instant) } def minusSeconds(seconds: Int): LocalTime = { if (seconds == 0) { return this } val instant = getChronology.seconds().subtract(getLocalMillis, seconds) withLocalMillis(instant) } def minusMillis(millis: Int): LocalTime = { if (millis == 0) { return this } val instant = getChronology.millis().subtract(getLocalMillis, millis) withLocalMillis(instant) } def property(fieldType: DateTimeFieldType): Property = { if (fieldType == null) { throw new IllegalArgumentException( "The DateTimeFieldType must not be null") } if (isSupported(fieldType) == false) { throw new IllegalArgumentException( "Field '" + fieldType + "' is not supported") } new Property(this, fieldType.getField(getChronology)) } def getHourOfDay(): Int = { getChronology.hourOfDay().get(getLocalMillis) } def getMinuteOfHour(): Int = { getChronology.minuteOfHour().get(getLocalMillis) } def getSecondOfMinute(): Int = { getChronology.secondOfMinute().get(getLocalMillis) } def getMillisOfSecond(): Int = { getChronology.millisOfSecond().get(getLocalMillis) } def getMillisOfDay(): Int = { getChronology.millisOfDay().get(getLocalMillis) } def withHourOfDay(hour: Int): LocalTime = { 
withLocalMillis(getChronology.hourOfDay().set(getLocalMillis, hour)) } def withMinuteOfHour(minute: Int): LocalTime = { withLocalMillis(getChronology.minuteOfHour().set(getLocalMillis, minute)) } def withSecondOfMinute(second: Int): LocalTime = { withLocalMillis(getChronology.secondOfMinute().set(getLocalMillis, second)) } def withMillisOfSecond(millis: Int): LocalTime = { withLocalMillis(getChronology.millisOfSecond().set(getLocalMillis, millis)) } def withMillisOfDay(millis: Int): LocalTime = { withLocalMillis(getChronology.millisOfDay().set(getLocalMillis, millis)) } def hourOfDay(): Property = { new Property(this, getChronology.hourOfDay()) } def minuteOfHour(): Property = { new Property(this, getChronology.minuteOfHour()) } def secondOfMinute(): Property = { new Property(this, getChronology.secondOfMinute()) } def millisOfSecond(): Property = { new Property(this, getChronology.millisOfSecond()) } def millisOfDay(): Property = { new Property(this, getChronology.millisOfDay()) } def toDateTimeToday(): DateTime = toDateTimeToday(null) def toDateTimeToday(zone: DateTimeZone): DateTime = { val chrono = getChronology.withZone(zone) val instantMillis = DateTimeUtils.currentTimeMillis() val resolved = chrono.set(this, instantMillis) new DateTime(resolved, chrono) } @ToString override def toString(): String = ISODateTimeFormat.time().print(this) def toString(pattern: String): String = { if (pattern == null) { return toString } DateTimeFormat.forPattern(pattern).print(this) } def toString(pattern: String, locale: Locale): String = { if (pattern == null) { return toString } DateTimeFormat.forPattern(pattern).withLocale(locale).print(this) } }
mdedetrich/soda-time
js/src/main/scala/org/joda/time/LocalTime.scala
Scala
bsd-2-clause
17,875
// NOTE(review): JMS connection state machine (disconnected / connecting /
// connected / closing) implemented with context.become; it pings the
// underlying connection on a schedule, retries failed connects, and escalates
// to a container restart when close or reconnect limits are exceeded.
// Fix in this revision: removed the unused dead local `val n = 2` from the
// closing() state handler — it was never read anywhere.
// NOTE(review): the original line breaks appear to have been lost in this copy
// of the file (imports and members share single physical lines, and `//`
// comments would swallow trailing code) — restore formatting from upstream
// before compiling; all other tokens are left untouched.
package blended.jms.utils.internal import java.text.SimpleDateFormat import java.util.Date import java.util.concurrent.atomic.AtomicLong import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, Props} import akka.event.LoggingReceive import blended.jms.utils.internal.ConnectionState._ import blended.jms.utils.{BlendedJMSConnection, ConnectionConfig, ConnectionException} import javax.jms.Connection import scala.concurrent.duration._ object ConnectionStateManager { def props( config: ConnectionConfig, monitor: ActorRef, holder: ConnectionHolder ) : Props = Props(new ConnectionStateManager(config, monitor, holder)) } class ConnectionStateManager(config: ConnectionConfig, monitor: ActorRef, holder: ConnectionHolder) extends Actor with ActorLogging { type StateReceive = ConnectionState => Receive val df = new SimpleDateFormat("yyyyMMdd-HHmmss-SSS") implicit val eCtxt = context.system.dispatcher val provider = config.provider val vendor = config.vendor var conn : Option[BlendedJMSConnection] = None var currentReceive : StateReceive = disconnected() var currentState : ConnectionState = ConnectionState(provider = config.provider).copy(status = DISCONNECTED) val pingCounter = new AtomicLong(0) var pinger : Option[ActorRef] = None // the retry Schedule is the time interval we retry a connection after a failed connect attempt // usually that is only a fraction of the ping interval (i.e. 
5 seconds) val retrySchedule : FiniteDuration = config.retryInterval // The schedule is the interval for the normal connection ping val schedule : FiniteDuration = config.pingInterval // The ping timer is used to schedule ping messages over the underlying connection to check it's // health var pingTimer : Option[Cancellable] = None // To this actor we delegate all connect and close operations for the underlying JMS provider val controller = context.actorOf(JmsConnectionController.props(holder)) // If something causes an unexpected restart, we want to know override def preRestart(reason: Throwable, message: Option[Any]): Unit = { log.error(s"Error encountered in Connection State Manager [$provider] : [${reason.getMessage}], restarting ...") super.preRestart(reason, message) } // We clean up our JMS connections override def postStop(): Unit = { log.info(s"Stopping Connection State Manager for provider [$provider].") disconnect(currentState) super.postStop() } // The initial state is disconnected override def receive: Actor.Receive = Actor.emptyBehavior override def preStart(): Unit = { super.preStart() switchState(disconnected(), currentState) context.system.eventStream.subscribe(self, classOf[ConnectionCommand]) context.system.eventStream.subscribe(self, classOf[ConnectionException]) } // ---- State: Disconnected def disconnected()(state: ConnectionState) : Receive = LoggingReceive { // Upon a CheckConnection message we will kick off initiating and monitoring the connection case cc : CheckConnection => pingTimer = None initConnection(state, cc.now) } // ---- State: Connected def connected()(state: ConnectionState) : Receive = { // we simply eat up the CloseTimeOut messages that might still be going for previous // connect attempts case ConnectTimeout(_) => // do nothing, this will just get rid of irrelevant warnings in the log // If we are already connected we simply try to ping the underlying connection case cc : CheckConnection => pingTimer = None conn.foreach( 
ping ) case d @ Disconnect(_) => disconnect(state) // For a successful ping we log the event and schedule the next connectionCheck case PingSuccess(m) => pinger = None switchState( connected(), publishEvents(state, s"JMS connection for provider [$vendor:$provider] seems healthy [$m].").copy(failedPings = 0) ) checkConnection(schedule) case PingFailed(t) => pinger = None checkReconnect( publishEvents(state, s"Error sending connection ping for provider [$vendor:$provider] : [${t.getMessage()}].") .copy(failedPings = state.failedPings + 1) ) case PingTimeout => pinger = None checkReconnect( publishEvents(state, s"Ping for provider [$vendor:$provider] timed out.") .copy(failedPings = state.failedPings + 1) ) } // ---- State: Connecting def connecting()(state: ConnectionState) : Receive = { case cc : CheckConnection => pingTimer = None case ConnectResult(t, Left(e)) => if (t == state.lastConnectAttempt.getOrElse(0l)) { switchState(disconnected(), state.copy(status = DISCONNECTED)) if (!checkRestartForFailedReconnect(state, e)) { checkConnection(retrySchedule) } } // We successfully connected, record the connection and timestamps case ConnectResult(t, Right(c)) => if (t == state.lastConnectAttempt.getOrElse(0l)) { conn = Some(new BlendedJMSConnection(c)) checkConnection(schedule) switchState(connected(), publishEvents(state, s"Successfully connected to provider [$vendor:$provider]").copy( status = CONNECTED, firstReconnectAttempt = None, lastConnect = Some(new Date()), failedPings = 0 )) } case ConnectTimeout(t) => if (t == state.lastConnectAttempt.getOrElse(0l)) { switchState(disconnected(), state.copy(status = DISCONNECTED)) checkConnection(retrySchedule) } } // State: Closing def closing()(state: ConnectionState) : Receive = { case cc : CheckConnection => pingTimer = None // All good, happily disconnected case ConnectionClosed => conn = None checkConnection(config.minReconnect, true) switchState( disconnected(), publishEvents(state, s"Connection for provider 
[$vendor:$provider] successfully closed.") .copy(status = DISCONNECTED, lastDisconnect = Some(new Date())) ) // Once we encounter a timeout for a connection close we initiate a Container Restart via the monitor case CloseTimeout => val e = new Exception(s"Unable to close connection for provider [$vendor:$provider] in [${config.minReconnect}]s]. Restarting container ...") monitor ! RestartContainer(e) } def jmxOperations(state : ConnectionState) : Receive = { case cmd : ConnectionCommand => if (cmd.vendor == vendor && cmd.provider == provider) { if (cmd.disconnectPending) disconnect(state) else if (cmd.connectPending) self ! CheckConnection(cmd.reconnectNow) } } def handleConnectionError(state : ConnectionState) : Receive = { case ce : ConnectionException => if (ce.vendor == vendor && ce.provider == provider) { log.info(s"Initiating reconnect for [$vendor:$provider] after connection exception [${ce.e.getMessage()}]") reconnect(state) } } // helper methods // A convenience method to let us know which state we are switching to private[this] def switchState(rec: StateReceive, newState: ConnectionState) : Unit = { val nextState = publishEvents(newState, s"Connection State Manager [$vendor:$provider] switching to state [${newState.status}]") currentReceive = rec currentState = nextState monitor ! 
ConnectionStateChanged(nextState) context.become(LoggingReceive ( rec(nextState) .orElse(jmxOperations(nextState)) .orElse(handleConnectionError(nextState)) .orElse(unhandled)) ) } // A convenience method to capture unhandled messages def unhandled : Receive = { case m => log.debug(s"received unhandled message for [$vendor:$provider] : ${m.toString()}") } // We simply stay in the same state and maintain the list of events def publishEvents(s : ConnectionState, msg: String*) : ConnectionState = { msg.foreach(m => log.info(m)) val tsMsg = msg.map { m => df.format(new Date()) + " " + m } val newEvents = if (tsMsg.size >= s.maxEvents) tsMsg.reverse.take(s.maxEvents) else tsMsg.reverse ++ s.events.take(s.maxEvents - tsMsg.size) s.copy(events = newEvents.toList) } // To initialise the connection we check whether we have been connected at some point in the history of // this container has been started. If so, we will let not attempt to reconnect before the time specified // in the config has passed. // If not, we will try to connect immediately. 
private[this] def initConnection(s: ConnectionState, now : Boolean) : Unit = { val remaining : Double = s.lastDisconnect match { case None => 0 case Some(l) => config.minReconnect.toMillis - (System.currentTimeMillis() - l.getTime()) } // if we were ever disconnected from the JMS provider since the container start we will check // whether the reconnect interval has passed, otherwise we will connect immediately if (!now && s.lastDisconnect.isDefined && remaining > 0) { switchState( currentReceive, publishEvents(s, s"Container is waiting to reconnect for provider [$vendor:$provider], remaining wait time [${remaining / 1000.0}]s") ) checkConnection((remaining + 1).seconds) } else { switchState(connecting(), connect(s)) } } // A simple convenience method to schedule the next connection check to ourselves private[this] def checkConnection(delay : FiniteDuration, force : Boolean = false) : Unit = { if (force) { pingTimer.foreach(_.cancel()) pingTimer = None } if (pingTimer.isEmpty) { pingTimer = Some(context.system.scheduler.scheduleOnce(delay, self, CheckConnection(false))) } } private[this] def connect(state: ConnectionState) : ConnectionState = { var events : List[String] = List(s"Creating connection to JMS provider [$vendor:$provider]") val lastConnectAttempt = new Date() context.system.scheduler.scheduleOnce(30.seconds, self, ConnectTimeout(lastConnectAttempt.getTime())) // This only happens if we have configured a maximum reconnect timeout in the config and we ever // had a connection since this container was last restarted and we haven't started the timer yet val newState = if (config.maxReconnectTimeout.isDefined && state.firstReconnectAttempt.isEmpty && state.lastDisconnect.isDefined) { events = (s"Starting max reconnect timeout monitor for provider [$vendor:$provider] with [${config.maxReconnectTimeout}]s") :: events state.copy(firstReconnectAttempt = Some(lastConnectAttempt)) } else { state } controller ! 
Connect(lastConnectAttempt, config.clientId) // push the events into the newState in reverse order and set // the new state name publishEvents(newState, events.reverse.toArray:_*).copy( status = CONNECTING, lastConnectAttempt = Some(lastConnectAttempt) ) } private[this] def checkRestartForFailedReconnect(s: ConnectionState, e: Throwable): Boolean = { var result = false log.error(e, s"Error connecting to JMS provider [$vendor:$provider].") if (config.maxReconnectTimeout.isDefined && s.firstReconnectAttempt.isDefined) { s.firstReconnectAttempt.foreach { t => val restart : Boolean = config.maxReconnectTimeout.exists{ to => (System.currentTimeMillis() - t.getTime()).millis > to } if (restart) { val e = new Exception(s"Unable to reconnect to JMS provider [$vendor:$provider] in [${config.maxReconnectTimeout}]s. Restarting container ...") monitor ! RestartContainer(e) result = true } } } result } // Once we decided to close the connection we cancel our PingTimer // and start to cleanup. This goes into it's own state, so that we can // catch connections that cannot be closed within a reasonable time. // Experience has shown that for a close timeout it is best to restart // the container. private[this] def disconnect(s : ConnectionState) : Unit = { pingTimer.foreach(_.cancel()) pingTimer = None // Notify the connection controller of the disconnect controller ! 
Disconnect(config.minReconnect) switchState(closing(), s.copy(status = CLOSING)) } // A reconnect is only schedule if we have reached the maximumPingTolerance for the connection // Otherwise we schedule a connection check for the retry schedule, which is usually much shorter // than the normal connection check private[this] def checkReconnect(s: ConnectionState) : Unit = { log.debug(s"Checking reconnect for provider [$vendor:$provider] state [$s] against tolerance [${config.pingTolerance}]") if (s.failedPings == config.pingTolerance) { reconnect( publishEvents(s, s"Maximum ping tolerance for provider [$vendor:$provider] reached .... reconnecting.") ) } else { switchState(currentReceive, s) checkConnection(retrySchedule) } } private[this] def reconnect(s: ConnectionState) : Unit = { disconnect(s) checkConnection(config.minReconnect + 1.seconds) } private[this] def ping(c: Connection) : Unit = { if (config.pingEnabled) { pinger match { case None => log.info(s"Checking JMS connection for provider [$vendor:$provider]") pinger = Some(context.actorOf(JmsPingPerformer.props(config, c, new DefaultPingOperations()))) pinger.foreach(_ ! ExecutePing(self, pingCounter.getAndIncrement())) case Some(a) => log.debug(s"Ignoring ping request for provider [$provider] as one pinger is already active.") } } else { log.info(s"Ping is disabled for connection factory [${config.vendor}:${config.provider}]") } } }
lefou/blended
blended.jms.utils/src/main/scala/blended/jms/utils/internal/ConnectionStateManager.scala
Scala
apache-2.0
13,892
package infcalcs

import ParameterFuncs._
import EstimateCC.{ estimateCC, estimateCCBS, estimateCCVerbose, calculateWithoutEstimator }
import IOFile.{loadList, importParameters}
import akka.actor.{ActorSystem, Props}
import infcalcs.actors.{AdaptiveDistributor, Init, FixedDistributor}

/**
 * Top-level main function for channel capacity calculation.
 *
 * - Collects command-line arguments;
 * - Loads the data;
 * - Sets configuration parameters;
 * - Generates unimodal and bimodal weights;
 * - Calculates channel capacity for each weighting scheme.
 */
object EstCC extends App with CLOpts {

  // Parse CLI arguments; on parse failure we exit immediately — the trailing
  // `new Config()` only satisfies the expression's type and is never used.
  val appConfig = parser.parse(args, Config()) getOrElse {
    System.exit(0)
    new Config()
  }

  val dataFile = appConfig.dataFile
  // Empty string means "no parameter file supplied"; fall back to defaults.
  val paramFile = if (appConfig.paramFile == "") None else Some(appConfig.paramFile)
  val rawParameters = importParameters(paramFile)
  val parameters = updateParameters(rawParameters)
  if (appConfig.verbose) {
    println("\\nVerbose mode\\n")
  }
  implicit val calcConfig = CalcConfig(parameters)
  val p = loadList(dataFile)
  if (appConfig.verbose) {
    calcConfig.parameters.print
    println("\\nResults:\\n")
  }
  // When regression is disabled, run the estimator-free calculation and stop.
  if (appConfig.noReg) {
    calculateWithoutEstimator(p)
    System exit 0
  }
  // Calculate and output estimated mutual information values given calculated weights
  if (appConfig.cores > 1) {
    // Multi-core path: one distributor actor plus (cores - 1) calculators.
    val system = ActorSystem("EstCC")
    val numCalculators = appConfig.cores - 1
    val distributor =
      if (calcConfig.defSigVals) system actorOf (Props(new FixedDistributor(p)), "dist")
      else system actorOf (Props(new AdaptiveDistributor(p)), "dist")
    distributor ! Init(numCalculators)
    system.awaitTermination()
  }
  else if (calcConfig.numParameters("numForBootstrap") > 0) estimateCCBS(p)
  else if (appConfig.verbose) estimateCCVerbose(p)
  else estimateCC(p, calcConfig.initSignalBins)
}
ryants/EstCC
src/main/scala/infcalcs/EstCC.scala
Scala
mit
1,884
package com.mesosphere.cosmos

/**
 * Host/port pair describing an outbound connection target.
 *
 * @param host remote host name or address
 * @param port remote TCP port
 * @param tls  whether to connect over TLS; defaults to `false` (plain text)
 */
private[cosmos] case class ConnectionDetails(host: String, port: Int, tls: Boolean = false)
dcos/cosmos
cosmos-server/src/main/scala/com/mesosphere/cosmos/ConnectionDetails.scala
Scala
apache-2.0
123
package supertaggedtests.misc import org.scalatest.matchers.should.Matchers import org.scalatest.flatspec.AnyFlatSpec import shapeless.test.illTyped /** * Nuance with implicit `+` when working with newtypes * * The problem is with any2stringadd from Predef.scala * // scala/bug#8229 retaining the pre 2.11 name for source compatibility in shadowing this implicit /** @group implicit-classes-any */ implicit final class any2stringadd[A](private val self: A) extends AnyVal { def +(other: String): String = String.valueOf(self) + other } * * It is always in scope (without any imports), and wins searching for implicits in zero round, * because of has no any alternatives without additional imports. * * From 2.13 it is deprecated and will be removed in future (years later...), so now we need to help compiler a little bit. * For all over methods from ops (including other arithmetic -,/,*) - work well without imports. * */ class PlusNuance extends AnyFlatSpec with Matchers { "`$plus`" should "fail with no imports" in { import supertaggedtests.newtypes.Step1 val step1 = Step1(5) illTyped("step1 + step1","type mismatch;.+") } "Explicit additional common import" should "work" in { import supertaggedtests.newtypes.Step1 import supertagged.newtypeOps val step1 = Step1(5) val stepPlus = step1 + step1 stepPlus shouldBe 10 } /** * Look at PlusNuanceShadowing.scala (it works only with top level shadow) */ // "Shadowing Predef" should "work" in { // // import Predef.{any2stringadd => _,_} // import supertaggedtests.newtypes.Step1 // // val step1 = Step1(5) // // val stepPlus = step1 + step1 // // stepPlus shouldBe 10 // } }
Rudogma/scala-supertagged
tests/src/test/scala/supertaggedtests/misc/PlusNuance.scala
Scala
mit
1,748
package com.ldaniels528.broadway.core.resources /** * Represents a generic resource * @author Lawrence Daniels <lawrence.daniels@gmail.com> */ trait Resource extends Serializable { /** * Returns an option of the resource name * @return an option of the resource name */ def getResourceName: Option[String] }
ldaniels528/shocktrade-broadway-server
src/main/scala/com/ldaniels528/broadway/core/resources/Resource.scala
Scala
apache-2.0
327
/* * Copyright 2014–2017 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.tpe import org.scalacheck._ import scalaz.Enum import scalaz.std.option._ import scalaz.syntax.apply._ import scalaz.syntax.enum._ trait SimpleTypeArbitrary { implicit def simpleTypeArbitrary: Arbitrary[SimpleType] = { val en = Enum[SimpleType] Arbitrary((en.min |@| en.max)(_ |-> _).fold(Gen.fail[SimpleType])(Gen.oneOf(_))) } } object SimpleTypeArbitrary extends SimpleTypeArbitrary
drostron/quasar
frontend/src/test/scala/quasar/tpe/SimpleTypeArbitrary.scala
Scala
apache-2.0
1,020
/* __ *\ ** ________ ___ / / ___ __ ____ Scala.js tools ** ** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2014, LAMP/EPFL ** ** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ ** ** /____/\___/_/ |_/____/_/ | |__/ /____/ ** ** |/____/ ** \* */ package org.scalajs.core.tools.optimizer import scala.collection.{GenTraversableOnce, GenIterable} import scala.collection.mutable import org.scalajs.core.tools.sem.Semantics class IncOptimizer(semantics: Semantics, considerPositions: Boolean) extends GenIncOptimizer(semantics, considerPositions) { protected object CollOps extends GenIncOptimizer.AbsCollOps { type Map[K, V] = mutable.Map[K, V] type ParMap[K, V] = mutable.Map[K, V] type AccMap[K, V] = mutable.Map[K, mutable.ListBuffer[V]] type ParIterable[V] = mutable.ListBuffer[V] type Addable[V] = mutable.ListBuffer[V] def emptyAccMap[K, V]: AccMap[K, V] = mutable.Map.empty def emptyMap[K, V]: Map[K, V] = mutable.Map.empty def emptyParMap[K, V]: ParMap[K, V] = mutable.Map.empty def emptyParIterable[V]: ParIterable[V] = mutable.ListBuffer.empty // Operations on ParMap def put[K, V](map: ParMap[K, V], k: K, v: V): Unit = map.put(k, v) def remove[K, V](map: ParMap[K, V], k: K): Option[V] = map.remove(k) def retain[K, V](map: ParMap[K, V])(p: (K, V) => Boolean): Unit = map.retain(p) // Operations on AccMap def acc[K, V](map: AccMap[K, V], k: K, v: V): Unit = map.getOrElseUpdate(k, mutable.ListBuffer.empty) += v def getAcc[K, V](map: AccMap[K, V], k: K): GenIterable[V] = map.getOrElse(k, Nil) def parFlatMapKeys[A, B](map: AccMap[A, _])( f: A => GenTraversableOnce[B]): GenIterable[B] = map.keys.flatMap(f).toList // Operations on ParIterable def prepAdd[V](it: ParIterable[V]): Addable[V] = it def add[V](addable: Addable[V], v: V): Unit = addable += v def finishAdd[V](addable: Addable[V]): ParIterable[V] = addable } private val _interfaces = mutable.Map.empty[String, InterfaceType] protected def getInterface(encodedName: String): InterfaceType = 
_interfaces.getOrElseUpdate(encodedName, new SeqInterfaceType(encodedName)) private val methodsToProcess = mutable.ListBuffer.empty[MethodImpl] protected def scheduleMethod(method: MethodImpl): Unit = methodsToProcess += method protected def newMethodImpl(owner: MethodContainer, encodedName: String): MethodImpl = new SeqMethodImpl(owner, encodedName) protected def processAllTaggedMethods(): Unit = { logProcessingMethods(methodsToProcess.count(!_.deleted)) for (method <- methodsToProcess) method.process() methodsToProcess.clear() } private class SeqInterfaceType(encName: String) extends InterfaceType(encName) { private val ancestorsAskers = mutable.Set.empty[MethodImpl] private val dynamicCallers = mutable.Map.empty[String, mutable.Set[MethodImpl]] private val staticCallers = mutable.Map.empty[String, mutable.Set[MethodImpl]] private val callersOfStatic = mutable.Map.empty[String, mutable.Set[MethodImpl]] private var _ancestors: List[String] = encodedName :: Nil private var _instantiatedSubclasses: Set[Class] = Set.empty def instantiatedSubclasses: Iterable[Class] = _instantiatedSubclasses def addInstantiatedSubclass(x: Class): Unit = _instantiatedSubclasses += x def removeInstantiatedSubclass(x: Class): Unit = _instantiatedSubclasses -= x def ancestors: List[String] = _ancestors def ancestors_=(v: List[String]): Unit = { if (v != _ancestors) { _ancestors = v ancestorsAskers.foreach(_.tag()) ancestorsAskers.clear() } } def registerAskAncestors(asker: MethodImpl): Unit = ancestorsAskers += asker def registerDynamicCaller(methodName: String, caller: MethodImpl): Unit = dynamicCallers.getOrElseUpdate(methodName, mutable.Set.empty) += caller def registerStaticCaller(methodName: String, caller: MethodImpl): Unit = staticCallers.getOrElseUpdate(methodName, mutable.Set.empty) += caller def registerCallerOfStatic(methodName: String, caller: MethodImpl): Unit = callersOfStatic.getOrElseUpdate(methodName, mutable.Set.empty) += caller def unregisterDependee(dependee: 
MethodImpl): Unit = { ancestorsAskers -= dependee dynamicCallers.values.foreach(_ -= dependee) staticCallers.values.foreach(_ -= dependee) callersOfStatic.values.foreach(_ -= dependee) } def tagDynamicCallersOf(methodName: String): Unit = dynamicCallers.remove(methodName).foreach(_.foreach(_.tag())) def tagStaticCallersOf(methodName: String): Unit = staticCallers.remove(methodName).foreach(_.foreach(_.tag())) def tagCallersOfStatic(methodName: String): Unit = callersOfStatic.remove(methodName).foreach(_.foreach(_.tag())) } private class SeqMethodImpl(owner: MethodContainer, encodedName: String) extends MethodImpl(owner, encodedName) { private val bodyAskers = mutable.Set.empty[MethodImpl] def registerBodyAsker(asker: MethodImpl): Unit = bodyAskers += asker def unregisterDependee(dependee: MethodImpl): Unit = bodyAskers -= dependee def tagBodyAskers(): Unit = { bodyAskers.foreach(_.tag()) bodyAskers.clear() } private var _registeredTo: List[Unregisterable] = Nil private var tagged = false protected def registeredTo(intf: Unregisterable): Unit = _registeredTo ::= intf protected def unregisterFromEverywhere(): Unit = { _registeredTo.foreach(_.unregisterDependee(this)) _registeredTo = Nil } protected def protectTag(): Boolean = { val res = !tagged tagged = true res } protected def resetTag(): Unit = tagged = false } } object IncOptimizer { val factory: ScalaJSOptimizer.OptimizerFactory = new IncOptimizer(_, _) }
matthughes/scala-js
tools/shared/src/main/scala/org/scalajs/core/tools/optimizer/IncOptimizer.scala
Scala
bsd-3-clause
6,206
/* * Scala (https://www.scala-lang.org) * * Copyright EPFL and Lightbend, Inc. * * Licensed under Apache License 2.0 * (http://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package scala package reflect package internal // Flags at each index of a flags Long. Those marked with /M are used in // Parsers/JavaParsers and therefore definitely appear on Modifiers; but the // absence of /M on the other flags does not imply they aren't. // // Generated by mkFlagsTable() at Thu Feb 02 20:31:52 PST 2012 // // 0: PROTECTED/M // 1: OVERRIDE/M // 2: PRIVATE/M // 3: ABSTRACT/M // 4: DEFERRED/M // 5: FINAL/M // 6: METHOD // 7: INTERFACE/M // 8: MODULE // 9: IMPLICIT/M // 10: SEALED/M // 11: CASE/M // 12: MUTABLE/M // 13: PARAM/M // 14: PACKAGE // 15: MACRO/M // 16: BYNAMEPARAM/M CAPTURED COVARIANT/M // 17: CONTRAVARIANT/M INCONSTRUCTOR LABEL // 18: ABSOVERRIDE/M // 19: LOCAL/M // 20: JAVA/M // 21: SYNTHETIC // 22: STABLE // 23: STATIC/M // 24: CASEACCESSOR/M // 25: DEFAULTPARAM/M TRAIT/M // 26: BRIDGE // 27: ACCESSOR // 28: SUPERACCESSOR // 29: PARAMACCESSOR/M // 30: MODULEVAR // 31: LAZY/M // 32: IS_ERROR // 33: OVERLOADED // 34: LIFTED // 35: EXISTENTIAL MIXEDIN // 36: EXPANDEDNAME // 37: PRESUPER/M // 38: TRANS_FLAG // 39: LOCKED // 40: SPECIALIZED // 41: DEFAULTINIT/M // 42: VBRIDGE // 43: VARARGS // 44: TRIEDCOOKING // 45: SYNCHRONIZED/M // 46: ARTIFACT // 47: JAVA_DEFAULTMETHOD/M // 48: JAVA_ENUM // 49: JAVA_ANNOTATION // 50: // 51: lateDEFERRED // 52: lateFINAL // 53: lateMETHOD // 54: lateINTERFACE // 55: lateMODULE // 56: notPROTECTED // 57: notOVERRIDE // 58: notPRIVATE // 59: // 60: SCALA3X // 61: // 62: // 63: /** Flags set on Modifiers instances in the parsing stage. */ class ModifierFlags { final val IMPLICIT = 1L << 9 final val FINAL = 1L << 5 // May not be overridden. Note that java final implies much more than scala final. 
final val PRIVATE = 1L << 2 final val PROTECTED = 1L << 0 final val SEALED = 1L << 10 final val OVERRIDE = 1L << 1 final val CASE = 1L << 11 final val ABSTRACT = 1L << 3 // abstract class, or used in conjunction with abstract override. // Note difference to DEFERRED! final val DEFERRED = 1L << 4 // was `abstract` for members | trait is virtual final val INTERFACE = 1L << 7 // symbol is an interface. the flag is set for: // - scala-defined traits with only abstract methods or fields // - any java-defined interface (even if it has default methods) final val MUTABLE = 1L << 12 // symbol is a mutable variable. final val PARAM = 1L << 13 // symbol is a (value or type) parameter to a method final val MACRO = 1L << 15 // symbol is a macro definition final val COVARIANT = 1L << 16 // symbol is a covariant type variable final val BYNAMEPARAM = 1L << 16 // parameter is by name final val CONTRAVARIANT = 1L << 17 // symbol is a contravariant type variable final val ABSOVERRIDE = 1L << 18 // combination of abstract & override final val LOCAL = 1L << 19 // symbol is local to current class (i.e. private[this] or protected[this] // pre: PRIVATE or PROTECTED are also set final val JAVA = 1L << 20 // symbol was defined by a Java class final val SCALA3X = 1L << 60 // class was defined in Scala 3 final val STATIC = 1L << 23 // static field, method or class final val CASEACCESSOR = 1L << 24 // symbol is a case parameter (or its accessor, or a GADT skolem) final val TRAIT = 1L << 25 // symbol is a trait final val DEFAULTPARAM = 1L << 25 // the parameter has a default value final val PARAMACCESSOR = 1L << 29 // for field definitions generated for primary constructor // parameters (no matter if it's a 'val' parameter or not) // for parameters of a primary constructor ('val' or not) // for the accessor methods generated for 'val' or 'var' parameters final val LAZY = 1L << 31 // symbol is a lazy val. 
can't have MUTABLE unless transformed by typer final val PRESUPER = 1L << 37 // value is evaluated before super call final val DEFAULTINIT = 1L << 41 // symbol is initialized to the default value: used by -Xcheckinit final val ARTIFACT = 1L << 46 // symbol should be ignored when typechecking; will be marked ACC_SYNTHETIC in bytecode // to see which symbols are marked as ARTIFACT, see scaladocs for FlagValues.ARTIFACT final val JAVA_DEFAULTMETHOD = 1L << 47 // symbol is a java default method final val JAVA_ENUM = 1L << 48 // symbol is a java enum final val JAVA_ANNOTATION = 1L << 49 // symbol is a java annotation // Overridden. def flagToString(flag: Long): String = "" final val PrivateLocal = PRIVATE | LOCAL final val ProtectedLocal = PROTECTED | LOCAL final val AccessFlags = PRIVATE | PROTECTED | LOCAL } object ModifierFlags extends ModifierFlags /** All flags and associated operations */ class Flags extends ModifierFlags { final val METHOD = 1L << 6 // a method final val MODULE = 1L << 8 // symbol is module or class implementing a module final val PACKAGE = 1L << 14 // symbol is a java package final val CAPTURED = 1L << 16 // variable is accessed from nested function. Set by LambdaLift. final val LABEL = 1L << 17 // method symbol is a label. Set by TailCall final val INCONSTRUCTOR = 1L << 17 // class symbol is defined in this/superclass constructor. final val SYNTHETIC = 1L << 21 // symbol is compiler-generated (compare with ARTIFACT) final val STABLE = 1L << 22 // functions that are assumed to be stable // (typically, access methods for valdefs) // or classes that do not contain abstract types. final val BRIDGE = 1L << 26 // function is a bridge method. 
Set by Erasure final val ACCESSOR = 1L << 27 // a value or variable accessor (getter or setter) final val SUPERACCESSOR = 1L << 28 // a super accessor final val MODULEVAR = 1L << 30 // for variables: is the variable caching a module value final val IS_ERROR = 1L << 32 // symbol is an error symbol final val OVERLOADED = 1L << 33 // symbol is overloaded final val LIFTED = 1L << 34 // class has been lifted out to package level // local value has been lifted out to class level // todo: make LIFTED = latePRIVATE? final val MIXEDIN = 1L << 35 // term member has been mixed in final val EXISTENTIAL = 1L << 35 // type is an existential parameter or skolem final val EXPANDEDNAME = 1L << 36 // name has been expanded with class suffix final val TRANS_FLAG = 1L << 38 // transient flag guaranteed to be reset after each phase. final val LOCKED = 1L << 39 // temporary flag to catch cyclic dependencies final val SPECIALIZED = 1L << 40 // symbol is a generated specialized member final val VBRIDGE = 1L << 42 // symbol is a varargs bridge (but not a bridge at the bytecode level) final val VARARGS = 1L << 43 // symbol is a Java-style varargs method final val TRIEDCOOKING = 1L << 44 // `Cooking` has been tried on this symbol // A Java method's type is `cooked` by transforming raw types to existentials final val SYNCHRONIZED = 1L << 45 // symbol is a method which should be marked ACC_SYNCHRONIZED final val SYNTHESIZE_IMPL_IN_SUBCLASS = 1L << 50 // used in fields phase to indicate this accessor should receive an implementation in a subclass // flags used strictly internally in the Fields phase (info/tree transform): final val NEEDS_TREES = 1L << 59 // this symbol needs a tree. (distinct from SYNTHESIZE_IMPL_IN_SUBCLASS) // ------- shift definitions ------------------------------------------------------- // // Flags from 1L to (1L << 50) are normal flags. 
// // The "late" counterpart to flags DEFERRED (1L << 4) to MODULE (1L << 8) // show up in `sym.flags` as their regular counterpart once the phase mask admits them (see below). // The first late flag (lateDEFERRED) is at (1L << 51), i.e., late flags are shifted by 47. The last one is (1L << 55). // Think of it as a poor man's flag history akin to the type history for a symbol's info. // // The "not" counterpart to flags PROTECTED (1L) to PRIVATE (1L << 2) // are negated flags that suppress their counterpart after a specific phase (see below). // They are shifted by 56, i.e., the first negated flag (notPROTECTED) is at (1L << 56), the last at (1L << 58). // // Late and negative flags are only enabled after certain phases, implemented by the phaseNewFlags // method of the SubComponent, so they implement a bit of a flag history. // // The flags (1L << 59) to (1L << 63) are currently unused. If added to the InitialFlags mask, // they could be used as normal flags. final val InitialFlags = 0x1007FFFFFFFFFFFFL // normal flags, enabled from the first phase: 1L to (1L << 50) + (1L << 60) final val LateFlags = 0x00F8000000000000L // flags that override flags in (1L << 4) to (1L << 8): DEFERRED, FINAL, INTERFACE, METHOD, MODULE final val AntiFlags = 0x0700000000000000L // flags that cancel flags in 1L to (1L << 2): PROTECTED, OVERRIDE, PRIVATE final val LateShift = 47 final val AntiShift = 56 /** all of the flags that are unaffected by phase */ final val PhaseIndependentFlags = 0xF807FFFFFFFFFE08L //this should be // final val PhaseIndependentFlags = (-1L & ~LateFlags & ~AntiFlags & ~(LateFlags >>> LateShift) & ~(AntiFlags >>> AntiShift))) // but the constant folder does not optimise this! 
Good news is that is expected to be fixed soon :-) assert (PhaseIndependentFlags == (-1L & ~LateFlags & ~AntiFlags & ~(LateFlags >>> LateShift) & ~(AntiFlags >>> AntiShift))) // Flags which sketchily share the same slot // 16: BYNAMEPARAM/M CAPTURED COVARIANT/M // 17: CONTRAVARIANT/M INCONSTRUCTOR LABEL // 25: DEFAULTPARAM/M TRAIT/M // 35: EXISTENTIAL MIXEDIN final val OverloadedFlagsMask = 0L | BYNAMEPARAM | CONTRAVARIANT | DEFAULTPARAM | EXISTENTIAL // ------- late flags (set by a transformer phase) --------------------------------- // // Summary of when these are claimed to be first used. // You can get this output with scalac -Vphases -Vdebug. // // refchecks 7 [START] <latemethod> // specialize 13 [START] <latefinal> <notprivate> // explicitouter 14 [START] <notprotected> // erasure 15 [START] <latedeferred> // mixin 20 [START] <latemodule> <notoverride> // // notPRIVATE set in Symbols#makeNotPrivate, IExplicitOuter#transform, Inliners. // notPROTECTED set in ExplicitOuter#transform. // final val lateDEFERRED = (0L + DEFERRED) << LateShift // unused // final val lateFINAL = (0L + FINAL) << LateShift // only used for inliner -- could be subsumed by notPRIVATE? // final val lateMETHOD = (0L + METHOD) << LateShift // unused // final val lateMODULE = (0L + MODULE) << LateShift // unused // final val notOVERRIDE = (0L + OVERRIDE) << AntiShift // unused final val notPRIVATE = (0L + PRIVATE) << AntiShift final val notPROTECTED = (0L + PROTECTED) << AntiShift // ------- masks ----------------------------------------------------------------------- /** To be a little clearer to people who aren't habitual bit twiddlers. */ final val AllFlags = -1L // TODO - there's no call to slap four flags onto every package. final val PackageFlags = MODULE | PACKAGE | FINAL | JAVA // FINAL not included here due to possibility of object overriding. // In fact, FINAL should not be attached regardless. We should be able // to reconstruct whether an object was marked final in source. 
final val ModuleFlags = MODULE /** These modifiers can be set explicitly in source programs. This is * used only as the basis for the default flag mask (which ones to display * when printing a normal message.) */ final val ExplicitFlags = PRIVATE | PROTECTED | ABSTRACT | FINAL | SEALED | OVERRIDE | CASE | IMPLICIT | ABSOVERRIDE | LAZY | JAVA_DEFAULTMETHOD /** The two bridge flags */ final val BridgeFlags = BRIDGE | VBRIDGE final val BridgeAndPrivateFlags = BridgeFlags | PRIVATE /** These modifiers appear in TreePrinter output. */ final val PrintableFlags = ExplicitFlags | BridgeFlags | LOCAL | SYNTHETIC | STABLE | CASEACCESSOR | MACRO | ACCESSOR | SUPERACCESSOR | PARAMACCESSOR | STATIC | SPECIALIZED | SYNCHRONIZED | ARTIFACT | SYNTHESIZE_IMPL_IN_SUBCLASS | NEEDS_TREES /** When a symbol for a field is created, only these flags survive * from Modifiers. Others which may be applied at creation time are: * PRIVATE, LOCAL. */ final val FieldFlags = MUTABLE | CASEACCESSOR | PARAMACCESSOR | STATIC | FINAL | PRESUPER | LAZY | DEFAULTINIT /** Masks for getters and setters, where the flags are derived from those * on the field's modifiers. Both getters and setters get the ACCESSOR flag. * Getters of immutable values also get STABLE. */ final val GetterFlags = ~(PRESUPER | MUTABLE) final val SetterFlags = ~(PRESUPER | MUTABLE | STABLE | CASEACCESSOR | IMPLICIT) /** Since DEFAULTPARAM is overloaded with TRAIT, we need some additional * means of determining what that bit means. Usually DEFAULTPARAM is coupled * with PARAM, which suffices. Default getters get METHOD instead. * This constant is the mask of flags which can survive from the parameter modifiers. * See paramFlagsToDefaultGetter for the full logic. */ final val DefaultGetterFlags = PRIVATE | PROTECTED | FINAL | PARAMACCESSOR /** When a symbol for a method parameter is created, only these flags survive * from Modifiers. Others which may be applied at creation time are: * SYNTHETIC. 
*/ final val ValueParameterFlags = BYNAMEPARAM | IMPLICIT | DEFAULTPARAM | STABLE | SYNTHETIC final val BeanPropertyFlags = DEFERRED | OVERRIDE | STATIC final val VarianceFlags = COVARIANT | CONTRAVARIANT /** These appear to be flags which should be transferred from owner symbol * to a newly created constructor symbol. */ final val ConstrFlags = JAVA /** Module flags inherited by their module-class */ final val ModuleToClassFlags = AccessFlags | PackageFlags | CASE | SYNTHETIC final val ValidAliasFlags = SUPERACCESSOR | PARAMACCESSOR | MIXEDIN | SPECIALIZED /** These flags are not pickled */ final val FlagsNotPickled = IS_ERROR | OVERLOADED | LIFTED | TRANS_FLAG | LOCKED | TRIEDCOOKING | SCALA3X // A precaution against future additions to FlagsNotPickled turning out // to be overloaded flags thus not-pickling more than intended. assert( (OverloadedFlagsMask & FlagsNotPickled) == 0, "overloaded flags should not overlap with FlagsNotPickled; found: " + flagsToString(OverloadedFlagsMask & FlagsNotPickled) ) /** These flags are pickled */ final val PickledFlags = ( (InitialFlags & ~FlagsNotPickled) | notPRIVATE // for value class constructors (scala/bug#6601), and private members referenced // in @inline-marked methods publicized in SuperAccessors (see scala/bug#6608, e6b4204604) ) /** If we have a top-level class or module * and someone asks us for a flag not in TopLevelPickledFlags, * then we don't need unpickling to give a definite answer. 
*/ final val TopLevelPickledFlags = PickledFlags & ~(MODULE | METHOD | PACKAGE | PARAM | EXISTENTIAL) def paramFlagsToDefaultGetter(paramFlags: Long): Long = (paramFlags & DefaultGetterFlags) | SYNTHETIC | METHOD | DEFAULTPARAM def getterFlags(fieldFlags: Long): Long = ACCESSOR + ( if ((fieldFlags & MUTABLE) != 0) fieldFlags & ~MUTABLE & ~PRESUPER else fieldFlags & ~PRESUPER | STABLE ) def setterFlags(fieldFlags: Long): Long = getterFlags(fieldFlags) & ~STABLE & ~CASEACCESSOR // ------- pickling and unpickling of flags ----------------------------------------------- // The flags from 0x001 to 0x800 are different in the raw flags // and in the pickled format. private final val IMPLICIT_PKL = (1L << 0) private final val FINAL_PKL = (1L << 1) private final val PRIVATE_PKL = (1L << 2) private final val PROTECTED_PKL = (1L << 3) private final val SEALED_PKL = (1L << 4) private final val OVERRIDE_PKL = (1L << 5) private final val CASE_PKL = (1L << 6) private final val ABSTRACT_PKL = (1L << 7) private final val DEFERRED_PKL = (1L << 8) private final val METHOD_PKL = (1L << 9) private final val MODULE_PKL = (1L << 10) private final val INTERFACE_PKL = (1L << 11) private final val PKL_MASK = 0x00000FFF /** Pickler correspondence, ordered roughly by frequency of occurrence */ private def rawPickledCorrespondence = Array[(Long, Long)]( (METHOD, METHOD_PKL), (PRIVATE, PRIVATE_PKL), (FINAL, FINAL_PKL), (PROTECTED, PROTECTED_PKL), (CASE, CASE_PKL), (DEFERRED, DEFERRED_PKL), (MODULE, MODULE_PKL), (OVERRIDE, OVERRIDE_PKL), (INTERFACE, INTERFACE_PKL), (IMPLICIT, IMPLICIT_PKL), (SEALED, SEALED_PKL), (ABSTRACT, ABSTRACT_PKL) ) private[this] val mappedRawFlags = rawPickledCorrespondence map (_._1) private[this] val mappedPickledFlags = rawPickledCorrespondence map (_._2) private class MapFlags(from: Array[Long], to: Array[Long]) extends (Long => Long) { val fromSet = from.foldLeft(0L) (_ | _) def apply(flags: Long): Long = { var result = flags & ~fromSet var tobeMapped = flags & 
fromSet var i = 0 while (tobeMapped != 0) { if ((tobeMapped & from(i)) != 0) { result |= to(i) tobeMapped &= ~from(i) } i += 1 } result } } val rawToPickledFlags: Long => Long = new MapFlags(mappedRawFlags, mappedPickledFlags) val pickledToRawFlags: Long => Long = new MapFlags(mappedPickledFlags, mappedRawFlags) // ------ displaying flags -------------------------------------------------------- // Generated by mkFlagToStringMethod() at Thu Feb 02 20:31:52 PST 2012 @annotation.switch override def flagToString(flag: Long): String = flag match { case PROTECTED => "protected" // (1L << 0) case OVERRIDE => "override" // (1L << 1) case PRIVATE => "private" // (1L << 2) case ABSTRACT => "abstract" // (1L << 3) case DEFERRED => "<deferred>" // (1L << 4) case FINAL => "final" // (1L << 5) case METHOD => "<method>" // (1L << 6) case INTERFACE => "<interface>" // (1L << 7) case MODULE => "<module>" // (1L << 8) case IMPLICIT => "implicit" // (1L << 9) case SEALED => "sealed" // (1L << 10) case CASE => "case" // (1L << 11) case MUTABLE => "<mutable>" // (1L << 12) case PARAM => "<param>" // (1L << 13) case PACKAGE => "<package>" // (1L << 14) case MACRO => "<macro>" // (1L << 15) case BYNAMEPARAM => "<bynameparam/captured/covariant>" // (1L << 16) case CONTRAVARIANT => "<contravariant/inconstructor/label>" // (1L << 17) case ABSOVERRIDE => "absoverride" // (1L << 18) case LOCAL => "<local>" // (1L << 19) case JAVA => "<java>" // (1L << 20) case SYNTHETIC => "<synthetic>" // (1L << 21) case STABLE => "<stable>" // (1L << 22) case STATIC => "<static>" // (1L << 23) case CASEACCESSOR => "<caseaccessor>" // (1L << 24) case DEFAULTPARAM => "<defaultparam/trait>" // (1L << 25) case BRIDGE => "<bridge>" // (1L << 26) case ACCESSOR => "<accessor>" // (1L << 27) case SUPERACCESSOR => "<superaccessor>" // (1L << 28) case PARAMACCESSOR => "<paramaccessor>" // (1L << 29) case MODULEVAR => "<modulevar>" // (1L << 30) case LAZY => "lazy" // (1L << 31) case IS_ERROR => "<is_error>" // (1L << 
32) case OVERLOADED => "<overloaded>" // (1L << 33) case LIFTED => "<lifted>" // (1L << 34) case EXISTENTIAL => "<existential/mixedin>" // (1L << 35) case EXPANDEDNAME => "<expandedname>" // (1L << 36) case PRESUPER => "<presuper>" // (1L << 37) case TRANS_FLAG => "<trans_flag>" // (1L << 38) case LOCKED => "<locked>" // (1L << 39) case SPECIALIZED => "<specialized>" // (1L << 40) case DEFAULTINIT => "<defaultinit>" // (1L << 41) case VBRIDGE => "<vbridge>" // (1L << 42) case VARARGS => "<varargs>" // (1L << 43) case TRIEDCOOKING => "<triedcooking>" // (1L << 44) case SYNCHRONIZED => "<synchronized>" // (1L << 45) case ARTIFACT => "<artifact>" // (1L << 46) case JAVA_DEFAULTMETHOD => "<defaultmethod>" // (1L << 47) case JAVA_ENUM => "<enum>" // (1L << 48) case JAVA_ANNOTATION => "<annotation>" // (1L << 49) case SYNTHESIZE_IMPL_IN_SUBCLASS => "<sub_synth>" // (1L << 50) case 0x08000000000000L => "<latedeferred>" // (1L << 51) case 0x10000000000000L => "<latefinal>" // (1L << 52) case 0x20000000000000L => "<latemethod>" // (1L << 53) case 0x40000000000000L => "" // (1L << 54) case 0x80000000000000L => "<latemodule>" // (1L << 55) case `notPROTECTED` => "<notprotected>" // (1L << 56) case 0x200000000000000L => "<notoverride>" // (1L << 57) case `notPRIVATE` => "<notprivate>" // (1L << 58) case NEEDS_TREES => "<needs_trees>" // (1L << 59) case SCALA3X => "<scala3>" // (1L << 60) case 0x2000000000000000L => "" // (1L << 61) case 0x4000000000000000L => "" // (1L << 62) case 0x8000000000000000L => "" // (1L << 63) case _ => "" } private def accessString(flags: Long, privateWithin: String)= ( if (privateWithin == "") { if ((flags & PrivateLocal) == PrivateLocal) "private[this]" else if ((flags & ProtectedLocal) == ProtectedLocal) "protected[this]" else if ((flags & PRIVATE) != 0) "private" else if ((flags & PROTECTED) != 0) "protected" else "" } else if ((flags & PROTECTED) != 0) "protected[" + privateWithin + "]" else "private[" + privateWithin + "]" ) // FIXME: This 
method is used several places internally. Remove its // internal use and then re-deprecate it. // @deprecated("use flagString on the flag-carrying member", "2.10.0") private[scala] def flagsToString(flags: Long, privateWithin: String): String = { val access = accessString(flags, privateWithin) val nonAccess = flagsToString(flags & ~AccessFlags) List(nonAccess, access) filterNot (_ == "") mkString " " } // FIXME: This method is used several places internally. Remove its // internal use and then re-deprecate it. // @deprecated("use flagString on the flag-carrying member", "2.10.0") private[scala] def flagsToString(flags: Long): String = { // Fast path for common case if (flags == 0L) "" else { var sb: StringBuilder = null var i = 0 while (i <= MaxBitPosition) { val mask = rawFlagPickledOrder(i) if ((flags & mask) != 0L) { val s = flagToString(mask) if (s.length > 0) { if (sb eq null) sb = new StringBuilder append s else if (sb.length == 0) sb append s else sb append " " append s } } i += 1 } if (sb eq null) "" else sb.toString } } // List of the raw flags, in pickled order final val MaxBitPosition = 62 final val pickledListOrder: List[Long] = { val all = 0 to MaxBitPosition map (1L << _) val front = mappedRawFlags map (_.toLong) front.toList ++ (all filterNot (front contains _)) } final val rawFlagPickledOrder: Array[Long] = pickledListOrder.toArray } object Flags extends Flags
scala/scala
src/reflect/scala/reflect/internal/Flags.scala
Scala
apache-2.0
27,271
package com.arcusys.valamis.lesson.scorm.service.sequencing import com.arcusys.valamis.lesson.scorm.model.tracking.{ ActivityStateNode, ActivityStateTree } trait DeliveryRequestServiceContract { def apply(tree: ActivityStateTree, activityToDeliver: ActivityStateNode) }
ViLPy/Valamis
valamis-scorm-lesson/src/main/scala/com/arcusys/valamis/lesson/scorm/service/sequencing/DeliveryRequestServiceContract.scala
Scala
lgpl-3.0
274
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.command.mutation.merge import java.util import java.util.UUID import scala.collection.JavaConverters._ import org.apache.hadoop.fs.Path import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType} import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl import org.apache.spark.rdd.RDD import org.apache.spark.sql.{DataFrame, Row, SparkSession} import org.apache.spark.sql.avro.AvroFileFormatFactory import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.GenericInternalRow import org.apache.spark.sql.execution.command.{ExecutionErrors, UpdateTableModel} import org.apache.spark.sql.execution.command.mutation.HorizontalCompaction import org.apache.spark.sql.functions.col import org.apache.spark.sql.types.{StringType, StructField} import org.apache.spark.sql.util.SparkSQLUtil import org.apache.carbondata.core.constants.CarbonCommonConstants import org.apache.carbondata.core.datastore.impl.FileFactory import org.apache.carbondata.core.index.Segment import org.apache.carbondata.core.metadata.schema.table.CarbonTable import 
org.apache.carbondata.core.mutate.SegmentUpdateDetails import org.apache.carbondata.processing.loading.FailureCauses import org.apache.carbondata.spark.util.CarbonSparkUtil /** * This class handles the merge actions of UPSERT, UPDATE, DELETE, INSERT */ abstract class MergeHandler( sparkSession: SparkSession, frame: DataFrame, targetCarbonTable: CarbonTable, stats: Stats, srcDS: DataFrame) { protected def performTagging: (RDD[Row], String) = { val tupleId = frame.queryExecution.analyzed.output.zipWithIndex .find(_._1.name.equalsIgnoreCase(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID)).get._2 val schema = org.apache.spark.sql.types.StructType(Seq( StructField(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID, StringType))) val job = CarbonSparkUtil.createHadoopJob() job.setOutputKeyClass(classOf[Void]) job.setOutputValueClass(classOf[InternalRow]) val insertedRows = stats.insertedRows val updatedRows = stats.updatedRows val uuid = UUID.randomUUID.toString job.setJobID(new JobID(uuid, 0)) val path = targetCarbonTable.getTablePath + CarbonCommonConstants.FILE_SEPARATOR + "avro" FileOutputFormat.setOutputPath(job, new Path(path)) val factory = AvroFileFormatFactory.getAvroWriter(sparkSession, job, schema) val config = SparkSQLUtil.broadCastHadoopConf(sparkSession.sparkContext, job.getConfiguration) frame.queryExecution.toRdd.mapPartitionsWithIndex { case (index, iterator) => val confB = config.value.value val task = new TaskID(new JobID(uuid, 0), TaskType.MAP, index) val attemptID = new TaskAttemptID(task, index) val context = new TaskAttemptContextImpl(confB, attemptID) val writer = factory.newInstance(path + CarbonCommonConstants.FILE_SEPARATOR + task.toString, schema, context) new Iterator[InternalRow] { override def hasNext: Boolean = { if (iterator.hasNext) { true } else { writer.close() false } } override def next(): InternalRow = { val row = iterator.next() val newArray = new Array[Any](1) val tupleID = row.getUTF8String(tupleId) if (tupleID == null) { 
insertedRows.add(1) } else { newArray(0) = tupleID writer.write(new GenericInternalRow(newArray)) updatedRows.add(1) } null } } }.count() val deltaRdd = AvroFileFormatFactory.readAvro(sparkSession, path) (deltaRdd, path) } protected def triggerAction( factTimestamp: Long, executorErrors: ExecutionErrors, deltaRdd: RDD[Row], deltaPath: String): (util.List[SegmentUpdateDetails], Seq[Segment]) = { val tuple = MergeUtil.triggerAction(sparkSession, targetCarbonTable, factTimestamp, executorErrors, deltaRdd) FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(deltaPath)) MergeUtil.updateSegmentStatusAfterUpdateOrDelete(targetCarbonTable, factTimestamp, tuple) tuple } protected def insertDataToTargetTable(updateTableModel: Option[UpdateTableModel]): Seq[Row] = { val tableCols = targetCarbonTable.getCreateOrderColumn.asScala.map(_.getColName). filterNot(_.equalsIgnoreCase(CarbonCommonConstants.DEFAULT_INVISIBLE_DUMMY_MEASURE)) val header = tableCols.mkString(",") val dataFrame = srcDS.select(tableCols.map(col): _*) MergeUtil.insertDataToTargetTable(sparkSession, targetCarbonTable, header, updateTableModel, dataFrame) } protected def tryHorizontalCompaction(): Unit = { // Do IUD Compaction. 
HorizontalCompaction.tryHorizontalCompaction( sparkSession, targetCarbonTable) } def handleMerge() } case class UpdateHandler( sparkSession: SparkSession, frame: DataFrame, targetCarbonTable: CarbonTable, stats: Stats, srcDS: DataFrame) extends MergeHandler(sparkSession, frame, targetCarbonTable, stats, srcDS) { override def handleMerge(): Unit = { assert(frame != null, "The dataframe used to perform merge can be only for insert operation") val factTimestamp = System.currentTimeMillis() val executorErrors = ExecutionErrors(FailureCauses.NONE, "") val (deltaRdd, path) = performTagging if (deltaRdd.isEmpty()) { return } val tuple = triggerAction(factTimestamp, executorErrors, deltaRdd, path) val updateTableModel = Some(UpdateTableModel(isUpdate = true, factTimestamp, executorErrors, tuple._2, Option.empty)) insertDataToTargetTable(updateTableModel) tryHorizontalCompaction() } } case class DeleteHandler( sparkSession: SparkSession, frame: DataFrame, targetCarbonTable: CarbonTable, stats: Stats, srcDS: DataFrame) extends MergeHandler(sparkSession, frame, targetCarbonTable, stats, srcDS) { override def handleMerge(): Unit = { assert(frame != null, "The dataframe used to perform merge can be only for insert operation") val factTimestamp = System.currentTimeMillis() val executorErrors = ExecutionErrors(FailureCauses.NONE, "") val (deleteRDD, path) = performTagging if (deleteRDD.isEmpty()) { return } triggerAction(factTimestamp, executorErrors, deleteRDD, path) MergeUtil.updateStatusIfJustDeleteOperation(targetCarbonTable, factTimestamp) tryHorizontalCompaction() } } case class InsertHandler( sparkSession: SparkSession, frame: DataFrame, targetCarbonTable: CarbonTable, stats: Stats, srcDS: DataFrame) extends MergeHandler(sparkSession, frame, targetCarbonTable, stats, srcDS) { override def handleMerge(): Unit = { insertDataToTargetTable(None) } } case class UpsertHandler( sparkSession: SparkSession, frame: DataFrame, targetCarbonTable: CarbonTable, stats: Stats, srcDS: 
DataFrame) extends MergeHandler(sparkSession, frame, targetCarbonTable, stats, srcDS) { override def handleMerge(): Unit = { assert(frame != null, "The dataframe used to perform merge can be only for insert operation") val factTimestamp = System.currentTimeMillis() val executorErrors = ExecutionErrors(FailureCauses.NONE, "") val (updateDataRDD, path) = performTagging val tuple = triggerAction(factTimestamp, executorErrors, updateDataRDD, path) val updateTableModel = Some(UpdateTableModel(isUpdate = true, factTimestamp, executorErrors, tuple._2, Option.empty)) insertDataToTargetTable(updateTableModel) tryHorizontalCompaction() } }
zzcclp/carbondata
integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/MergeHandler.scala
Scala
apache-2.0
8,618
/* * Copyright (c) 2018. Fengguo Wei and others. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License v2.0 * which accompanies this distribution, and is available at * https://www.apache.org/licenses/LICENSE-2.0 * * Detailed contributors are listed in the CONTRIBUTOR.md */ package org.argus.jawa.core import org.argus.jawa.core.elements.{AccessFlag, JawaType} import org.argus.jawa.core.io.NoReporter import org.scalatest.{FlatSpec, Matchers} import org.argus.jawa.core.util.FileUtil /** * @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a> */ class GlobalTest extends FlatSpec with Matchers { "Load code" should "have size 4" in { val srcUri = FileUtil.toUri(getClass.getResource("/test1").getPath) val global = new Global("test", new NoReporter) global.load(srcUri ,Constants.JAWA_FILE_EXT) assert(global.getApplicationClassCodes.size == 4) } "Load code" should "have given type" in { val srcUri = FileUtil.toUri(getClass.getResource("/test1").getPath) val global = new Global("test", new NoReporter) global.load(srcUri ,Constants.JAWA_FILE_EXT) assert(global.getApplicationClassCodes.contains(new JawaType("com.ksu.fieldFlowSentivity.MainActivity"))) } "Given type" should "in application category" in { val srcUri = FileUtil.toUri(getClass.getResource("/test1").getPath) val global = new Global("test", new NoReporter) global.load(srcUri ,Constants.JAWA_FILE_EXT) assert(global.isApplicationClasses(new JawaType("com.ksu.fieldFlowSentivity.MainActivity")) && global.getClassCategoryFromClassPath(new JawaType("com.ksu.fieldFlowSentivity.MainActivity")) == global.ClassCategory.APPLICATION) } "Given type" should "in library category" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) assert(global.isSystemLibraryClasses(new JawaType("java.lang.Object"))&& global.getClassCategoryFromClassPath(new JawaType("java.lang.Object")) == 
global.ClassCategory.SYSTEM_LIBRARY) } "Class path" should "contains given type" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) assert(global.containsClass(new JawaType("java.lang.Object"))) } "MyClass for java.lang.Object" should "have following content" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) val myClass = global.getMyClass(new JawaType("java.lang.Object")).get assert( AccessFlag.isPublic(myClass.accessFlag) && myClass.typ == new JawaType("java.lang.Object") && myClass.superType.isEmpty && myClass.interfaces.isEmpty && myClass.outerType.isEmpty ) } "MyClass for android.app.Activity" should "have following content" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) val myClass = global.getMyClass(new JawaType("android.app.Activity")).get assert( AccessFlag.isPublic(myClass.accessFlag) && myClass.typ == new JawaType("android.app.Activity") && myClass.superType.isDefined && myClass.superType.get.equals(new JawaType("android.view.ContextThemeWrapper")) && myClass.interfaces.size == 5 && myClass.outerType.isEmpty ) } "getClazz for java.lang.Object" should "return a JawaClass" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) assert(global.getClazz(new JawaType("java.lang.Object")).isDefined) } "getClazz for java.lang.Object1" should "return None" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) assert(global.getClazz(new JawaType("java.lang.Object1")).isEmpty) } "getClassOrResolve for java.lang.Object1" should "return an unknown JawaClass" in { val global = new Global("test", new NoReporter) global.setJavaLib(getClass.getResource("/libs/android.jar").getPath) assert(global.getClassOrResolve(new 
JawaType("java.lang.Object1")).isUnknown) } "Get application classes" should "return 4 classes" in { val srcUri = FileUtil.toUri(getClass.getResource("/test1").getPath) val global = new Global("test", new NoReporter) global.load(srcUri ,Constants.JAWA_FILE_EXT) assert(global.getApplicationClasses.size == 4) } "Get user library classes" should "return empty" in { val srcUri = FileUtil.toUri(getClass.getResource("/test1").getPath) val global = new Global("test", new NoReporter) global.load(srcUri ,Constants.JAWA_FILE_EXT) assert(global.getUserLibraryClasses.isEmpty) } }
arguslab/Argus-SAF
jawa/src/test/scala/org/argus/jawa/core/GlobalTest.scala
Scala
apache-2.0
4,878
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.joins import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput} import java.nio.ByteOrder import java.util.{HashMap => JavaHashMap} import org.apache.spark.memory.{TaskMemoryManager, StaticMemoryManager} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.execution.SparkSqlSerializer import org.apache.spark.sql.execution.local.LocalNode import org.apache.spark.sql.execution.metric.{LongSQLMetric, SQLMetrics} import org.apache.spark.unsafe.Platform import org.apache.spark.unsafe.map.BytesToBytesMap import org.apache.spark.unsafe.memory.MemoryLocation import org.apache.spark.util.{SizeEstimator, KnownSizeEstimation, Utils} import org.apache.spark.util.collection.CompactBuffer import org.apache.spark.{SparkConf, SparkEnv} /** * Interface for a hashed relation by some key. Use [[HashedRelation.apply]] to create a concrete * object. 
*/ private[execution] sealed trait HashedRelation { def get(key: InternalRow): Seq[InternalRow] // This is a helper method to implement Externalizable, and is used by // GeneralHashedRelation and UniqueKeyHashedRelation protected def writeBytes(out: ObjectOutput, serialized: Array[Byte]): Unit = { out.writeInt(serialized.length) // Write the length of serialized bytes first out.write(serialized) } // This is a helper method to implement Externalizable, and is used by // GeneralHashedRelation and UniqueKeyHashedRelation protected def readBytes(in: ObjectInput): Array[Byte] = { val serializedSize = in.readInt() // Read the length of serialized bytes first val bytes = new Array[Byte](serializedSize) in.readFully(bytes) bytes } } /** * A general [[HashedRelation]] backed by a hash map that maps the key into a sequence of values. */ private[joins] final class GeneralHashedRelation( private var hashTable: JavaHashMap[InternalRow, CompactBuffer[InternalRow]]) extends HashedRelation with Externalizable { // Needed for serialization (it is public to make Java serialization work) def this() = this(null) override def get(key: InternalRow): Seq[InternalRow] = hashTable.get(key) override def writeExternal(out: ObjectOutput): Unit = { writeBytes(out, SparkSqlSerializer.serialize(hashTable)) } override def readExternal(in: ObjectInput): Unit = { hashTable = SparkSqlSerializer.deserialize(readBytes(in)) } } /** * A specialized [[HashedRelation]] that maps key into a single value. This implementation * assumes the key is unique. 
*/ private[joins] final class UniqueKeyHashedRelation(private var hashTable: JavaHashMap[InternalRow, InternalRow]) extends HashedRelation with Externalizable { // Needed for serialization (it is public to make Java serialization work) def this() = this(null) override def get(key: InternalRow): Seq[InternalRow] = { val v = hashTable.get(key) if (v eq null) null else CompactBuffer(v) } def getValue(key: InternalRow): InternalRow = hashTable.get(key) override def writeExternal(out: ObjectOutput): Unit = { writeBytes(out, SparkSqlSerializer.serialize(hashTable)) } override def readExternal(in: ObjectInput): Unit = { hashTable = SparkSqlSerializer.deserialize(readBytes(in)) } } // TODO(rxin): a version of [[HashedRelation]] backed by arrays for consecutive integer keys. private[execution] object HashedRelation { def apply(localNode: LocalNode, keyGenerator: Projection): HashedRelation = { apply(localNode.asIterator, SQLMetrics.nullLongMetric, keyGenerator) } def apply( input: Iterator[InternalRow], numInputRows: LongSQLMetric, keyGenerator: Projection, sizeEstimate: Int = 64): HashedRelation = { if (keyGenerator.isInstanceOf[UnsafeProjection]) { return UnsafeHashedRelation( input, numInputRows, keyGenerator.asInstanceOf[UnsafeProjection], sizeEstimate) } // TODO: Use Spark's HashMap implementation. val hashTable = new JavaHashMap[InternalRow, CompactBuffer[InternalRow]](sizeEstimate) var currentRow: InternalRow = null // Whether the join key is unique. If the key is unique, we can convert the underlying // hash map into one specialized for this. 
var keyIsUnique = true // Create a mapping of buildKeys -> rows while (input.hasNext) { currentRow = input.next() numInputRows += 1 val rowKey = keyGenerator(currentRow) if (!rowKey.anyNull) { val existingMatchList = hashTable.get(rowKey) val matchList = if (existingMatchList == null) { val newMatchList = new CompactBuffer[InternalRow]() hashTable.put(rowKey.copy(), newMatchList) newMatchList } else { keyIsUnique = false existingMatchList } matchList += currentRow.copy() } } if (keyIsUnique) { val uniqHashTable = new JavaHashMap[InternalRow, InternalRow](hashTable.size) val iter = hashTable.entrySet().iterator() while (iter.hasNext) { val entry = iter.next() uniqHashTable.put(entry.getKey, entry.getValue()(0)) } new UniqueKeyHashedRelation(uniqHashTable) } else { new GeneralHashedRelation(hashTable) } } } /** * A HashedRelation for UnsafeRow, which is backed by HashMap or BytesToBytesMap that maps the key * into a sequence of values. * * When it's created, it uses HashMap. After it's serialized and deserialized, it switch to use * BytesToBytesMap for better memory performance (multiple values for the same are stored as a * continuous byte array. * * It's serialized in the following format: * [number of keys] * [size of key] [size of all values in bytes] [key bytes] [bytes for all values] * ... * * All the values are serialized as following: * [number of fields] [number of bytes] [underlying bytes of UnsafeRow] * ... */ private[joins] final class UnsafeHashedRelation( private var hashTable: JavaHashMap[UnsafeRow, CompactBuffer[UnsafeRow]]) extends HashedRelation with KnownSizeEstimation with Externalizable { private[joins] def this() = this(null) // Needed for serialization // Use BytesToBytesMap in executor for better performance (it's created when deserialization) // This is used in broadcast joins and distributed mode only @transient private[this] var binaryMap: BytesToBytesMap = _ /** * Return the size of the unsafe map on the executors. 
* * For broadcast joins, this hashed relation is bigger on the driver because it is * represented as a Java hash map there. While serializing the map to the executors, * however, we rehash the contents in a binary map to reduce the memory footprint on * the executors. * * For non-broadcast joins or in local mode, return 0. */ def getUnsafeSize: Long = { if (binaryMap != null) { binaryMap.getTotalMemoryConsumption } else { 0 } } override def estimatedSize: Long = { if (binaryMap != null) { binaryMap.getTotalMemoryConsumption } else { SizeEstimator.estimate(hashTable) } } override def get(key: InternalRow): Seq[InternalRow] = { val unsafeKey = key.asInstanceOf[UnsafeRow] if (binaryMap != null) { // Used in Broadcast join val map = binaryMap // avoid the compiler error val loc = new map.Location // this could be allocated in stack binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset, unsafeKey.getSizeInBytes, loc) if (loc.isDefined) { val buffer = CompactBuffer[UnsafeRow]() val base = loc.getValueAddress.getBaseObject var offset = loc.getValueAddress.getBaseOffset val last = loc.getValueAddress.getBaseOffset + loc.getValueLength while (offset < last) { val numFields = Platform.getInt(base, offset) val sizeInBytes = Platform.getInt(base, offset + 4) offset += 8 val row = new UnsafeRow row.pointTo(base, offset, numFields, sizeInBytes) buffer += row offset += sizeInBytes } buffer } else { null } } else { // Use the Java HashMap in local mode or for non-broadcast joins (e.g. 
ShuffleHashJoin) hashTable.get(unsafeKey) } } override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException { if (binaryMap != null) { // This could happen when a cached broadcast object need to be dumped into disk to free memory out.writeInt(binaryMap.numElements()) var buffer = new Array[Byte](64) def write(addr: MemoryLocation, length: Int): Unit = { if (buffer.length < length) { buffer = new Array[Byte](length) } Platform.copyMemory(addr.getBaseObject, addr.getBaseOffset, buffer, Platform.BYTE_ARRAY_OFFSET, length) out.write(buffer, 0, length) } val iter = binaryMap.iterator() while (iter.hasNext) { val loc = iter.next() // [key size] [values size] [key bytes] [values bytes] out.writeInt(loc.getKeyLength) out.writeInt(loc.getValueLength) write(loc.getKeyAddress, loc.getKeyLength) write(loc.getValueAddress, loc.getValueLength) } } else { assert(hashTable != null) out.writeInt(hashTable.size()) val iter = hashTable.entrySet().iterator() while (iter.hasNext) { val entry = iter.next() val key = entry.getKey val values = entry.getValue // write all the values as single byte array var totalSize = 0L var i = 0 while (i < values.length) { totalSize += values(i).getSizeInBytes + 4 + 4 i += 1 } assert(totalSize < Integer.MAX_VALUE, "values are too big") // [key size] [values size] [key bytes] [values bytes] out.writeInt(key.getSizeInBytes) out.writeInt(totalSize.toInt) out.write(key.getBytes) i = 0 while (i < values.length) { // [num of fields] [num of bytes] [row bytes] // write the integer in native order, so they can be read by UNSAFE.getInt() if (ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN) { out.writeInt(values(i).numFields()) out.writeInt(values(i).getSizeInBytes) } else { out.writeInt(Integer.reverseBytes(values(i).numFields())) out.writeInt(Integer.reverseBytes(values(i).getSizeInBytes)) } out.write(values(i).getBytes) i += 1 } } } } override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException { val nKeys = in.readInt() // This is 
used in Broadcast, shared by multiple tasks, so we use on-heap memory // TODO(josh): This needs to be revisited before we merge this patch; making this change now // so that tests compile: val taskMemoryManager = new TaskMemoryManager( new StaticMemoryManager( new SparkConf().set("spark.memory.offHeap.enabled", "false"), Long.MaxValue, Long.MaxValue, 1), 0) val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes) .getOrElse(new SparkConf().getSizeAsBytes("spark.buffer.pageSize", "16m")) // TODO(josh): We won't need this dummy memory manager after future refactorings; revisit // during code review binaryMap = new BytesToBytesMap( taskMemoryManager, (nKeys * 1.5 + 1).toInt, // reduce hash collision pageSizeBytes) var i = 0 var keyBuffer = new Array[Byte](1024) var valuesBuffer = new Array[Byte](1024) while (i < nKeys) { val keySize = in.readInt() val valuesSize = in.readInt() if (keySize > keyBuffer.length) { keyBuffer = new Array[Byte](keySize) } in.readFully(keyBuffer, 0, keySize) if (valuesSize > valuesBuffer.length) { valuesBuffer = new Array[Byte](valuesSize) } in.readFully(valuesBuffer, 0, valuesSize) // put it into binary map val loc = binaryMap.lookup(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize) assert(!loc.isDefined, "Duplicated key found!") val putSuceeded = loc.putNewKey( keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize, valuesBuffer, Platform.BYTE_ARRAY_OFFSET, valuesSize) if (!putSuceeded) { throw new IOException("Could not allocate memory to grow BytesToBytesMap") } i += 1 } } } private[joins] object UnsafeHashedRelation { def apply( input: Iterator[InternalRow], numInputRows: LongSQLMetric, keyGenerator: UnsafeProjection, sizeEstimate: Int): HashedRelation = { // Use a Java hash table here because unsafe maps expect fixed size records val hashTable = new JavaHashMap[UnsafeRow, CompactBuffer[UnsafeRow]](sizeEstimate) // Create a mapping of buildKeys -> rows while (input.hasNext) { val unsafeRow = input.next().asInstanceOf[UnsafeRow] 
numInputRows += 1 val rowKey = keyGenerator(unsafeRow) if (!rowKey.anyNull) { val existingMatchList = hashTable.get(rowKey) val matchList = if (existingMatchList == null) { val newMatchList = new CompactBuffer[UnsafeRow]() hashTable.put(rowKey.copy(), newMatchList) newMatchList } else { existingMatchList } matchList += unsafeRow.copy() } } new UnsafeHashedRelation(hashTable) } }
chenc10/Spark-PAF
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
Scala
apache-2.0
14,416
/** * Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com> */ package akka.io import Tcp._ import akka.actor.{ ActorLogging, Props } /** * INTERNAL API * * TcpManager is a facade for accepting commands ([[akka.io.Tcp.Command]]) to open client or server TCP connections. * * TcpManager is obtainable by calling {{{ IO(Tcp) }}} (see [[akka.io.IO]] and [[akka.io.Tcp]]) * * == Bind == * * To bind and listen to a local address, a [[akka.io.Tcp.Bind]] command must be sent to this actor. If the binding * was successful, the sender of the [[akka.io.Tcp.Bind]] will be notified with a [[akka.io.Tcp.Bound]] * message. The sender() of the [[akka.io.Tcp.Bound]] message is the Listener actor (an internal actor responsible for * listening to server events). To unbind the port an [[akka.io.Tcp.Unbind]] message must be sent to the Listener actor. * * If the bind request is rejected because the Tcp system is not able to register more channels (see the nr-of-selectors * and max-channels configuration options in the akka.io.tcp section of the configuration) the sender will be notified * with a [[akka.io.Tcp.CommandFailed]] message. This message contains the original command for reference. * * When an inbound TCP connection is established, the handler will be notified by a [[akka.io.Tcp.Connected]] message. * The sender of this message is the Connection actor (an internal actor representing the TCP connection). At this point * the procedure is the same as for outbound connections (see section below). * * == Connect == * * To initiate a connection to a remote server, a [[akka.io.Tcp.Connect]] message must be sent to this actor. If the * connection succeeds, the sender() will be notified with a [[akka.io.Tcp.Connected]] message. The sender of the * [[akka.io.Tcp.Connected]] message is the Connection actor (an internal actor representing the TCP connection). 
Before * starting to use the connection, a handler must be registered to the Connection actor by sending a [[akka.io.Tcp.Register]] * command message. After a handler has been registered, all incoming data will be sent to the handler in the form of * [[akka.io.Tcp.Received]] messages. To write data to the connection, a [[akka.io.Tcp.Write]] message must be sent * to the Connection actor. * * If the connect request is rejected because the Tcp system is not able to register more channels (see the nr-of-selectors * and max-channels configuration options in the akka.io.tcp section of the configuration) the sender will be notified * with a [[akka.io.Tcp.CommandFailed]] message. This message contains the original command for reference. * */ private[io] class TcpManager(tcp: TcpExt) extends SelectionHandler.SelectorBasedManager(tcp.Settings, tcp.Settings.NrOfSelectors) with ActorLogging { def receive = workerForCommandHandler { case c: Connect ⇒ val commander = sender() // cache because we create a function that will run asynchly (registry ⇒ Props(classOf[TcpOutgoingConnection], tcp, registry, commander, c)) case b: Bind ⇒ val commander = sender() // cache because we create a function that will run asynchly (registry ⇒ Props(classOf[TcpListener], selectorPool, tcp, registry, commander, b)) } }
rorygraves/perf_tester
corpus/akka/akka-actor/src/main/scala/akka/io/TcpManager.scala
Scala
apache-2.0
3,276
package com.twitter.finagle.transport

import com.twitter.concurrent.AsyncQueue
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.{Stack, Status}
import com.twitter.finagle.ssl
import com.twitter.io.{Buf, Reader, Writer}
import com.twitter.util.{Closable, Future, Promise, Time, Throw, Return, Duration}
import java.net.SocketAddress
import java.security.cert.Certificate

// Mapped: ideally via a util-codec?

/**
 * A transport is a representation of a stream of objects that may be
 * read from and written to asynchronously. Transports are connected
 * to some endpoint, typically via a channel pipeline that performs
 * encoding and decoding.
 */
trait Transport[In, Out] extends Closable { self =>
  /**
   * Write {{req}} to this transport; the returned future
   * acknowledges write completion.
   */
  def write(req: In): Future[Unit]

  /**
   * Read a message from the transport.
   */
  def read(): Future[Out]

  /**
   * The status of this transport; see [[com.twitter.finagle.Status$]] for
   * status definitions.
   */
  def status: Status

  /**
   * The channel closed with the given exception. This is the
   * same exception you would get if attempting to read or
   * write on the Transport, but this allows clients to listen to
   * close events.
   */
  val onClose: Future[Throwable]

  /**
   * The locally bound address of this transport.
   */
  def localAddress: SocketAddress

  /**
   * The remote address to which the transport is connected.
   */
  def remoteAddress: SocketAddress

  /**
   * The peer certificate if a TLS session is established.
   */
  def peerCertificate: Option[Certificate]

  /**
   * Maps this transport to `Transport[In1, Out2]`. Note, exceptions
   * in `f` and `g` are lifted to a [[com.twitter.util.Future]].
   *
   * @param f The function applied to `write`s input.
   * @param g The function applied to the result of a `read`
   */
  def map[In1, Out1](f: In1 => In, g: Out => Out1): Transport[In1, Out1] =
    new Transport[In1, Out1] {
      // Future(f(req)) lifts exceptions thrown by `f` into a failed Future.
      def write(req: In1): Future[Unit] = Future(f(req)).flatMap(self.write)
      def read(): Future[Out1] = self.read().map(g)
      def status = self.status
      val onClose = self.onClose
      def localAddress = self.localAddress
      def remoteAddress = self.remoteAddress
      def peerCertificate = self.peerCertificate
      def close(deadline: Time) = self.close(deadline)
      override def toString: String = self.toString
    }
}

/**
 * A collection of [[com.twitter.finagle.Stack.Param]]'s useful for configuring
 * a [[com.twitter.finagle.transport.Transport]].
 *
 * @define $param a [[com.twitter.finagle.Stack.Param]] used to configure
 */
object Transport {
  // Context key under which the peer certificate is published for the local span.
  private[finagle] val peerCertCtx = new Contexts.local.Key[Certificate]

  /**
   * Retrieve the transport's SSLSession (if any) from
   * [[com.twitter.finagle.context.Contexts.local]]
   */
  def peerCertificate: Option[Certificate] = Contexts.local.get(peerCertCtx)

  /**
   * $param the buffer sizes of a `Transport`.
   *
   * @param send An option indicating the size of the send buffer.
   * If None, the implementation default is used.
   *
   * @param recv An option indicating the size of the receive buffer.
   * If None, the implementation default is used.
   */
  case class BufferSizes(send: Option[Int], recv: Option[Int]) {
    def mk(): (BufferSizes, Stack.Param[BufferSizes]) =
      (this, BufferSizes.param)
  }
  object BufferSizes {
    implicit val param = Stack.Param(BufferSizes(None, None))
  }

  /**
   * $param the liveness of a `Transport`. These properties dictate the
   * lifecycle of a `Transport` and ensure that it remains relevant.
   *
   * @param readTimeout A maximum duration a listener is allowed
   * to read a request.
   *
   * @param writeTimeout A maximum duration a listener is allowed to
   * write a response.
   *
   * @param keepAlive An option indicating if the keepAlive is on or off.
   * If None, the implementation default is used.
   */
  case class Liveness(
    readTimeout: Duration,
    writeTimeout: Duration,
    keepAlive: Option[Boolean]
  ) {
    def mk(): (Liveness, Stack.Param[Liveness]) =
      (this, Liveness.param)
  }
  object Liveness {
    implicit val param = Stack.Param(Liveness(Duration.Top, Duration.Top, None))
  }

  /**
   * $param the verbosity of a `Transport`. Transport activity is
   * written to [[com.twitter.finagle.param.Logger]].
   */
  case class Verbose(enabled: Boolean) {
    def mk(): (Verbose, Stack.Param[Verbose]) =
      (this, Verbose.param)
  }
  object Verbose {
    implicit val param = Stack.Param(Verbose(enabled = false))
  }

  /**
   * $param the TLS engine for a `Transport`.
   */
  case class TLSClientEngine(e: Option[SocketAddress => ssl.Engine]) {
    def mk(): (TLSClientEngine, Stack.Param[TLSClientEngine]) =
      (this, TLSClientEngine.param)
  }
  object TLSClientEngine {
    implicit val param = Stack.Param(TLSClientEngine(None))
  }

  /**
   * $param the TLS engine for a `Transport`.
   */
  case class TLSServerEngine(e: Option[() => ssl.Engine]) {
    def mk(): (TLSServerEngine, Stack.Param[TLSServerEngine]) =
      (this, TLSServerEngine.param)
  }
  object TLSServerEngine {
    implicit val param = Stack.Param(TLSServerEngine(None))
  }

  /**
   * $param the options (i.e., socket options) of a `Transport`.
   *
   * @param noDelay enables or disables `TCP_NODELAY` (Nagle's algorithm)
   * option on a transport socket (`noDelay = true` means
   * disabled). Default is `true` (disabled).
   *
   * @param reuseAddr enables or disables `SO_REUSEADDR` option on a
   * transport socket. Default is `true`.
   */
  case class Options(noDelay: Boolean, reuseAddr: Boolean) {
    def mk(): (Options, Stack.Param[Options]) =
      (this, Options.param)
  }
  object Options {
    implicit val param: Stack.Param[Options] =
      Stack.Param(Options(noDelay = true, reuseAddr = true))
  }

  /**
   * Serializes the object stream from a `Transport` into a
   * [[com.twitter.io.Writer]].
   *
   * The serialization function `f` can return `Future.None` to interrupt the
   * stream to facilitate using the transport with multiple writers and vice
   * versa.
   *
   * Both transport and writer are unmanaged, the caller must close when
   * done using them.
   *
   * {{{
   * copyToWriter(trans, w)(f) ensure {
   *   trans.close()
   *   w.close()
   * }
   * }}}
   *
   * @param trans The source Transport.
   *
   * @param w The destination [[com.twitter.io.Writer]].
   *
   * @param f A mapping from `A` to `Future[Option[Buf]]`.
   */
  private[finagle] def copyToWriter[A](trans: Transport[_, A], w: Writer)
    (f: A => Future[Option[Buf]]): Future[Unit] = {
    // Recursion is via Future composition, so the stack does not grow with
    // stream length; `None` from `f` terminates the copy.
    trans.read().flatMap(f).flatMap {
      case None => Future.Done
      case Some(buf) => w.write(buf) before copyToWriter(trans, w)(f)
    }
  }

  /**
   * Collates a transport, using the collation function `chunkOfA`,
   * into a [[com.twitter.io.Reader]].
   *
   * Collation completes when `chunkOfA` returns `Future.None`. The returned
   * [[com.twitter.io.Reader]] is also a Unit-typed
   * [[com.twitter.util.Future]], which is satisfied when collation
   * is complete, or else has failed.
   *
   * @note This deserves its own implementation, independently of
   * using copyToWriter. In particular, in today's implementation,
   * the path of interrupts are a little convoluted; they would be
   * clarified by an independent implementation.
   */
  private[finagle] def collate[A](trans: Transport[_, A], chunkOfA: A => Future[Option[Buf]])
    : Reader with Future[Unit] = new Promise[Unit] with Reader {
    private[this] val rw = Reader.writable()

    // The Promise half of this object tracks the outcome of the copy loop;
    // the writable side is failed/closed to propagate it to readers.
    become(Transport.copyToWriter(trans, rw)(chunkOfA) respond {
      case Throw(exc) => rw.fail(exc)
      case Return(_) => rw.close()
    })

    def read(n: Int) = rw.read(n)

    def discard(): Unit = {
      rw.discard()
      raise(new Reader.ReaderDiscarded)
    }
  }

  /**
   * Casts an object transport to `Transport[In1, Out1]`. Note that this is
   * generally unsafe: only do this when you know the cast is guaranteed safe.
   * This is useful when coercing a netty object pipeline into a typed transport,
   * for example.
   */
  def cast[In1, Out1](trans: Transport[Any, Any]): Transport[In1, Out1] =
    // Unsafe by design: reads are cast to Out1 with no runtime check.
    trans.map(_.asInstanceOf[Any], _.asInstanceOf[Out1])
}

/**
 * A factory for transports: they are specially encoded as to be
 * polymorphic.
 */
trait TransportFactory {
  def apply[In, Out](): Transport[In, Out]
}

/**
 * A `Transport` interface to a pair of queues (one for reading, one
 * for writing); useful for testing.
 */
class QueueTransport[In, Out](writeq: AsyncQueue[In], readq: AsyncQueue[Out])
  extends Transport[In, Out] {
  // Fulfilled once, with the first failure/close cause observed.
  private[this] val closep = new Promise[Throwable]

  // Writes always succeed immediately; the queue is unbounded.
  def write(input: In) = {
    writeq.offer(input)
    Future.Done
  }

  // A failed poll (e.g. queue failure) also records the close cause.
  def read(): Future[Out] =
    readq.poll() onFailure { exc =>
      closep.updateIfEmpty(Throw(exc))
    }

  def status = if (closep.isDefined) Status.Closed else Status.Open

  // close() is deliberately unsupported; it records the exception as the
  // close cause and returns it as a failed Future.
  def close(deadline: Time) = {
    val ex = new IllegalStateException("close() is undefined on QueueTransport")
    closep.updateIfEmpty(Return(ex))
    Future.exception(ex)
  }

  val onClose = closep
  val localAddress = new SocketAddress{}
  val remoteAddress = new SocketAddress{}
  def peerCertificate: Option[Certificate] = None
}
liamstewart/finagle
finagle-core/src/main/scala/com/twitter/finagle/transport/Transport.scala
Scala
apache-2.0
9,444
package de.ummels.dijkstra

import de.ummels.prioritymap.PriorityMap

/** Dijkstra's single-source shortest-path algorithm backed by a priority map. */
object DijkstraPriority extends Dijkstra {

  /**
   * Computes shortest paths from `source` in graph `g`.
   *
   * @return a pair of maps: node -> shortest distance from `source`, and
   *         node -> predecessor on a shortest path.
   */
  def dijkstra[N](g: Graph[N])(source: N): (Map[N, Int], Map[N, N]) = {
    @annotation.tailrec
    def loop(frontier: PriorityMap[N, Int],
             settled: Map[N, Int],
             previous: Map[N, N]): (Map[N, Int], Map[N, N]) = {
      if (frontier.isEmpty) (settled, previous)
      else {
        // The frontier's head is the unsettled node with the smallest tentative distance.
        val (current, dist) = frontier.head
        // Relax every outgoing edge that improves on the best known bound.
        val improved = g(current).collect {
          case (n, w) if !settled.contains(n) && dist + w < frontier.getOrElse(n, Int.MaxValue) =>
            n -> (dist + w)
        }
        val updatedPrevious = previous ++ improved.map { case (n, _) => n -> current }
        loop(frontier.tail ++ improved, settled + (current -> dist), updatedPrevious)
      }
    }

    loop(PriorityMap(source -> 0), Map.empty, Map.empty)
  }

  override def toString = "DijkstraPriority"
}
ummels/dijkstra-in-scala
src/main/scala/de/ummels/dijkstra/DijkstraPriority.scala
Scala
isc
853
package cgta.otest
package runner

import sbt.testing.SubclassFingerprint

//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman, Jeff Gomberg
// All Rights Reserved
// please contact ben@jackman.biz or jeff@cgtanalytics.com
// for licensing inquiries
// Created by bjackman @ 5/23/14 3:55 PM
//////////////////////////////////////////////////////////////

/** Helpers for wiring the otest framework into sbt's test interface. */
object FrameworkHelp {
  val funSuiteName = "cgta.otest.FunSuite"

  /**
   * Builds a single subclass fingerprint for `funSuiteName`.
   *
   * @param module    true to match objects (modules), false to match classes
   * @param noArgCtor whether a no-arg constructor is required
   */
  private def fingerprint(module: Boolean, noArgCtor: Boolean): sbt.testing.Fingerprint =
    new SubclassFingerprint {
      def superclassName = funSuiteName
      def isModule = module
      def requireNoArgConstructor = noArgCtor
    }

  /** Fingerprints sbt uses to discover test suites: object suites and class suites. */
  def fingerprints(): Array[sbt.testing.Fingerprint] =
    Array(
      fingerprint(module = true, noArgCtor = false),
      fingerprint(module = false, noArgCtor = true)
    )
}
cgta/otest
otest/shared/src/main/scala/cgta/otest/runner/FrameworkHelp.scala
Scala
mit
826
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql

import org.scalatest.BeforeAndAfterEach

import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf._

/**
 * Test cases for the builder pattern of [[SparkSession]].
 */
class SparkSessionBuilderSuite extends SparkFunSuite with BeforeAndAfterEach {

  override def afterEach(): Unit = {
    // This suite should not interfere with the other test suites.
    // Stop and clear both the active and the default session after every test.
    SparkSession.getActiveSession.foreach(_.stop())
    SparkSession.clearActiveSession()
    SparkSession.getDefaultSession.foreach(_.stop())
    SparkSession.clearDefaultSession()
  }

  test("create with config options and propagate them to SparkContext and SparkSession") {
    val session = SparkSession.builder()
      .master("local")
      .config(UI_ENABLED.key, value = false)
      .config("some-config", "v2")
      .getOrCreate()
    assert(session.sparkContext.conf.get("some-config") == "v2")
    assert(session.conf.get("some-config") == "v2")
  }

  test("use global default session") {
    val session = SparkSession.builder().master("local").getOrCreate()
    // A second getOrCreate() with no explicit session must return the same instance.
    assert(SparkSession.builder().getOrCreate() == session)
  }

  test("sets default and active session") {
    assert(SparkSession.getDefaultSession == None)
    assert(SparkSession.getActiveSession == None)
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(SparkSession.getDefaultSession == Some(session))
    assert(SparkSession.getActiveSession == Some(session))
  }

  test("get active or default session") {
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(SparkSession.active == session)
    SparkSession.clearActiveSession()
    // `active` falls back to the default session when no active one is set.
    assert(SparkSession.active == session)
    SparkSession.clearDefaultSession()
    intercept[IllegalStateException](SparkSession.active)
    session.stop()
  }

  test("config options are propagated to existing SparkSession") {
    val session1 = SparkSession.builder().master("local").config("spark-config1", "a").getOrCreate()
    assert(session1.conf.get("spark-config1") == "a")
    val session2 = SparkSession.builder().config("spark-config1", "b").getOrCreate()
    assert(session1 == session2)
    // getOrCreate() on an existing session overwrites runtime conf values.
    assert(session1.conf.get("spark-config1") == "b")
  }

  test("use session from active thread session and propagate config options") {
    val defaultSession = SparkSession.builder().master("local").getOrCreate()
    val activeSession = defaultSession.newSession()
    SparkSession.setActiveSession(activeSession)
    val session = SparkSession.builder().config("spark-config2", "a").getOrCreate()

    assert(activeSession != defaultSession)
    assert(session == activeSession)
    assert(session.conf.get("spark-config2") == "a")
    assert(session.sessionState.conf == SQLConf.get)
    assert(SQLConf.get.getConfString("spark-config2") == "a")
    SparkSession.clearActiveSession()

    assert(SparkSession.builder().getOrCreate() == defaultSession)
  }

  test("create a new session if the default session has been stopped") {
    val defaultSession = SparkSession.builder().master("local").getOrCreate()
    SparkSession.setDefaultSession(defaultSession)
    defaultSession.stop()
    val newSession = SparkSession.builder().master("local").getOrCreate()
    assert(newSession != defaultSession)
  }

  test("create a new session if the active thread session has been stopped") {
    val activeSession = SparkSession.builder().master("local").getOrCreate()
    SparkSession.setActiveSession(activeSession)
    activeSession.stop()
    val newSession = SparkSession.builder().master("local").getOrCreate()
    assert(newSession != activeSession)
  }

  test("create SparkContext first then SparkSession") {
    val conf = new SparkConf().setAppName("test").setMaster("local").set("key1", "value1")
    val sparkContext2 = new SparkContext(conf)
    val session = SparkSession.builder().config("key2", "value2").getOrCreate()
    assert(session.conf.get("key1") == "value1")
    assert(session.conf.get("key2") == "value2")
    assert(session.sparkContext == sparkContext2)
    // We won't update conf for existing `SparkContext`
    assert(!sparkContext2.conf.contains("key2"))
    assert(sparkContext2.conf.get("key1") == "value1")
  }

  test("create SparkContext first then pass context to SparkSession") {
    val conf = new SparkConf().setAppName("test").setMaster("local").set("key1", "value1")
    val newSC = new SparkContext(conf)
    val session = SparkSession.builder().sparkContext(newSC).config("key2", "value2").getOrCreate()
    assert(session.conf.get("key1") == "value1")
    assert(session.conf.get("key2") == "value2")
    assert(session.sparkContext == newSC)
    assert(session.sparkContext.conf.get("key1") == "value1")
    // If the created sparkContext is passed through the Builder's API sparkContext,
    // the conf of this sparkContext will not contain the conf set through the API config.
    assert(!session.sparkContext.conf.contains("key2"))
    assert(session.sparkContext.conf.get("spark.app.name") == "test")
  }

  test("SPARK-15887: hive-site.xml should be loaded") {
    val session = SparkSession.builder().master("local").getOrCreate()
    assert(session.sessionState.newHadoopConf().get("hive.in.test") == "true")
    assert(session.sparkContext.hadoopConfiguration.get("hive.in.test") == "true")
  }

  test("SPARK-15991: Set global Hadoop conf") {
    val session = SparkSession.builder().master("local").getOrCreate()
    val mySpecialKey = "my.special.key.15991"
    val mySpecialValue = "msv"
    try {
      session.sparkContext.hadoopConfiguration.set(mySpecialKey, mySpecialValue)
      assert(session.sessionState.newHadoopConf().get(mySpecialKey) == mySpecialValue)
    } finally {
      // Unset so the global Hadoop conf does not leak into other tests.
      session.sparkContext.hadoopConfiguration.unset(mySpecialKey)
    }
  }

  test("SPARK-31234: RESET command will not change static sql configs and " +
    "spark context conf values in SessionState") {
    val session = SparkSession.builder()
      .master("local")
      .config(GLOBAL_TEMP_DATABASE.key, value = "globalTempDB-SPARK-31234")
      .config("spark.app.name", "test-app-SPARK-31234")
      .getOrCreate()

    assert(session.sessionState.conf.getConfString("spark.app.name") === "test-app-SPARK-31234")
    assert(session.sessionState.conf.getConf(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31234")
    session.sql("RESET")
    assert(session.sessionState.conf.getConfString("spark.app.name") === "test-app-SPARK-31234")
    assert(session.sessionState.conf.getConf(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31234")
  }

  test("SPARK-31532: should not propagate static sql configs to the existing" +
    " active/default SparkSession") {
    val session = SparkSession.builder()
      .master("local")
      .config(GLOBAL_TEMP_DATABASE.key, value = "globalTempDB-SPARK-31532")
      .config("spark.app.name", "test-app-SPARK-31532")
      .getOrCreate()
    // do not propagate static sql configs to the existing active session
    val session1 = SparkSession
      .builder()
      .config(GLOBAL_TEMP_DATABASE.key, "globalTempDB-SPARK-31532-1")
      .getOrCreate()
    assert(session.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532")
    assert(session1.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532")

    // do not propagate static sql configs to the existing default session
    SparkSession.clearActiveSession()
    val session2 = SparkSession
      .builder()
      .config(WAREHOUSE_PATH.key, "SPARK-31532-db")
      .config(GLOBAL_TEMP_DATABASE.key, value = "globalTempDB-SPARK-31532-2")
      .getOrCreate()

    assert(!session.conf.get(WAREHOUSE_PATH).contains("SPARK-31532-db"))
    assert(session.conf.get(WAREHOUSE_PATH) === session2.conf.get(WAREHOUSE_PATH))
    assert(session2.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532")
  }

  test("SPARK-31532: propagate static sql configs if no existing SparkSession") {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test-app-SPARK-31532-2")
      .set(GLOBAL_TEMP_DATABASE.key, "globaltempdb-spark-31532")
      .set(WAREHOUSE_PATH.key, "SPARK-31532-db")
    SparkContext.getOrCreate(conf)

    // propagate static sql configs if no existing session
    val session = SparkSession
      .builder()
      .config(GLOBAL_TEMP_DATABASE.key, "globalTempDB-SPARK-31532-2")
      .config(WAREHOUSE_PATH.key, "SPARK-31532-db-2")
      .getOrCreate()
    assert(session.conf.get("spark.app.name") === "test-app-SPARK-31532-2")
    assert(session.conf.get(GLOBAL_TEMP_DATABASE) === "globaltempdb-spark-31532-2")
    assert(session.conf.get(WAREHOUSE_PATH) === "SPARK-31532-db-2")
  }
}
zuotingbing/spark
sql/core/src/test/scala/org/apache/spark/sql/SparkSessionBuilderSuite.scala
Scala
apache-2.0
9,601
/* Copyright 2009-2021 EPFL, Lausanne */

import stainless.lang._

// Stainless verification benchmark: exercises closures capturing an outer value.
// NOTE: kept in its exact original form — the verifier checks this file as-is.
object Closures {
  // Returns a closure that adds the captured `x` to its argument.
  def addX(x: Int): Int => Int = {
    (a: Int) => a + x
  }

  // `.holds` (from stainless.lang) asserts the body is valid for all inputs.
  // The parameter `x` is intentionally unused by the body.
  def test(x: Int): Boolean = {
    val add1 = addX(1)
    val add2 = addX(2)
    add1(add2(1)) == 4
  }.holds
}

// vim: set ts=4 sw=4 et:
epfl-lara/stainless
frontends/benchmarks/verification/valid/MicroTests/Closures.scala
Scala
apache-2.0
287
/*
 * Copyright 2016 Frugal Mechanic (http://frugalmechanic.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package fm.common.rich

import fm.common.EventTargetOrTargets
import org.scalajs.dom.raw.{Event, EventTarget}
import org.scalajs.jquery.{JQuery, jQuery}
import scala.scalajs.js

/**
 * Enrichment over a single DOM [[EventTarget]], adding the
 * [[EventTargetOrTargets]] operations. Implemented as a value class,
 * so wrapping incurs no allocation.
 */
final class RichEventTarget(val self: EventTarget) extends AnyVal with EventTargetOrTargets {
  // The single target wrapped as a jQuery selection, as required by the mixin.
  protected def jQueryElements: JQuery = jQuery(self)

  /** Registers `f` as a listener for events of type `tpe` on the wrapped target. */
  def addEventListener[T <: Event](tpe: String)(f: js.Function1[T,_]): Unit = self.addEventListener(tpe, f)

  /** Removes a previously registered listener; must be the same function instance. */
  def removeEventListener[T <: Event](tpe: String)(f: js.Function1[T,_]): Unit = self.removeEventListener(tpe, f)
}
frugalmechanic/fm-common
js/src/main/scala/fm/common/rich/RichEventTarget.scala
Scala
apache-2.0
1,180
package com.github.takezoe.solr.scala.async

import com.github.takezoe.solr.scala.CaseClassMapper
import com.github.takezoe.solr.scala.query.{ExpressionParser, QueryTemplate}
import org.apache.solr.client.solrj.request.UpdateRequest
import org.apache.solr.common.SolrInputDocument

import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

/** Asynchronous Solr client interface: update operations plus transaction helpers. */
trait IAsyncSolrClient {

  protected implicit def parser: ExpressionParser

  /**
   * Execute given operation in the transaction.
   *
   * The transaction is committed if operation was successful.
   * But the transaction is rolled back if an error occurred.
   */
  def withTransaction[T](operations: => Future[T]): Future[T] = {
    // NOTE(review): uses the global ExecutionContext for the completion
    // callbacks; consider letting callers supply one.
    import scala.concurrent.ExecutionContext.Implicits.global
    val p = Promise[T]()
    operations onComplete {
      // Operation succeeded: the result is only surfaced after commit() completes.
      case Success(x) => commit() onComplete {
        case Success(_) => p success x
        case Failure(t) => p failure t
      }
      // Operation failed: roll back, then propagate the ORIGINAL failure
      // (a rollback failure is swallowed here).
      case Failure(t) => rollback() onComplete (_ => p failure t)
    }
    p.future
  }

  // Implementations send the request and complete the promise asynchronously.
  protected def execute(req: UpdateRequest, promise: Promise[Unit]): Future[Unit]

  def query(query: String): AbstractAsyncQueryBuilder

  /**
   * Add the document.
   *
   * @param doc the document to register
   */
  def add(doc: Any): Future[Unit] = {
    val solrDoc = doc match {
      // A raw SolrInputDocument is passed through untouched.
      case sid: SolrInputDocument => sid
      // Anything else is reflected into field/value pairs via CaseClassMapper.
      case _ =>
        val ret = new SolrInputDocument
        // NOTE(review): `map` is used purely for its side effect; `foreach` would
        // express the intent better (behavior is the same).
        CaseClassMapper.toMap(doc) map { case (key, value) =>
          ret.addField(key, value)
        }
        ret
    }
    val req = new UpdateRequest()
    req.add(solrDoc)
    execute(req, Promise[Unit]())
  }

  /**
   * Add the document and commit them immediately.
   *
   * @param doc the document to register
   */
  def register(doc: Any): Future[Unit] = {
    withTransaction {
      add(doc)
    }
  }

  /**
   * Delete the document which has a given id.
   *
   * @param id the identifier of the document to delete
   */
  def deleteById(id: String): Future[Unit] = {
    val req = new UpdateRequest()
    req.deleteById(id)
    execute(req, Promise[Unit]())
  }

  /**
   * Delete documents by the given query.
   *
   * @param query the solr query to select documents which would be deleted
   * @param params the parameter map which would be given to the query
   */
  def deleteByQuery(query: String, params: Map[String, Any] = Map()): Future[Unit] = {
    val req = new UpdateRequest()
    req.deleteByQuery(new QueryTemplate(query).merge(params))
    execute(req, Promise[Unit]())
  }

  def commit(): Future[Unit]

  def rollback(): Future[Unit]

  def shutdown(): Unit
}
takezoe/solr-scala-client
src/main/scala/com/github/takezoe/solr/scala/async/IAsyncSolrClient.scala
Scala
apache-2.0
2,871
package sbt.internal.parser

import org.specs2.mutable._

/** Base trait for specs2 specifications in this package; mixes in the
  * `SplitExpression` fixture shared by the parser tests. */
trait AbstractSpec extends Specification with SplitExpression
Duhemm/sbt
main/src/test/scala/sbt/internal/parser/AbstractSpec.scala
Scala
bsd-3-clause
120
package ca.dubey.music.theory

// NOTE(review): ChannelInfo, ShortMessage and ArrayBuffer are imported but not
// used anywhere in this file — candidates for removal.
import ca.dubey.music.midi.ChannelInfo
import javax.sound.midi.ShortMessage
import scala.collection.mutable.ArrayBuffer

/** Factory methods for [[Chord]]. */
object Chord {
  /** An empty chord (a rest). */
  def apply() : Chord = new Chord(List.empty[Key])
  def apply(notes : Array[Key]) : Chord = new Chord(notes.toList)
  def apply(notes : List[Key]) : Chord = new Chord(notes)

  /**
   * Construct a Chord from a string of notes
   *
   * Example usage: Chord("C E G")
   */
  def apply(s : String) : Chord = {
    // Collect the distinct note tokens.
    // NOTE(review): routing tokens through a mutable hash Set both
    // de-duplicates AND makes the resulting note ORDER depend on hash
    // iteration order, not input order. Since equals/hashCode below compare
    // the notes List positionally, Chord("C E G") and Chord("G E C") may or
    // may not be equal depending on hashing — confirm this is intended.
    val ss = collection.mutable.Set.empty[String]
    for (s <- s.split(" ")) {
      ss += s
    }
    val notes = List.newBuilder[Key]
    for (note <- ss) {
      notes += Key.fromString(note)
    }
    new Chord(notes.result)
  }
}

/** An ordered list of keys sounded together; empty means a rest. */
class Chord(val notes : List[Key]) {
  override def hashCode : Int = notes.hashCode

  // Standard canEqual-based equality: two Chords are equal iff their note
  // lists are equal (order-sensitive).
  override def equals(other : Any) : Boolean = {
    other match {
      case that:Chord =>
        (that canEqual this) &&
        that.notes.equals(this.notes)
      case _ => false
    }
  }

  def canEqual(other : Any) : Boolean = other.isInstanceOf[Chord]

  // Debug helper: prints the chord (or "rest" when empty) to stdout.
  def output = {
    if (notes.size == 0) {
      printf("Play rest\n");
    } else {
      printf("Play %s\n", this.toString)
    }
  }

  // Keeps only the first two notes of the chord.
  def simplify = Chord(notes.take(2))

  override def toString = notes.map((note) => note.toString).mkString(" ")
}
adubey/music
src/main/scala/theory/Chord.scala
Scala
gpl-2.0
1,313
package doodle.examples

import doodle.core._
import doodle.syntax._

/** Draws Koch snowflake curves at increasing recursion depths. */
object Koch {
  import PathElement._

  /**
   * Path elements approximating one Koch edge of the given `length`,
   * starting at `start` and heading in direction `angle`.
   *
   * At depth 0 the edge is a single straight segment; otherwise the edge is
   * split into four sub-edges of one third the length, with the middle two
   * bent outward by 60 degrees.
   *
   * (Fix: removed the unused local `end`, which was computed but never read.)
   */
  def kochElements(depth: Int, start: Point, angle: Angle, length: Double): Seq[PathElement] = {
    if(depth == 0) {
      Seq(lineTo(start + Vec.polar(length, angle)))
    } else {
      val lAngle = angle - 60.degrees
      val rAngle = angle + 60.degrees
      val third = length / 3.0
      val edge = Vec.polar(third, angle)
      // Start points of the four sub-edges.
      val mid1 = start + edge
      val mid2 = mid1 + edge.rotate(-60.degrees)
      val mid3 = mid2 + edge.rotate( 60.degrees)
      kochElements(depth-1, start, angle, third) ++
      kochElements(depth-1, mid1, lAngle, third) ++
      kochElements(depth-1, mid2, rAngle, third) ++
      kochElements(depth-1, mid3, angle, third)
    }
  }

  /** A single Koch curve of the given depth and overall length, as an open path. */
  def koch(depth: Int, length: Double): Image = {
    val origin = Point.cartesian(0, length/6)
    Image.openPath(moveTo(origin) +: kochElements(depth, origin, 0.degrees, length))
  }

  // Curves for depths 1 through 4, stacked vertically.
  val image = allAbove((1 to 4) map { depth =>
    koch(depth, 512)
  })
}
Angeldude/doodle
shared/src/main/scala/doodle/examples/Koch.scala
Scala
apache-2.0
1,083
/* __ *\ ** ________ ___ / / ___ Scala API ** ** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL ** ** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ ** ** /____/\___/_/ |_/____/_/ | | ** ** |/ ** \* */ package scala.xml // XXX This attempt to make Text not a case class revealed a bug in the pattern // matcher (see ticket #2883) so I've put the case back. (It was/is desirable that // it not be a case class because it is using the antipattern of passing constructor // parameters to the superclass where they become vals, but since they will also be // vals in the subclass, it acquires an underscore to avoid a name clash.) // // object Text { // def apply(data: String) = // if (data != null) new Text(data) // else throw new IllegalArgumentException("tried to construct Text with null") // // def unapply(other: Any): Option[String] = other match { // case x: Text => Some(x.data) // case _ => None // } // } /** The class <code>Text</code> implements an XML node for text (PCDATA). * It is used in both non-bound and bound XML representations. * * @author Burak Emir * * @param text the text contained in this node, may not be null. */ case class Text(_data: String) extends Atom[String](_data) { if (_data == null) throw new IllegalArgumentException("tried to construct Text with null") /** Returns text, with some characters escaped according to the XML * specification. * * @param sb ... * @return ... */ override def buildString(sb: StringBuilder) = Utility.escape(data, sb) }
cran/rkafkajars
java/scala/xml/Text.scala
Scala
apache-2.0
1,894
package com.weibo.datasys.rest

import com.typesafe.config.ConfigFactory

import scala.util.Try

/**
 * Created by tuoyu on 26/01/2017.
 *
 * Configuration access for the REST service. Every setting is a lazy val
 * wrapped in Try with a hard-coded fallback, so a missing key silently
 * falls back to its default. Note `config` is declared at the bottom of the
 * trait; the lazy vals make that initialization order safe.
 */
trait Configuration {

  import Configuration._

  lazy val cluster_name = Try(config.getString("cluster.name")).getOrElse("wolong")

  // RestService配置 (REST service host/port and session expiry)
  lazy val host = Try(config.getString("service.host")).getOrElse("localhost")
  lazy val port = Try(config.getInt("service.port")).getOrElse(8080)
  lazy val expiredTime = Try(config.getInt("service.expiretime")).getOrElse(5)

  // 数据来源配置 (data source selector: DB or web)
  // NOTE(review): the key "data.soruce" looks like a typo for "data.source".
  // The deployed config files may carry the same typo, so fixing the key here
  // alone would change behavior — verify against the config before renaming.
  lazy val source = Try(config.getString("data.soruce")).getOrElse(DATA_SOURCE_DB)

  // DB配置 (MySQL connection settings)
  lazy val db_host = Try(config.getString("db.host")).getOrElse("localhost")
  lazy val db_port = Try(config.getInt("db.port")).getOrElse(3306)
  lazy val db_name = Try(config.getString("db.name")).getOrElse("datasys_monitor")
  lazy val db_user = Try(config.getString("db.user")).getOrElse("hadoop")
  lazy val db_passwd = Try(config.getString("db.password")).getOrElse("hadoop")
  lazy val db_url = "jdbc:mysql://%s:%d/%s?autoReconnect=true&useUnicode=true&characterEncoding=utf-8".format(db_host, db_port, db_name)
  lazy val db_driver = "com.mysql.jdbc.Driver"

  // Web接口配置 (upstream web API endpoints, built from a shared URL prefix)
  lazy val web_url_prefix = Try(config.getString("web.url_prefix")).getOrElse("http://mlplat.intra.weibo.com/math")
  lazy val web_user_url = web_url_prefix + "/" + Try(config.getString("web.user_url")).getOrElse("user")
  lazy val web_group_url = web_url_prefix + "/" + Try(config.getString("web.group_url")).getOrElse("getGroup")
  lazy val web_task_url = web_url_prefix + "/" + Try(config.getString("web.task_url")).getOrElse("getTask")
  // Format template: %s placeholders are filled with task_id and status by callers.
  lazy val web_update_task_url = web_url_prefix + "/" + Try(config.getString("web.update_task_url")).getOrElse("updateTask?task_id=%s&status=%s")
  lazy val web_timeout = Try(config.getInt("web.timeout")).getOrElse(5)

  // mesos 配置 (Mesos master and framework identity)
  lazy val mesos_url = Try(config.getString("mesos.master")).getOrElse("10.77.136.42:5050")
  lazy val mesos_default_user = Try(config.getString("mesos.default_user")).getOrElse("hadoop")
  lazy val mesos_framework_name = Try(config.getString("mesos.framework.name")).getOrElse("weibo.wolong")

  // 读取RestService配置 (loads application.conf et al. via Typesafe Config)
  val config = ConfigFactory.load()
}

object Configuration extends {
  // Values the "data source" setting may take.
  // NOTE(review): `dATA_SOURCE_WEB` has an inconsistent lowercase first letter
  // (compare DATA_SOURCE_DB); renaming would break external references, so it
  // is only flagged here.
  val DATA_SOURCE_DB = "db"
  val dATA_SOURCE_WEB = "web"
}
batizty/wolong
src/main/scala/com/weibo/datasys/rest/Configuration.scala
Scala
apache-2.0
2,364
package com.clarifi.reporting package relational import com.clarifi.reporting._ import com.clarifi.reporting.sql._ import com.clarifi.reporting.backends._ import com.clarifi.reporting.Reporting._ import com.clarifi.reporting.util.PartitionedSet import DB._ import PrimT._ import ReportingUtils.simplifyPredicate import SqlPredicate._ import SqlExpr.compileOp import scalaz._ import scalaz.IterV._ import scalaz.Id._ //import Scalaz.{^ => _, _} import scalaz.std.indexedSeq.{toNel => _, _} import scalaz.std.vector.{toNel => _, _} import scalaz.std.map._ import scalaz.std.function._ import scalaz.std.option._ import scalaz.std.list._ import scalaz.std.string._ import scalaz.std.anyVal._ import scalaz.syntax.monad._ import scalaz.syntax.traverse.{ToFunctorOps => _, _} import com.clarifi.machines._ import Plan.awaits import Tee.{ right, left } import org.apache.log4j.Logger import com.clarifi.reporting.util.EnrichedLogger._ case class SqlPrg(h: Header, prg: List[SqlStatement], q: SqlQuery, refl: Reflexivity[ColumnName]) case class MemPrg(h: Header, prg: List[SqlStatement], p: OrderedProcedure[DB, Record], refl: Reflexivity[ColumnName]) class SqlScanner(implicit emitter: SqlEmitter) extends Scanner[DB] { private[this] def logger = Logger getLogger this.getClass import AggFunc._ private[this] def fillTable(table: TableName, header: Header, from: SqlQuery ): List[SqlStatement] = { val c = SqlCreate(table = table, header = header) List(c, SqlInsert(table, c.hints.sortColumns(header.keySet), from)) } // `attr`: The codomain attribute def compileAggFunc(attr: Attribute, f: AggFunc, attrs: String => SqlExpr): SqlExpr = f match { case Count => FunSqlExpr("COUNT", List(Verbatim("*"))) case Sum(x) => FunSqlExpr("SUM", List(compileOp(x, attrs))) case Avg(x) => FunSqlExpr("AVG", List(compileOp(x, attrs))) case Min(x) => FunSqlExpr("MIN", List(compileOp(x, attrs))) case Max(x) => FunSqlExpr("MAX", List(compileOp(x, attrs))) /** @todo MSP - SQLite does not support STDDEV or VAR, work 
around somehow? */ case Stddev(x) => emitter.emitStddevPop(compileOp(x, attrs)) case Variance(x) => emitter.emitVarPop(compileOp(x, attrs)) } val exec = new SqlExecution() import exec._ def sequenceSql(p : List[SqlStatement]) : DB[Unit] = p.distinct.traverse_ { case SqlLoad(tn, h, pc) => bulkLoad(h, tn, pc) case SqlIfNotExists(tn,stats) => { val tecol = "tableExists" val sql = emitter.checkExists(tn, tecol) logger ltrace ("Executing sql: " + sql.run) catchException(DB.transaction(withResultSet(sql, rs => { rs.next() && (rs.getObject(tecol) ne null) }.point[DB]).flatMap(b => if (!b) sequenceSql(stats) else ().point[DB]))) .map(_ fold (e => logger error ("While executing: " + e.getMessage), identity)) } //TODO write job to clean out old memos case x => val sql = x.emitSql(emitter) logger ltrace ("Executing sql: " + sql.run) DB.executeUpdate(sql) } def scanMem[A:Monoid](m: Mem[Nothing, Nothing], f: Process[Record, A], order: List[(String, SortOrder)] = List()): DB[A] = { implicit val sup = Supply.create compileMem(Optimizer.optimize(m), (x:Nothing) => x, (x: Nothing) => x) match { case MemPrg(h, p, q, rx) => for { _ <- sequenceSql(p) a <- q(order) map (_ andThen f execute) } yield a } } def scanRel[A:Monoid](m: Relation[Nothing, Nothing], f: Process[Record, A], order: List[(String, SortOrder)] = List()): DB[A] = { implicit val sup = Supply.create compileRel(Optimizer.optimize(m), (x:Nothing) => x, (x: Nothing) => x) match { case SqlPrg(h, p, q, rx) => for { _ <- sequenceSql(p) a <- scanQuery(orderQuery(q, order), h) map (_ andThen f execute) } yield a } } def scanExt[A:Monoid](m: Ext[Nothing, Nothing], f: Process[Record, A], order: List[(String, SortOrder)] = List()): DB[A] = m match { case ExtRel(r, db) => scanRel(r, f, order) case ExtMem(mem) => scanMem(mem, f, order) } def guidName = "t" + sguid def freshName(implicit sup: Supply) = "t" + sup.fresh def orderQuery(sql: SqlQuery, order: List[(String, SortOrder)])(implicit sup: Supply): SqlQuery = sql match { case sel 
: SqlSelect if sel.attrs.size > 0 && sel.limit == (None, None) => sel copy ( orderBy = order collect { case (col, ord) if (sel.attrs.get(col) map (_.deparenthesize) match { case None | Some(LitSqlExpr(_)) => false case Some(_) => true }) => sel.attrs(col) -> ord(SqlAsc, SqlDesc) } ) case _ if order.nonEmpty => val s = freshName SqlSelect( tables = Map(TableName(s) -> sql), orderBy = order.map { case (col, ord) => ColumnSqlExpr(TableName(s), col) -> ord(SqlAsc, SqlDesc) } ) case _ => sql } import SortOrder._ def compileMem[M,R](m: Mem[R, M], smv: M => MemPrg, srv: R => SqlPrg)(implicit sup: Supply): MemPrg = m match { case VarM(v) => smv(v) case LetM(ext, expr) => val p = ext match { case ExtMem(e) => compileMem(e, smv, srv) case ExtRel(e, _) => { val SqlPrg(h, ps, q, rx) = compileRel(e, smv, srv) MemPrg(h, ps, o => scanQuery(orderQuery(q, o), h), rx) } } val MemPrg(h, ps, pop, rx) = p val ep = compileMem(expr, (v: MLevel[R, M]) => v match { case MTop => MemPrg(h, List(), pop, rx) case MPop(mex) => compileMem(mex, smv, srv) }, srv) ep copy (prg = ps ++ ep.prg) case EmbedMem(ExtMem(e)) => compileMem(e, smv, srv) case EmbedMem(ExtRel(e, _)) => // TODO: Ditto val SqlPrg(h, ps, q, rx) = compileRel(e, smv, srv) MemPrg(h, ps, o => scanQuery(orderQuery(q, o), h), rx) case CombineM(e, attr, op) => val MemPrg(h, ps, q, rx) = compileMem(e, smv, srv) MemPrg(h + (attr.name -> attr.t), ps, o => if (!o.toMap.contains(attr.name)) q(o).map(_.map(t => t + (attr.name -> op.eval(t)))) else { val oo = o takeWhile { case (n, _) => n != attr.name } q(oo).map(_ map (t => t + (attr.name -> op.eval(t))) andThen sorting(oo, o)) }, rx combineAll(Map(attr.name -> op), { case x => x }, x => x)) case AggregateM(e, attr, op) => val MemPrg(h, ps, q, rx) = compileMem(e, smv, srv) MemPrg(Map(attr.name -> attr.t), ps, _ => q(List()) map (_ andThen reduceProcess(op, attr.t).outmap(p => Map(attr.name -> p))), ForallTups(Map(attr.name -> None), PartitionedSet.zero)) case FilterM(m, p) => val MemPrg(h, 
ps, q, rx) = compileMem(m, smv, srv) MemPrg(h, ps, o => q(o) map (_ andThen Process.filtered(Predicates.toFn(simplifyPredicate(p, rx)))), rx && Predicates.constancies(p)) case MergeOuterJoin(m1, m2) => val r1 = compileMem(m1, smv, srv) val r2 = compileMem(m2, smv, srv) val MemPrg(h1, p1, q1, rx1) = r1 val MemPrg(h2, p2, q2, rx2) = r2 val jk = h1.keySet intersect h2.keySet MemPrg(h1 ++ h2, p1 ++ p2, o => { val prefix = o takeWhile (x => jk contains x._1) val preo = prefix ++ jk.view.filterNot(prefix.toMap.contains).map(_ -> Asc) val ord: Order[Record] = recordOrd(preo) val merged = ^(q1(preo), q2(preo))( (q1p, q2p) => q1p.tee(q2p)(Tee.mergeOuterJoin((r: Record) => r filterKeys jk, (r: Record) => r filterKeys jk )(ord)).map { case This(a) => (h2 map (kv => kv._1 -> NullExpr(kv._2))) ++ a case That(b) => (h1 map (kv => kv._1 -> NullExpr(kv._2))) ++ b case Both(a, b) => a ++ b }) if (prefix == o) merged else merged map (_ andThen sorting(prefix, o)) }, rx1 && rx2) case HashInnerJoin(m1, m2) => val r1 = compileMem(m1, smv, srv) val r2 = compileMem(m2, smv, srv) val MemPrg(h1, p1, q1, rx1) = r1 val MemPrg(h2, p2, q2, rx2) = r2 val jk = h1.keySet intersect h2.keySet MemPrg(h1 ++ h2, p1 ++ p2, o => { val os = o.toMap.keySet val o1 = os -- h1.keySet val o2 = os -- h2.keySet val prefix1 = o takeWhile (x => h1.keySet(x._1)) val prefix2 = o takeWhile (x => h2.keySet(x._1)) if (o1.isEmpty) hashJoin(q2(List()), q1(o), jk) else if (o2.isEmpty) hashJoin(q1(List()), q2(o), jk) else if (prefix1.length > prefix2.length) hashJoin(q2(List()), q1(prefix1), jk) map (_ andThen sorting(prefix1, o)) else hashJoin(q1(List()), q2(prefix2), jk) map (_ andThen sorting(prefix2, o) )}, rx1 && rx2) case HashLeftJoin(inner, outer) => val MemPrg(hin, pin, qin, rxin) = compileMem(inner, smv, srv) val MemPrg(hout, pout, qout, rxout) = compileMem(outer, smv, srv) val jk = hin.keySet intersect hout.keySet val nulls : Record = hout collect { case (col, ty) if !(jk contains col) => col -> NullExpr(ty) } 
MemPrg(hin ++ hout, pin ++ pout, o => { val pfx = o takeWhile(x => hin.keySet(x._1)) if (pfx.length == o.length) // no sorting needed leftHashJoin(qin(o), qout(List()), jk, nulls) else leftHashJoin(qin(pfx), qout(List()), jk, nulls) map (_ andThen sorting(pfx, o)) }, rxin && rxout) case AccumulateM(parentIdCol, nodeIdCol, expr, l, t) => val MemPrg(ht,pt,qt,rt) = compileMem(t, smv, srv) val MemPrg(hl,pl,ql,rl) = compileMem(l, smv, srv) val accumProc: OrderedProcedure[DB,Record] = { (ord: List[(String, SortOrder)]) => (db: java.sql.Connection) => // read both the tree and leaves into memory because working on this in a streaming // fashion is difficult var leaves = Map[PrimExpr, Record]() // map from nid to (|..nid..v|) var tree = Map[PrimExpr, Record]() // map from nid to (|..nid..pid..k|) qt(List())(db).foreach( rec => tree = tree + (rec(nodeIdCol.name) -> rec) ) ql(List())(db).foreach( rec => leaves = leaves + (rec(nodeIdCol.name) -> rec) ) // map from nid to all of the leaves in its subtree // for example, the root node will have every list var descendantLeaves: Map[PrimExpr, Vector[Record]] = Map().withDefaultValue(Vector()) // for each leaf, recursively add itself to its parent, grandparent, (great^n)grandparent's list def insertIntoDescendantLeaves(parentId: PrimExpr, leaf: Record): Unit = { // Make sure the leaf's parent exists in the tree, because a leaf could be an orphan // and it doesn't make sense to add orphans to descendantLeaves tree.get(parentId) foreach { parent => descendantLeaves = descendantLeaves + (parentId -> (descendantLeaves(parentId) :+ leaf)) val grandParentId = parent(parentIdCol.name) if( tree.contains(grandParentId) ) // Are we at the root node yet? 
insertIntoDescendantLeaves(grandParentId, leaf) } } leaves.foreach { case (nid,leaf) => insertIntoDescendantLeaves( leaf(nodeIdCol.name), leaf ) } descendantLeaves.view.map { case (nid, leafVec) => val leafRec = Literal( NonEmptyList( leafVec.head, leafVec.tail: _* ) ) val subquery = Mem.instantiate(leafRec, expr) val MemPrg(sh, sp, sq, sr) = compileMem(subquery, smv, srv) if (!sp.isEmpty) sys.error("subqueries of groupBy cannot 'let' new temp tables: " + subquery) else { val nodeTup = (nodeIdCol.name -> nid) sq(List())(db).map(_ + nodeTup) } }.foldLeft(Mem.zeroProcedure[Record])((p1,p2) => Mem.append(p1,p2)). andThen(sorting(List(), ord)) } implicit def err(s: String, msgs: String*): Option[Nothing] = None val hdr = Typer.accumulateType[Option]( parentIdCol.toHeader, nodeIdCol.toHeader, v => Typer.memTyper(Mem.instantiate(EmptyRel(v), expr).substPrg(srv, smv)).toOption, Typer.memTyper(l.substPrg(srv,smv)).toOption.get).get MemPrg(hdr, pt ++ pl, accumProc, rt.filterKeys((k:String) => k == nodeIdCol.name)) case GroupByM(m, k, expr) => def toOp(a: Attribute) = Op.ColumnValue(a.name, a.t) val MemPrg(h, p, q, r) = compileMem(m, smv, srv) val knames = k.map(_.name).toSet val joined: OrderedProcedure[DB, Record] = { (ord: List[(String, SortOrder)]) => (db: java.sql.Connection) => val kord_ = ord.filter { p => knames.contains(p._1) } val kord = kord_ ++ (knames -- kord_.map(_._1)).map(n => (n, SortOrder.Asc)) val v2ord = ord filterNot (kord_ contains) // this will actually run the outermost query, but this is prob // fine, since we are guarded by a function val rows: Procedure[scalaz.Id.Id, Record] = Mem.join { q(kord)(db). andThen( Process.groupingBy((r: Record) => r.filterKeys(knames.contains))). 
map { case (key, recs) => val x = Literal(NonEmptyList(recs.head, recs.tail: _*)) val subquery = Mem.instantiate(x, expr) val MemPrg(sh, sp, sq, sr) = compileMem(subquery, smv, srv) if (!sp.isEmpty) sys.error("subqueries of groupBy cannot 'let' new temp tables: " + subquery) else sq(v2ord)(db).map(_ ++ key) } } rows andThen sorting(kord ++ v2ord, ord) } implicit def err(s: String, msgs: String*): Option[Nothing] = None val hdr = Typer.groupByType[Option]( h, k.map(_.tuple).toMap, v => Typer.memTyper(Mem.instantiate(EmptyRel(v), expr).substPrg(srv, smv)).toOption).get MemPrg(hdr, p, joined, r.filterKeys(knames.contains)) case ProcedureCall(args, h, proc, namespace) => sys.error("TODO") case l@Literal(ts) => MemPrg(l.header, List(), so => com.clarifi.machines.Source.source(sort(ts.list, so)).idProcedure.point[DB], Reflexivity literal ts) case EmptyRel(h) => MemPrg(h, List(), _ => Machine.stopped.idProcedure.point[DB], KnownEmpty()) case QuoteMem(n) => sys.error("Cannot scan quotes.") case ExceptM(m, cs) => val MemPrg(h, p, q, rx) = compileMem(m, smv, srv) MemPrg(h -- cs, p, q andThen (_ andThen (_ map ((t: Record) => t -- cs))), rx) case ProjectM(m, cs) => val MemPrg(h, p, q, rx) = compileMem(m, smv, srv) MemPrg(cs.map(_._1.tuple), p, q andThen (_ andThen (_ map ((t: Record) => cs.map { case (attr, op) => attr.name -> op.eval(t) }))), combineAll(rx, cs)) case x => sys.error("inconceivable! 
" + x) } private def hashJoin(q1: DB[Procedure[Id, Record]], q2: DB[Procedure[Id, Record]], jk: Set[String]) = ^(q1, q2)((q1, q2) => q1.tee(q2)(Tee.hashJoin(_ filterKeys jk, _ filterKeys jk)).map(p => p._1 ++ p._2)) private def leftHashJoin(dq1: DB[Procedure[Id, Record]], dq2: DB[Procedure[Id, Record]], jk: Set[String], nulls: Record): DB[Procedure[Id, Record]] = { def build(m: Map[Record, Vector[Record]]): Plan[T[Record, Record], Nothing, Map[Record, Vector[Record]]] = (for { rr <- awaits(right[Record]) k = rr filterKeys jk mp <- build(m.updated(k, m.getOrElse(k, Vector.empty) :+ rr)) } yield mp) orElse Return(m) def augments(m: Map[Record, Vector[Record]], r: Record): Vector[Record] = m.lift(r filterKeys jk) match { case None => Vector(r ++ nulls) case Some(v) => v map (r ++ _) } def emits(v: Vector[Record]): Plan[T[Record, Record], Record, Unit] = v.foldr[Plan[T[Record, Record], Record, Unit]](Return(()))(e => k => Emit(e, () => k)) ^(dq1, dq2)((q1, q2) => q1.tee(q2)(build(Map()) flatMap { m => awaits(left[Record]) flatMap { r => emits(augments(m, r)) } repeatedly })) } private def filterRx(rx: Reflexivity[ColumnName], p: Predicate) = rx && Predicates.constancies(p) private def distinctness(h: Header, rx: Reflexivity[ColumnName], cols: Map[Attribute, Op]): Set[String] = rx match { case KnownEmpty() => Set() case ForallTups(_, _) => if ((h.keySet diff rx.consts.keySet) forall (c => cols exists { case (_, op) => preservesDistinctness(rx, op, c) })) Set() else Set("distinct") } /** @todo SMRC Since this was written, the op language has changed * such that this answers true too often. 
*/ private def preservesDistinctness(rx: Reflexivity[ColumnName], op: Op, col: ColumnName): Boolean = { val ms = op.foldMap((c: ColumnName) => Map(c -> 1)) ms.get(col).map(_ == 1).getOrElse(false) && ms.keySet.forall(c => c == col || rx.consts.isDefinedAt(c)) } private def combineAll(rx: Reflexivity[ColumnName], comb: Map[Attribute, Op], keepOld: Boolean = false) = rx combineAll (mapKeys(comb)(_.name), {case x => x}, identity, keepOld) def compileRel[M,R](m: Relation[M, R], smv: M => MemPrg, srv: R => SqlPrg)(implicit sup: Supply): SqlPrg = { def mkUnion(l: Relation[M, R], r: Relation[M, R], op: (SqlQuery, SqlQuery) => SqlQuery) = { val lc = compileRel(l, smv, srv) val rc = compileRel(r, smv, srv) val SqlPrg(h1, p1, q1, refl1) = lc val SqlPrg(h2, p2, q2, refl2) = rc SqlPrg(h1, p1 ++ p2, op(q1, q2), refl1 || refl2) } def columns(h: Header, rv: TableName) = h.map(x => (x._1, ColumnSqlExpr(rv, x._1))) m match { case VarR(v) => srv(v) case Join(l, r) => val SqlPrg(h1, p1, q1, refl1) = compileRel(l, smv, srv) val SqlPrg(h2, p2, q2, refl2) = compileRel(r, smv, srv) val jn = (q1, q2) match { case (SqlJoin(xs, ul), SqlJoin(ys, _)) => SqlJoin(xs append ys, ul) case (SqlJoin(xs, ul), _) => SqlJoin(xs append NonEmptyList((q2, TableName(freshName), h2)), ul) case (_, SqlJoin(ys, ur)) => SqlJoin((q1, TableName(freshName), h1) <:: ys, ur) case (_, _) => val un = freshName val ul = freshName val ur = freshName SqlJoin(NonEmptyList((q1, TableName(ul), h1), (q2, TableName(ur), h2)), TableName(un)) } SqlPrg(h1 ++ h2, p1 ++ p2, jn, refl1 && refl2) case JoinOn(l, r, on) => val lc = compileRel(l, smv, srv) val rc = compileRel(r, smv, srv) val un = freshName val ul = freshName val ur = freshName val SqlPrg(h1, p1, q1, refl1) = lc val SqlPrg(h2, p2, q2, refl2) = rc SqlPrg(h1 ++ h2, p1 ++ p2, SqlJoinOn((q1, TableName(ul), h1), (q2, TableName(ur), h2), on, TableName(un)), refl1 && refl2) case Union(l, r) => mkUnion(l, r, SqlUnion(_, _)) case Minus(l, r) => val lc = compileRel(l, smv, srv) 
val rc = compileRel(r, smv, srv) val un = freshName val ul = freshName val ur = freshName val SqlPrg(h, p1, q1, refl1) = lc val SqlPrg(_, p2, q2, refl2) = rc SqlPrg(h, p1 ++ p2, SqlExcept(q1, TableName(ul), q2, TableName(ur), h), refl1 || refl2) case Filter(r, pred) => val rc = compileRel(r, smv, srv) val un = freshName val SqlPrg(h, p, q, rx) = rc val pred1 = simplifyPredicate(pred, rx) SqlPrg(h, p, q match { case v:SqlSelect if (v.limit._1.isEmpty && v.limit._2.isEmpty) => v.copy(criteria = compilePredicate(pred1, v.attrs) :: v.criteria) case _ => SqlSelect(tables = Map(TableName(un) -> q), attrs = columns(h, TableName(un)), criteria = List(compilePredicate(pred1, ColumnSqlExpr(TableName(un), _)))) }, filterRx(rx, pred)) case Project(r, cols) => val rc = compileRel(r, smv, srv) val un = freshName val SqlPrg(h, p, q, rx) = rc SqlPrg(cols.map(_._1.tuple), p, SqlSelect(tables = Map(TableName(un) -> q), attrs = { val as = columns(h, TableName(un)) cols map { case (attr, op) => (attr.name -> compileOp(op, as)) }}), combineAll(rx, cols)) case Aggregate(r, attr, f) => val rc = compileRel(r, smv, srv) val un = freshName val SqlPrg(h, p, q, _) = rc SqlPrg(Map(attr.name -> attr.t), p, q match { case v:SqlSelect if v.limit == (None, None) => v.copy(attrs = Map(attr.name -> compileAggFunc(attr, f, v.attrs))) case _ => SqlSelect(tables = Map(TableName(un) -> q), attrs = Map(attr.name -> compileAggFunc(attr, f, columns(h, TableName(un))))) }, ForallTups(Map(attr.name -> None), PartitionedSet.zero)) case Except(r, cs) => val rc = compileRel(r, smv, srv) val un = freshName val SqlPrg(h, p, q, rx) = rc SqlPrg(h -- cs, p, SqlSelect(tables = Map(TableName(un) -> q), attrs = columns(h, TableName(un)) -- cs), rx filterKeys (!cs.contains(_))) case Combine(r, attr, op) => val rc = compileRel(r, smv, srv) val un = freshName val SqlPrg(h, p, q, rx) = rc SqlPrg(h + attr.tuple, p, SqlSelect(tables = Map(TableName(un) -> q), attrs = { val as = columns(h, TableName(un)) as + (attr.name -> 
compileOp(op, as)) }), combineAll(rx, Map(attr -> op), true)) case Limit(r, from, to, order) => val rc = compileRel(r, smv, srv) val u1 = freshName val u2 = freshName val SqlPrg(h, p, q, rx) = rc SqlPrg(h, p, emitter.emitLimit(q, h, TableName(u1), from, to, (toNel(order.map { case (k, v) => ColumnSqlExpr(TableName(u1), k) -> v.apply( asc = SqlAsc, desc = SqlDesc )}).map(_.list).getOrElse( h.keys.map(k => (ColumnSqlExpr(TableName(u1), k), SqlAsc)).toList)), TableName(u2)), rx) case Table(h, n) => SqlPrg(h, List(), FromTable(n, h.keys.toList), Reflexivity.zero) case TableProc(args, oh, src, namespace) => val h = oh.toMap val argable = TableProc.argFunctor.map(args){case (typeName, r) => (TableName(guidName, List(), TableName.Variable(typeName)), compileRel(r, smv, srv))} val un = guidName val sink = TableName(un, List(), TableName.Temporary) SqlPrg(h, TableProc.argFoldable.foldMap(argable)(_._2.prg.toIndexedSeq) :+ SqlCreate(table = sink, header = h) :+ SqlExec(sink, src, namespace, TableProc.argFoldable.foldMap(argable){ case (unt, SqlPrg(ih, _, iq, _)) => fillTable(unt, ih, iq) }, oh map (_._1), argable map (_ bimap (_._1, SqlExpr.compileLiteral))) toList, FromTable(sink, h.keys.toList), Reflexivity.zero) case RelEmpty(h) => val un = guidName SqlPrg(h, List(SqlCreate(table = TableName(un, List(), TableName.Temporary), header = h)), FromTable(TableName(un, List(), TableName.Temporary), h.keys.toList), KnownEmpty()) case QuoteR(_) => sys.error("Cannot scan quotes") case l@SmallLit(ts) => import com.clarifi.machines.Source val h = l.header val un = guidName SqlPrg(h, List(SqlCreate(table = TableName(un, List(), TableName.Temporary), header = h), SqlLoad(TableName(un, List(), TableName.Temporary), h, Source.source(ts.toList).idProcedure.point[DB])), FromTable(TableName(un, List(), TableName.Temporary), h.keys.toList), Reflexivity literal ts) case MemoR(r) => { val rc = compileRel(r,smv,srv) val relHash = "MemoHash_" + r.##.toString val myTN = TableName(relHash, List(), 
TableName.Persistent) val fillStat = fillTable(myTN, rc.h, rc.q) val myPrg = List(SqlIfNotExists(myTN,rc.prg ++ fillStat)) SqlPrg(rc.h, myPrg, FromTable(myTN, rc.h.keys.toList), rc.refl) } case LetR(ext, exp) => val un = guidName val tup = ext match { case ExtRel(rel, _) => // TODO: Handle namespace val rc = compileRel(rel, smv, srv) val SqlPrg(ih, ip, iq, rx) = rc (ih, rx, ip ++ fillTable(TableName(un, List(), TableName.Temporary), ih, iq)) case ExtMem(mem) => val m =compileMem(mem, smv, srv) val MemPrg(h, p, pop, rx) = m (h, rx, p ++ List(SqlCreate(table = TableName(un, List(), TableName.Temporary), header = h), SqlLoad(TableName(un, List(), TableName.Temporary), h, pop(List())))) } val (ih, rx1, ps) = tup val ec = compileRel(exp, smv, (v: RLevel[M,R]) => v match { case RTop => SqlPrg(ih, List(), FromTable(TableName(un, List(), TableName.Temporary), ih.keys.toList), rx1) case RPop(e) => compileRel(e, smv, srv) }) val SqlPrg(h, p, q, rx2) = ec SqlPrg(h, ps ++ p, q, rx2) case SelectR(rs, cs, where) => // Here be dragons. 
val prgs = rs.map(compileRel(_, smv, srv)) val (stmts, hs, qs, rx) = prgs.foldRight((List[SqlStatement](), List[Header](), List[SqlQuery](), Reflexivity.zero[ColumnName])) { case (SqlPrg(h, stmts, q, rx), (astmts, hs, qs, rxs)) => (stmts ++ astmts, h :: hs, q :: qs, rx && rxs) } val hsp = hs.map(h => freshName -> h) val rx1 = filterRx(rx, where) val rx2 = combineAll(rx1, cs) val columnLocs: Map[ColumnName, List[String]] = hsp.toIterable flatMap { case (subUn, h) => h.keys map (_ -> subUn) } groupBy (_._1) mapValues (_ map (_._2) toList) val lookupColumn = (c:ColumnName) => ColumnSqlExpr(TableName(columnLocs(c).head), c) val h = hs.foldRight(Map():Header)(_ ++ _) SqlPrg(cs.map(_._1.tuple), stmts, SqlSelect(options = distinctness(h, rx1, cs), attrs = cs.map { case (attr, op) => attr.name -> compileOp(op, lookupColumn) }, tables = (hsp zip qs) map { case ((un, _), q) => TableName(un) -> q } toMap, criteria = compilePredicate(simplifyPredicate(where, rx), lookupColumn) :: (for { natJoin <- columnLocs (colName, sources) = natJoin natJoinAtom <- sources zip sources.tail (l, r) = natJoinAtom } yield SqlEq(ColumnSqlExpr(TableName(l), colName), ColumnSqlExpr(TableName(r), colName))).toList), rx2) } } }
ermine-language/ermine-legacy
src/main/scala/com/clarifi/reporting/relational/SqlScanner.scala
Scala
bsd-2-clause
29,007
package org.improving.scalify import Scalify._ import org.eclipse.jdt.core.dom // not NamedDecl because the fragments are individually class FieldDeclaration(override val node: dom.FieldDeclaration) extends BodyDeclaration(node) { lazy val FieldDeclaration(javadoc, mods, jtype, fragments) = node override def allFragments = fragments override def emitDirect = emitFields(_.emitValOrVar) def emitAsVal = emitFields(x => VAL) private def emitFields(valOrVar: (dom.VariableDeclarationFragment) => Emission): Emission = { val fragsToEmit = fragments.filter(!_.isDeferredVal) REP(fragsToEmit.map(x => valOrVar(x) ~ x ~ NL)) } } class EnumConstantDeclaration(override val node: dom.EnumConstantDeclaration) extends BodyDeclaration(node) with VariableBound with MethodBound { def vb = node.resolveVariable def mb = node.resolveConstructorBinding override def flags = node.getModifiers override def binding = super[VariableBound].binding } class EnumDeclaration(override val node: dom.EnumDeclaration) extends AbstractTypeDeclaration(node) { lazy val EnumDeclaration(_, _, _, superInterfaceTypes, enumConstants, bodyDecls) = node override def emitDirect: Emission = CLASS ~ name ~ EXTENDS ~ ENUMCLASS ~ BRACES(VAL ~ ARGS(enumConstants.map(_.getName)) ~ EQUALS ~ VALUE ~ NL) } class AnnotationTypeDeclaration(override val node: dom.AnnotationTypeDeclaration) extends AbstractTypeDeclaration(node) { lazy val AnnotationTypeDeclaration(_, _, _, bodyDecls) = node lazy val emits: List[Emission] = bodyDecls.map { case AnnotationTypeMemberDeclaration(_, _, name, jtype, expr) => VAL ~ name ~ COLON ~ jtype ~ emitOpt(expr, EQUALS ~ _) ~ NL case x: dom.BodyDeclaration => x.emit } override def emitDirect: Emission = CLASS ~ name ~ EXTENDS ~ Emit("scala.Annotation") ~ BRACES(REP(emits)) } abstract class AbstractTypeDeclaration(override val node: dom.AbstractTypeDeclaration) extends BodyDeclaration(node) with TypeBound with NamedDecl { def tb = node.resolveBinding override def flags = tb.getModifiers val name 
= node.getName // misc defaults that we don't want to clutter STD def emitNew: Emission = NEW def emitClassType: Emission = CLASS } class BodyDeclaration(override val node: dom.BodyDeclaration) extends Node(node) with Modifiable { def flags = node.getModifiers def allFragments: List[dom.VariableDeclarationFragment] = Nil def emitDirect = node match { case Initializer(_, _, Block(stmts)) => REP(stmts) case AnnotationTypeMemberDeclaration(_, mods, name, jtype, expr) => val anns: List[dom.Annotation] = mods flatMap { case x: dom.Annotation => List(x) ; case _ => Nil } REP(anns) ~ name ~ jtype ~ emitOpt(expr) ~ NL // TODO case _ => emitDefault } // // helper monkeys // // this.x = x is a no-op def isRedundantAssignment(x: dom.Statement) = x match { case ExpressionStatement(Assignment(FieldAccess( ThisExpression(None), SimpleName(name)), JavaOp("="), SimpleName(rhs))) if name == rhs => true case _ => false } // does this field declaration have the same name as a constructor parameter? def isRedundantVar(x: dom.FieldDeclaration, paramNames: List[String]): Boolean = { val FieldDeclaration(_, _, ftype, fragments) = x for (VariableDeclarationFragment(SimpleName(name), _, _) <- fragments) if (paramNames contains name) return true return false } }
mbana/scalify
src/main/ast/Body.scala
Scala
isc
3,423
package org.trustedanalytics.sparktk.frame.internal.ops.exportdata import org.apache.hadoop.conf.Configuration import org.apache.commons.lang.StringUtils import org.apache.hadoop.hbase.io.ImmutableBytesWritable import org.apache.hadoop.hbase.util.Bytes import org.apache.hadoop.mapreduce.Job import org.apache.hadoop.hbase.mapreduce.TableOutputFormat import org.apache.hadoop.hbase.{ HColumnDescriptor, HTableDescriptor, HBaseConfiguration } import org.apache.hadoop.hbase.client.{ Put, HBaseAdmin } import org.trustedanalytics.sparktk.frame.{ Schema, DataTypes } import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd import org.trustedanalytics.sparktk.frame.internal.{ FrameState, FrameSummarization, BaseFrame } trait ExportToHbaseSummarization extends BaseFrame { /** * Write current frame to HBase table. * * @param tableName The name of the HBase table that will contain the exported frame * @param keyColumnName The name of the column to be used as row key in hbase table * @param familyName The family name of the HBase table that will contain the exported frame */ def exportToHbase(tableName: String, keyColumnName: Option[String] = None, familyName: String = "family") = { execute(ExportToHbase(tableName, keyColumnName, familyName)) } } case class ExportToHbase(tableName: String, keyColumnName: Option[String], familyName: String) extends FrameSummarization[Unit] { require(StringUtils.isNotEmpty(tableName), "Hbase table name is required") require(keyColumnName != null, "Hbase key column name cannot be null") require(StringUtils.isNotEmpty(familyName), "Hbase table family name is required") override def work(state: FrameState): Unit = { ExportToHbase.exportToHbaseTable(state, tableName, keyColumnName, familyName) } } object ExportToHbase { def exportToHbaseTable(frameRdd: FrameRdd, tableName: String, keyColumnName: Option[String], familyName: String) = { val conf = createConfig(tableName) val pairRdd = convertToPairRDD(frameRdd, familyName, 
keyColumnName.getOrElse(StringUtils.EMPTY)) val hBaseAdmin = new HBaseAdmin(HBaseConfiguration.create()) if (!hBaseAdmin.tableExists(tableName)) { val desc = new HTableDescriptor(tableName) desc.addFamily(new HColumnDescriptor(familyName)) hBaseAdmin.createTable(desc) } else { val desc = hBaseAdmin.getTableDescriptor(tableName.getBytes()) if (!desc.hasFamily(familyName.getBytes())) { desc.addFamily(new HColumnDescriptor(familyName)) hBaseAdmin.modifyTable(tableName, desc) } } pairRdd.saveAsNewAPIHadoopDataset(conf) } /** * Creates pair rdd to save to hbase * * @param rdd initial frame rdd * @param familyColumnName family column name for hbase * @param keyColumnName key column name for hbase * @return pair rdd */ def convertToPairRDD(rdd: FrameRdd, familyColumnName: String, keyColumnName: String) = { rdd.mapRows(_.valuesAsArray()).zipWithUniqueId().map { case (row, index) => buildRow((row, index), rdd.frameSchema, familyColumnName, keyColumnName) } } /** * Create initial configuration for hbase writer * * @param tableName name of hBase table * @return hBase configuration */ private def createConfig(tableName: String): Configuration = { val conf = HBaseConfiguration.create() conf.set(TableOutputFormat.OUTPUT_TABLE, tableName) val job = new Job(conf) job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]]) job.getConfiguration } /** * Builds a row * * @param row row of the original frame * @param schema original schema * @param familyColumnName family column name for hbase * @param keyColumnName key column name for hbase * @return hbase row */ private def buildRow(row: (Array[Any], Long), schema: Schema, familyColumnName: String, keyColumnName: String) = { val columnTypes = schema.columns.map(_.dataType) val columnNames = schema.columns.map(_.name) val familyColumnAsByteArray = Bytes.toBytes(familyColumnName) val valuesAsDataTypes = DataTypes.parseMany(columnTypes.toArray)(row._1) val valuesAsByteArray = valuesAsDataTypes.map(value => { if (null == 
value) null else Bytes.toBytes(value.toString) }) val keyColumnValue = Bytes.toBytes(keyColumnName + row._2) val put = new Put(keyColumnValue) for (index <- 0 to valuesAsByteArray.length - 1) { if (valuesAsByteArray(index) != null) { put.add(familyColumnAsByteArray, Bytes.toBytes(columnNames(index)), valuesAsByteArray(index)) } } (new ImmutableBytesWritable(keyColumnValue), put) } }
shibanis1/spark-tk
core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/exportdata/ExportToHbase.scala
Scala
apache-2.0
4,876
/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */ package example.idl import other.ns.{ExternalDependency, Suit} final case class DependentRecord(dependency: ExternalDependency, number: Int) final case class DependentRecord2(dependency: Suit.Value, name: String) final case class DependentRecord3(dependency: Embedded, value: Boolean)
julianpeeters/avrohugger
avrohugger-core/src/test/expected/standard/example/idl/ImportProtocol.scala
Scala
apache-2.0
352
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.utils.tf.loaders import java.nio.ByteOrder import com.intel.analytics.bigdl.Module import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity, DataFormat} import com.intel.analytics.bigdl.nn.tf.{Conv3D => Conv3DOps} import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.utils.tf.Context import org.tensorflow.framework.NodeDef import scala.reflect.ClassTag class Conv3D extends TensorflowOpsLoader { import Utils._ override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = { val attributes = nodeDef.getAttrMap val (pT, pW, pH) = if (getString(attributes, "padding") == "SAME") { (-1, -1, -1) } else { (0, 0, 0) } val strideList = getIntList(attributes, "strides") require(strideList.head == 1, s"not support strides on batch") val format = getString(attributes, "data_format") val conv = format match { case "NDHWC" => require(strideList(4) == 1, s"not support strides on depth") val dT = strideList(1) val dW = strideList(2) val dH = strideList(3) Conv3DOps[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC) case "NCDHW" => require(strideList(1) == 1, s"not support strides on depth") val dT = strideList(2) val dW = strideList(3) val dH = strideList(4) Conv3DOps[T](dT, dW, dH, pT, pW, pH, DataFormat.NCHW) case _ => throw new 
IllegalArgumentException(s"not supported data format: $format") } conv.asInstanceOf[AbstractModule[Activity, Activity, T]] } }
yiheng/BigDL
spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/tf/loaders/Conv3D.scala
Scala
apache-2.0
2,351
package com.github.takezoe.solr.scala.async import java.io.IOException import okhttp3.{Call, Callback, OkHttpClient, Response} import scala.concurrent.Promise object AsyncUtils { /** * A result handler implementation for AsyncHttpClient * which notifies the result of asynchronous request via Promise. */ class CallbackHandler[T](httpClient: OkHttpClient, promise: Promise[T], success: Response => T = (x: Response) => ()) extends Callback { override def onFailure(call: Call, e: IOException): Unit = { promise.failure(e) } override def onResponse(call: Call, response: Response): Unit = try { promise.success(success(response)) } finally { response.close() } } }
takezoe/solr-scala-client
src/main/scala/com/github/takezoe/solr/scala/async/AsyncUtils.scala
Scala
apache-2.0
738
package de.zalando.play.controllers

import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, LocalDate}

/**
 * An utility class for parsing date and date-time inputs as required by RFC3339
 * Based on work done by Chad Okere
 * Needed to do a manual parsing because Joda Time only supports ISO8601 formats
 * which is not completely interchangeable with RFC3339
 *
 * As we need different types for Dates and DateTimes for implicit conversions to work,
 * deliberately using LocalDate here.
 *
 * @author slasch
 * @since 04.01.2016.
 */
object Rfc3339Util {

  // Joda formatters for the RFC3339 profiles this utility understands.
  private val fullDate = DateTimeFormat.forPattern("yyyy-MM-dd")
  // Offset form without fractional seconds, e.g. 2016-01-04T10:00:00+0200 (colon already stripped).
  private val shortDateTime = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ssZ")
  // UTC "Z" suffix, no fractional seconds.
  private val shortDTWithTicks = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'")
  // UTC "Z" suffix with 6-digit fractional seconds.
  private val fullDTWithTicks = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'")
  // Offset form with 6-digit fractional seconds; also used for serialization.
  private val dateTime = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSZ")

  /**
   * Parses an RFC3339 date-time string. Inputs ending in 'Z'/'z' are handled
   * by [[parseFull]]; inputs with a numeric offset by [[parseParts]].
   */
  def parseDateTime(datestring: String): DateTime =
    if(datestring.endsWith("Z") || datestring.endsWith("z")) parseFull(datestring)
    else parseParts(datestring)

  /** Parses an RFC3339 full-date (yyyy-MM-dd) into a [[LocalDate]]. */
  def parseDate(datestring: String): LocalDate =
    fullDate.parseDateTime(datestring).toLocalDate

  /** Serializes a date as RFC3339 full-date (yyyy-MM-dd). */
  def writeDate(date: LocalDate): String = fullDate.print(date)

  /** Serializes a date-time with fractional seconds and a numeric offset. */
  def writeDateTime(date: DateTime): String = dateTime.print(date)

  // Parses a date-time that carries a numeric timezone offset like +02:00 or -05:00.
  // NOTE(review): assumes the input ends with such an offset containing a colon;
  // a string without one (or with '-' only appearing in the date part) would
  // split incorrectly and fail to parse — confirm callers guarantee this.
  private def parseParts(datestring: String): DateTime = {
    //step one, split off the timezone.
    // Use the LAST '+' or '-' so the '-' separators in the date part are skipped.
    val sepChar = if (datestring.indexOf('+')>0) '+' else '-'
    val firstpart = datestring.substring(0, datestring.lastIndexOf(sepChar.toInt))
    val secondpart = datestring.substring(datestring.lastIndexOf(sepChar.toInt))
    //step two, remove the colon from the timezone offset
    // Joda's 'Z' pattern expects +0200, not +02:00.
    val thirdpart = secondpart.substring(0, secondpart.indexOf(':')) + secondpart.substring(secondpart.indexOf(':') + 1)
    val dstring = firstpart + thirdpart
    try {
      // Try the no-fraction form first, fall back to fractional seconds.
      shortDateTime.parseDateTime(dstring)
    } catch {
      case pe: IllegalArgumentException => dateTime.parseDateTime(dstring)
    }
  }

  // Parses a UTC ('Z'-suffixed) date-time, with or without fractional seconds.
  private def parseFull(datestring: String): DateTime = {
    try {
      shortDTWithTicks.parseDateTime(datestring)
    } catch {
      case p: IllegalArgumentException => fullDTWithTicks.parseDateTime(datestring)
    }
  }
}
zalando/play-swagger
api/src/main/scala/de/zalando/play/controllers/Rfc3339Util.scala
Scala
mit
2,331
package io.youi.component.types sealed abstract class SizeType(val name: String, val includeNumeric: Boolean = true) object SizeType { case object Auto extends SizeType("auto", includeNumeric = false) case object Centimeter extends SizeType("cm") case object Ch extends SizeType("ch") case object Em extends SizeType("em") case object Ex extends SizeType("ex") case object Inch extends SizeType("in") case object Inherit extends SizeType("inherit", includeNumeric = false) case object Initial extends SizeType("initial", includeNumeric = false) case object Millimeter extends SizeType("mm") case object Percent extends SizeType("%") case object Pica extends SizeType("pc") case object Pixel extends SizeType("px") case object Point extends SizeType("pt") case object Rem extends SizeType("rem") case object ViewportHeight extends SizeType("vh") case object ViewportWidth extends SizeType("vw") case object ViewportMinimum extends SizeType("vmin") case object ViewportMaximum extends SizeType("vmax") private val map = List( Auto, Centimeter, Ch, Em, Ex, Inch, Inherit, Initial, Millimeter, Percent, Pica, Pixel, Point, Rem, ViewportHeight, ViewportWidth, ViewportMinimum, ViewportMaximum ).map(t => t.name -> t).toMap def apply(name: String): SizeType = map.getOrElse(name.toLowerCase, Pixel) }
outr/youi
gui/src/main/scala/io/youi/component/types/SizeType.scala
Scala
mit
1,348
/*-------------------------------------------------------------------------*\
**  ScalaCheck                                                             **
**  Copyright (c) 2007-2018 Rickard Nilsson. All rights reserved.          **
**  http://www.scalacheck.org                                              **
**                                                                         **
**  This software is released under the terms of the Revised BSD License.  **
**  There is NO WARRANTY. See the file LICENSE for the full text.          **
\*------------------------------------------------------------------------ */

package org.scalacheck

import org.scalacheck.rng.Seed

import language.reflectiveCalls

import util.ConsoleReporter

/** Represents a collection of properties, with convenient methods
 *  for checking all properties at once. This class is itself a property, which
 *  holds if and only if all of the contained properties hold.
 *  <p>Properties are added in the following way:</p>
 *
 *  {{{
 *  object MyProps extends Properties("MyProps") {
 *    property("myProp1") = forAll { (n:Int, m:Int) =>
 *      n+m == m+n
 *    }
 *  }
 *  }}}
 */
@Platform.EnableReflectiveInstantiation
class Properties(val name: String) {

  // Registered properties in insertion order; each entry is the fully
  // prefixed name ("<collection>.<prop>") paired with the (delayed) property.
  private val props = new scala.collection.mutable.ListBuffer[(String,Prop)]

  /**
   * Changes to the test parameters that are specific to this class.
   * Can be used to set custom parameter values for this test.
   */
  def overrideParameters(p: Test.Parameters): Test.Parameters = p

  /** Returns all properties of this collection in a list of name/property
   *  pairs. */
  def properties: collection.Seq[(String,Prop)] = props

  /** Convenience method that checks the properties with the given parameters
   *  (or default parameters, if not specified)
   *  and reports the result on the console. Should only be used when running
   *  tests interactively within the Scala REPL.
   *
   *  If you need to get the results
   *  from the test use the `check` methods in [[org.scalacheck.Test]]
   *  instead. */
  def check(prms: Test.Parameters = Test.Parameters.default): Unit = {
    val params = overrideParameters(prms)
    // Prepend a console reporter so results are printed even if the caller
    // already configured a callback.
    Test.checkProperties(
      params.withTestCallback(ConsoleReporter(1) chain params.testCallback), this
    )
  }

  /** Convenience method that makes it possible to use this property collection
   *  as an application that checks itself on execution. Calls `System.exit`
   *  with the exit code set to the number of failed properties. */
  def main(args: Array[String]): Unit =
    Test.cmdLineParser.parseParams(args) match {
      case (applyCmdParams, Nil) =>
        // All command-line options parsed cleanly; run every property.
        val params = applyCmdParams(overrideParameters(Test.Parameters.default))
        val res = Test.checkProperties(params, this)
        val numFailed = res.count(!_._2.passed)
        if (numFailed > 0) {
          println(s"Found $numFailed failing properties.")
          System.exit(1)
        } else {
          System.exit(0)
        }
      case (_, os) =>
        // Unrecognized options remain: report them, print usage, exit -1.
        println(s"Incorrect options: $os")
        Test.cmdLineParser.printHelp
        System.exit(-1)
    }

  /** Adds all properties from another property collection to this one */
  def include(ps: Properties): Unit = include(ps, prefix = "")

  /** Adds all properties from another property collection to this one
   *  with a prefix this is prepended to each included property's name. */
  def include(ps: Properties, prefix: String): Unit =
    for((n,p) <- ps.properties) property(prefix + n) = p

  /** Used for specifying properties. Usage:
   *  {{{
   *  property("myProp") = ...
   *  }}}
   */
  sealed class PropertySpecifier() {
    // Registers the property under "<collection name>.<propName>", delayed so
    // the property body is only evaluated when actually run.
    def update(propName: String, p: => Prop) = {
      props += ((name+"."+propName, Prop.delay(p)))
    }
  }

  lazy val property = new PropertySpecifier()

  sealed class PropertyWithSeedSpecifier() {
    // Like PropertySpecifier, but optionally pins the property to a specific
    // base64-encoded RNG seed for reproducible runs.
    def update(propName: String, optSeed: Option[String], p: => Prop) = {
      val fullName = s"$name.$propName"
      optSeed match {
        case Some(encodedSeed) =>
          // NOTE(review): `.get` throws if the seed string is not valid
          // base64 — confirm callers always pass a well-formed seed.
          val seed = Seed.fromBase64(encodedSeed).get
          props += ((fullName, Prop.delay(p).useSeed(fullName, seed)))
        case None =>
          // No fixed seed: report the generated seed on failure instead.
          props += ((fullName, Prop.delay(p).viewSeed(fullName)))
      }
    }
  }

  lazy val propertyWithSeed = new PropertyWithSeedSpecifier()
}
martijnhoekstra/scala
src/scalacheck/org/scalacheck/Properties.scala
Scala
apache-2.0
4,339
//package services.actor // //import common.HBaseHelper.{HBaseHelper, Row} // ///** // * Created by cwx on 15-11-28. // */ //object SetRow { // def main(args: Array[String]) { // import scala.collection.mutable.{Map => muMap} // val qualifersAndValues = muMap[String, String]() // qualifersAndValues += ("TemplateId" -> "W1") // qualifersAndValues += ("SendTime" -> "100") // qualifersAndValues += ("Tags" -> "tags1") // qualifersAndValues += ("Items" -> "items1") // qualifersAndValues += ("Prioritie" -> "2") // val row = new Row("81a04464c7b4a5ed", "cwx", qualifersAndValues.toMap) // HBaseHelper.setRow("CWX_table", row) // val qualifersAndValues1 = muMap[String, String]() // qualifersAndValues1 += ("TemplateId" -> "W1") // qualifersAndValues1 += ("SendTime" -> "100") // qualifersAndValues1 += ("Tags" -> "tags2") // qualifersAndValues1 += ("Items" -> "items2") // qualifersAndValues1 += ("Prioritie" -> "2") // val row1 = new Row("3309d2ee4369b2e7", "cwx", qualifersAndValues1.toMap) // HBaseHelper.setRow("CWX_table", row1) // val qualifersAndValues2 = muMap[String, String]() // qualifersAndValues2 += ("TemplateId" -> "W1") // qualifersAndValues2 += ("SendTime" -> "100") // qualifersAndValues2 += ("Tags" -> "tags3") // qualifersAndValues2 += ("Items" -> "items3") // qualifersAndValues2 += ("Prioritie" -> "2") // val row2 = new Row("dde6a4dfb83bf8e3", "cwx", qualifersAndValues2.toMap) // HBaseHelper.setRow("CWX_table", row2) // } //}
bash-horatio/ESJ
test/services/actor/SetRow.scala
Scala
apache-2.0
1,527
package us.feliscat.converter.ja import us.feliscat.converter.MultiLingualNgramSegmentator import us.feliscat.m17n.Japanese import us.feliscat.text.StringOption /** * <pre> * Created on 2017/02/11. * </pre> * * @author K.Sakamoto */ class JapaneseNgramSegmentator(nGram: Int) extends MultiLingualNgramSegmentator(nGram) with Japanese { def segmentateWithSpaceChar(segments: Seq[String]): StringOption = { val builder = new StringBuilder() var isFirst: Boolean = true segments foreach { segment: String => if (isFirst) { builder. append(segment) isFirst = false } else { builder. append(DELIMITER). append(segment) } } StringOption(builder.toString) } def segmentateWithCharacter(text: StringOption): StringOption = { merge(segmentate({ import us.feliscat.util.primitive.StringUtils for (segment <- text.getOrElse("").toCodePointArray) yield { new String(Array[Int](segment), 0, nGram) } })) } }
ktr-skmt/FelisCatusZero-multilingual
libraries/src/main/scala/us/feliscat/converter/ja/JapaneseNgramSegmentator.scala
Scala
apache-2.0
1,068
/* * Copyright 2012 Twitter Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twitter.zipkin.storm import com.twitter.zipkin.gen.{Annotation, Endpoint, Span} import org.junit.runner.RunWith import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import scala.collection.JavaConversions._ @RunWith(classOf[JUnitRunner]) class SpanSchemeTest extends FunSuite { val annotation1 = Annotation(1, "cs", Some(Endpoint(1, 2, "service"))) val annotation2 = Annotation(2, "cr", Some(Endpoint(3, 4, "Service"))) val span = Span(12345, "methodcall", 666, None, List(annotation1, annotation2), Nil) val spanScheme = new SpanScheme() val bytes = spanScheme.deserializer.toBytes(span) test("SpanScheme deserializes bytes to span" ) { val spanRecovered = spanScheme.deserializer.fromBytes(bytes) assert(spanRecovered === span) } test("SpanScheme return correct values of the fields") { val expectedValues = Seq(12345, 666, "methodcall", "service", true) val values = spanScheme.deserialize(bytes).toList assert(expectedValues === values) } }
mjwall/zipkin
zipkin-storm/src/test/scala/com/twitter/zipkin/SpanSchemeTest.scala
Scala
apache-2.0
1,617
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package unit
package helpers

import org.jsoup.Jsoup
import org.jsoup.nodes.{Document, Element}
import org.jsoup.select.Elements
import play.twirl.api.Html
import org.scalatest.matchers.must.Matchers

/** Jsoup-based helpers for asserting on rendered Twirl HTML in tests. */
trait JsoupHelpers extends Matchers {

  /** Enriches [[Html]] with query and table-assertion helpers. */
  implicit class RichHtml(html: Html) {

    /** Runs a CSS selector against the rendered HTML. */
    def select(cssQuery: String): Elements = parseNoPrettyPrinting(html).select(cssQuery)

    /** Looks up an element by its id attribute. */
    def getElementById(id: String): Element = parseNoPrettyPrinting(html).getElementById(id)

    /** Asserts that every <td> in the given row of the table matches the expected text. */
    def verifyTableRowText(tableId: String, expectedTableAnswersText: List[String], rowNumber: Int): Unit = {
      val cells = html.getElementById(tableId).getElementsByTag("tr").get(rowNumber).getElementsByTag("td")
      (0 until cells.size).foreach { idx =>
        cells.get(idx).text() mustBe expectedTableAnswersText(idx)
      }
    }

    /** Asserts that the table's <th> text, split on spaces, matches the expected words. */
    def verifyTableHeadings(tableId: String, expectedTableHeadingsText: List[String]): Unit = {
      val headingWords = html.getElementById(tableId).getElementsByTag("th").text().split(" ").toList
      headingWords mustBe expectedTableHeadingsText
    }
  }

  // otherwise Jsoup inserts linefeed https://stackoverflow.com/questions/12503117/jsoup-line-feed
  def parseNoPrettyPrinting(html: Html): Document = {
    val document = Jsoup.parse(html.body)
    document.outputSettings().prettyPrint(false)
    document
  }

  /** Parses the HTML with Jsoup's default (pretty-printing) settings. */
  def asDocument(html: Html): Document = Jsoup.parse(html.toString())
}
hmrc/help-frontend
test/unit/helpers/JsoupHelpers.scala
Scala
apache-2.0
2,199
package io.github.ptitjes.scott.trainers /* import io.github.ptitjes.scott.HiddenMarkovModel._ import io.github.ptitjes.scott.corpora.Trainer._ import io.github.ptitjes.scott._ import io.github.ptitjes.scott.corpora.Annotation.CoarsePosTag import io.github.ptitjes.scott.corpora._ object RelFreqTrainer extends Trainer.Factory { def name: String = "Freq" override def parameters: Set[Parameter[_]] = Set( ORDER, EmittingTraining.UNKNOWN_THRESHOLD ) override def isIterative: Boolean = false def instantiate(configuration: Configuration): Trainer = new Instance(configuration) class Instance(configuration: Configuration) extends Trainer { import io.github.ptitjes.scott.Utils._ def train(corpus: Corpus): HiddenMarkovModel = { val breadth = corpus.tagSet.size val depth = configuration(ORDER) val size = pow(breadth, depth) val allCategoryCounts = initializeMatrixTree[Int](breadth, depth) val perCategoryCounts = initializeMatrixTree[Int](breadth, depth) corpus.foreach { s: Sentence => var d = 0 var previousState = 0 s.tokens.foreach { token => val tag = token.get(CoarsePosTag) perCategoryCounts(d)(tag)(previousState) += 1 allCategoryCounts(d)(0)(previousState) += 1 if (d < depth) { d += 1 } previousState = (previousState * breadth + tag) % size } } val T = initializeMatrixTree[Double](breadth, depth) for (d <- 0 to depth) { for (i <- 0 until pow(breadth, d)) { for (j <- 0 until breadth) { T(d)(j)(i) = avoidInfinity(log(perCategoryCounts(d)(j)(i)) - log(allCategoryCounts(d)(0)(i))) } } } val (e, ue, dict) = EmittingTraining.train(breadth, corpus, configuration(EmittingTraining.UNKNOWN_THRESHOLD)) HMMGenerative(breadth, depth, T, e, ue, dict) } } } */
ptitjes/scott
scott-core/src/main/scala/io/github/ptitjes/scott/trainers/RelFreqTrainer.scala
Scala
gpl-3.0
1,817
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.json

import java.io.Writer

import com.fasterxml.jackson.core._

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.SpecializedGetters
import org.apache.spark.sql.catalyst.util.{ArrayData, DateTimeUtils, MapData}
import org.apache.spark.sql.types._

/**
 * Streams rows of the given `schema` to `writer` as JSON objects, one per
 * call to [[write]]. Per-field writers are pre-compiled from the schema so
 * the per-row path does no type dispatch.
 */
private[sql] class JacksonGenerator(
    schema: StructType,
    writer: Writer,
    options: JSONOptions) {
  // A `ValueWriter` is responsible for writing a field of an `InternalRow` to appropriate
  // JSON data. Here we are using `SpecializedGetters` rather than `InternalRow` so that
  // we can directly access data in `ArrayData` without the help of `SpecificMutableRow`.
  private type ValueWriter = (SpecializedGetters, Int) => Unit

  // `ValueWriter`s for all fields of the schema
  private val rootFieldWriters: Array[ValueWriter] = schema.map(_.dataType).map(makeWriter).toArray

  // Jackson generator over the output writer; null root separator so rows are
  // not joined by the default space.
  private val gen = new JsonFactory().createGenerator(writer).setRootValueSeparator(null)

  // Compiles a writer for one Catalyst data type. Called once per field at
  // construction time, recursively for nested struct/array/map element types.
  private def makeWriter(dataType: DataType): ValueWriter = dataType match {
    case NullType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNull()

    case BooleanType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeBoolean(row.getBoolean(ordinal))

    case ByteType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getByte(ordinal))

    case ShortType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getShort(ordinal))

    case IntegerType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getInt(ordinal))

    case LongType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getLong(ordinal))

    case FloatType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getFloat(ordinal))

    case DoubleType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getDouble(ordinal))

    case StringType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeString(row.getUTF8String(ordinal).toString)

    case TimestampType =>
      // Timestamps are formatted with the user-configured pattern, not raw micros.
      (row: SpecializedGetters, ordinal: Int) =>
        val timestampString =
          options.timestampFormat.format(DateTimeUtils.toJavaTimestamp(row.getLong(ordinal)))
        gen.writeString(timestampString)

    case DateType =>
      (row: SpecializedGetters, ordinal: Int) =>
        val dateString =
          options.dateFormat.format(DateTimeUtils.toJavaDate(row.getInt(ordinal)))
        gen.writeString(dateString)

    case BinaryType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeBinary(row.getBinary(ordinal))

    case dt: DecimalType =>
      (row: SpecializedGetters, ordinal: Int) =>
        gen.writeNumber(row.getDecimal(ordinal, dt.precision, dt.scale).toJavaBigDecimal)

    case st: StructType =>
      // Nested struct: pre-compile its field writers once, reuse per row.
      val fieldWriters = st.map(_.dataType).map(makeWriter)
      (row: SpecializedGetters, ordinal: Int) =>
        writeObject(writeFields(row.getStruct(ordinal, st.length), st, fieldWriters))

    case at: ArrayType =>
      val elementWriter = makeWriter(at.elementType)
      (row: SpecializedGetters, ordinal: Int) =>
        writeArray(writeArrayData(row.getArray(ordinal), elementWriter))

    case mt: MapType =>
      // Map keys are stringified via toString; only values need a writer.
      val valueWriter = makeWriter(mt.valueType)
      (row: SpecializedGetters, ordinal: Int) =>
        writeObject(writeMapData(row.getMap(ordinal), mt, valueWriter))

    // For UDT values, they should be in the SQL type's corresponding value type.
    // We should not see values in the user-defined class at here.
    // For example, VectorUDT's SQL type is an array of double. So, we should expect that v is
    // an ArrayData at here, instead of a Vector.
    case t: UserDefinedType[_] =>
      makeWriter(t.sqlType)

    case _ =>
      (row: SpecializedGetters, ordinal: Int) =>
        val v = row.get(ordinal, dataType)
        sys.error(s"Failed to convert value $v (class of ${v.getClass}}) " +
          s"with the type of $dataType to JSON.")
  }

  // Wraps `f`'s output in a JSON object ({ ... }).
  private def writeObject(f: => Unit): Unit = {
    gen.writeStartObject()
    f
    gen.writeEndObject()
  }

  // Writes the non-null fields of `row` as name/value pairs; null fields are
  // omitted entirely (not written as JSON null).
  private def writeFields(
      row: InternalRow, schema: StructType, fieldWriters: Seq[ValueWriter]): Unit = {
    var i = 0
    while (i < row.numFields) {
      val field = schema(i)
      if (!row.isNullAt(i)) {
        gen.writeFieldName(field.name)
        fieldWriters(i).apply(row, i)
      }
      i += 1
    }
  }

  // Wraps `f`'s output in a JSON array ([ ... ]).
  private def writeArray(f: => Unit): Unit = {
    gen.writeStartArray()
    f
    gen.writeEndArray()
  }

  // Writes each array element; unlike struct fields, null elements ARE
  // emitted (as JSON null) to preserve positions.
  private def writeArrayData(
      array: ArrayData, fieldWriter: ValueWriter): Unit = {
    var i = 0
    while (i < array.numElements()) {
      if (!array.isNullAt(i)) {
        fieldWriter.apply(array, i)
      } else {
        gen.writeNull()
      }
      i += 1
    }
  }

  // Writes map entries as object members; keys are rendered via toString,
  // null values are emitted as JSON null.
  private def writeMapData(
      map: MapData, mapType: MapType, fieldWriter: ValueWriter): Unit = {
    val keyArray = map.keyArray()
    val valueArray = map.valueArray()
    var i = 0
    while (i < map.numElements()) {
      gen.writeFieldName(keyArray.get(i, mapType.keyType).toString)
      if (!valueArray.isNullAt(i)) {
        fieldWriter.apply(valueArray, i)
      } else {
        gen.writeNull()
      }
      i += 1
    }
  }

  /** Closes the underlying Jackson generator (and thus flushes the writer). */
  def close(): Unit = gen.close()

  /** Flushes buffered output to the underlying writer. */
  def flush(): Unit = gen.flush()

  /**
   * Transforms a single InternalRow to JSON using Jackson
   *
   * @param row The row to convert
   */
  def write(row: InternalRow): Unit = {
    writeObject {
      writeFields(row, schema, rootFieldWriters)
    }
  }

  // Emits a raw newline between records (JSON Lines output).
  def writeLineEnding(): Unit = {
    gen.writeRaw('\n')
  }
}
sachintyagi22/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonGenerator.scala
Scala
apache-2.0
6,553
package com.airbnb.scheduler.api

import org.specs2.mutable.SpecificationWithJUnit

import com.airbnb.scheduler.jobs._
import com.airbnb.scheduler.jobs.DependencyBasedJob
import com.airbnb.scheduler.jobs.DockerContainer
import com.airbnb.scheduler.jobs.ScheduleBasedJob
import com.airbnb.scheduler.jobs.EnvironmentVariable
import com.airbnb.utils.JobDeserializer
import com.airbnb.utils.JobSerializer
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.module.SimpleModule
import org.joda.time.Minutes
// NOTE(review): the two imports below duplicate imports already present
// above (SpecificationWithJUnit, JobSerializer, JobDeserializer) — harmless
// in Scala, but candidates for cleanup.
import org.specs2.mutable.SpecificationWithJUnit
import com.airbnb.utils.{JobSerializer, JobDeserializer}

/**
 * Round-trip tests for the Jackson serializer/deserializer pair of
 * [[BaseJob]] subclasses: a job serialized to JSON and read back must be
 * equal to the original.
 */
class SerDeTest extends SpecificationWithJUnit {

  "SerializerAndDeserializer" should {

    "serialize and deserialize a DependencyBasedJob correctly" in {
      // Register the custom (de)serializers for BaseJob on a fresh mapper.
      val objectMapper = new ObjectMapper
      val mod = new SimpleModule("JobModule")
      mod.addSerializer(classOf[BaseJob], new JobSerializer)
      mod.addDeserializer(classOf[BaseJob], new JobDeserializer)
      objectMapper.registerModule(mod)

      val environmentVariables = Seq(
        EnvironmentVariable("FOO", "BAR"),
        EnvironmentVariable("AAAA", "BBBB")
      )

      // One volume with host path + mode, one with neither, to exercise Options.
      val volumes = Seq(
        Volume(Option("/host/dir"), "container/dir", Option(VolumeMode.RO)),
        Volume(None, "container/dir", None)
      )

      val container = DockerContainer("dockerImage", volumes, NetworkMode.BRIDGE)

      val arguments = Seq(
        "-testOne"
      )

      // Job with every optional feature populated so all fields round-trip.
      val a = new DependencyBasedJob(Set("B", "C", "D", "E"), "A", "noop",
        Minutes.minutes(5).toPeriod, 10L, 20L, "fooexec", "fooflags", 7, "foo@bar.com",
        "TODAY", "YESTERDAY", true, container = container,
        environmentVariables = environmentVariables, shell = false, arguments = arguments,
        softError = true)

      val aStr = objectMapper.writeValueAsString(a)
      val aCopy = objectMapper.readValue(aStr, classOf[DependencyBasedJob])

      aCopy must_== a
    }

    "serialize and deserialize a ScheduleBasedJob correctly" in {
      // Same setup as above, but for the schedule-driven job variant.
      val objectMapper = new ObjectMapper
      val mod = new SimpleModule("JobModule")
      mod.addSerializer(classOf[BaseJob], new JobSerializer)
      mod.addDeserializer(classOf[BaseJob], new JobDeserializer)
      objectMapper.registerModule(mod)

      val environmentVariables = Seq(
        EnvironmentVariable("FOO", "BAR"),
        EnvironmentVariable("AAAA", "BBBB")
      )

      val volumes = Seq(
        Volume(Option("/host/dir"), "container/dir", Option(VolumeMode.RW)),
        Volume(None, "container/dir", None)
      )

      val container = DockerContainer("dockerImage", volumes, NetworkMode.HOST)

      val arguments = Seq(
        "-testOne"
      )

      val a = new ScheduleBasedJob("FOO/BAR/BAM", "A", "noop",
        Minutes.minutes(5).toPeriod, 10L, 20L, "fooexec", "fooflags", 7, "foo@bar.com",
        "TODAY", "YESTERDAY", true, container = container,
        environmentVariables = environmentVariables, shell = true, arguments = arguments,
        softError = true)

      val aStr = objectMapper.writeValueAsString(a)
      val aCopy = objectMapper.readValue(aStr, classOf[ScheduleBasedJob])

      aCopy must_== a
    }
  }
}
doronin/chronos
src/test/scala/com/airbnb/scheduler/api/SerDeTest.scala
Scala
apache-2.0
3,220
/** * (c) Copyright 2013 WibiData, Inc. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kiji.schema.shell import scala.collection.JavaConversions._ import org.specs2.mutable._ import org.apache.avro.Schema import org.kiji.schema.avro.CellSchema import org.kiji.schema.avro.ColumnDesc import org.kiji.schema.avro.FamilyDesc import org.kiji.schema.avro.TableLayoutDesc import org.kiji.schema.layout.InvalidLayoutSchemaException import org.kiji.schema.shell.api.Client import org.kiji.schema.shell.avro.XYRecord import org.kiji.schema.avro.AvroSchema import org.kiji.schema.shell.util.KijiIntegrationTestHelpers /** * Tests that DDL commands affecting column schemas respect validation requirements and * operate correctly on validationg layouts (&gt;= layout-1.3). 
*/ class TestSchemaValidation extends SpecificationWithJUnit with KijiIntegrationTestHelpers { "With schema validation enabled, clients" should { "create a table correctly" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | MAXVERSIONS = INFINITY, | TTL = FOREVER, | INMEMORY = false, | COMPRESSED WITH GZIP, | FAMILY info WITH DESCRIPTION 'basic information' ( | name "string" WITH DESCRIPTION 'The user\'s name', | email "string", | age "int"), | MAP TYPE FAMILY integers COUNTER |);""".stripMargin) // Programmatically test proper table creation. // Check that we have created as many locgroups, map families, and group families // as we expect to be here. val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName() == "info" }).get infoFamily.getColumns().size mustEqual 3 val nameCol: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "name" }).get val cellSchema: CellSchema = nameCol.getColumnSchema() // Readers, Writers, Written lists and default schema should all match "string" cellSchema.getReaders().size mustEqual 1 cellSchema.getWriters().size mustEqual 1 cellSchema.getWritten().size mustEqual 1 val readerSchema: AvroSchema = cellSchema.getDefaultReader() cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWriters().head mustEqual readerSchema cellSchema.getWritten().head mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.STRING)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "refuse an incompatible writer schema" in { val uri = 
getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) try { client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "long") |);""".stripMargin) // Try to add an obviously-incompatible schema. (client.executeUpdate("ALTER TABLE foo ADD WRITER SCHEMA \"string\" FOR COLUMN info:bar") must throwA[InvalidLayoutSchemaException]) } finally { client.close() kijiSystem.shutdown() } ok("Test completed") } "refuse an incompatible reader schema" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) try { client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "long") |);""".stripMargin) // data written as 'long' cannot necessarily be read as 'int'. (client.executeUpdate("ALTER TABLE foo ADD READER SCHEMA \"int\" FOR COLUMN info:bar") must throwA[InvalidLayoutSchemaException]) } finally { client.close() kijiSystem.shutdown() } ok("Test completed") } "refuse a subtly incompatible writer schema" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) try { client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "int") |);""".stripMargin) // Since "int" is a reader schema, can't add "long" as a writer schema directly. 
// Note that a very similar statement worked ok in the "add a reader schema" test (client.executeUpdate("ALTER TABLE foo ADD WRITER SCHEMA \"long\" FOR COLUMN info:bar") must throwA[InvalidLayoutSchemaException]) } finally { client.close() kijiSystem.shutdown() } ok("Test completed") } "add a reader schema" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "int") |);""".stripMargin) client.executeUpdate("ALTER TABLE foo ADD READER SCHEMA \"long\" FOR COLUMN info:bar") // Test that both reader schemas are present. val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 2 // Both "int" and "long" cellSchema.getWriters().size mustEqual 1 // Just "int" cellSchema.getWritten().size mustEqual 1 val readerSchema: AvroSchema = cellSchema.getDefaultReader() // Should be "int" cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWriters().head mustEqual readerSchema cellSchema.getWritten().head mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.INT)) // Check that "long" is the 2nd schema in the readers list. 
val longSchema: AvroSchema = cellSchema.getReaders()(1) (env.kijiSystem.getSchemaFor(env.instanceURI, longSchema).get mustEqual Schema.create(Schema.Type.LONG)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "add a writer schema" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "long") |);""".stripMargin) client.executeUpdate("ALTER TABLE foo ADD WRITER SCHEMA \"int\" FOR COLUMN info:bar") // Test that both writer schemas are present. val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 // Just "long" cellSchema.getWriters().size mustEqual 2 // Both "int" and "long". cellSchema.getWritten().size mustEqual 2 val readerSchema: AvroSchema = cellSchema.getDefaultReader() // Should be "long" cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWriters().head mustEqual readerSchema cellSchema.getWritten().head mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.LONG)) // Check that "int" is the 2nd schema in the writers list. 
val intSchema: AvroSchema = cellSchema.getWriters()(1) (env.kijiSystem.getSchemaFor(env.instanceURI, intSchema).get mustEqual Schema.create(Schema.Type.INT)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "drop a writer schema" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "long") |);""".stripMargin) client.executeUpdate("ALTER TABLE foo DROP WRITER SCHEMA \"long\" FOR COLUMN info:bar") // Test that no writer schemas are present... but it's still in the "written" list. val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 // Just "long" cellSchema.getWriters().size mustEqual 0 // nada. cellSchema.getWritten().size mustEqual 1 // "long" remains in the written list. 
val readerSchema: AvroSchema = cellSchema.getDefaultReader() // Should be "long" cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWritten().head mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.LONG)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "allow a series of changes to reader schemas permitting int-to-long conversion" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "int") |);""".stripMargin) // We can't add "long" as a writer schema directly, since its reader schema is int. // Change that to long, then add it as the approved writer schema client.executeUpdate("ALTER TABLE foo ADD DEFAULT READER SCHEMA \"long\" FOR COLUMN info:bar") client.executeUpdate("ALTER TABLE foo DROP READER SCHEMA \"int\" FOR COLUMN info:bar") client.executeUpdate("ALTER TABLE foo ADD WRITER SCHEMA \"long\" FOR COLUMN info:bar") // Verify our result state. // * default reader schema should be "long" // * the only schema in readers should be "long" // * the writers and written lists should be { "int", "long" } val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 // Just "long" cellSchema.getWriters().size mustEqual 2 // Both "int" and "long". 
cellSchema.getWritten().size mustEqual 2 val readerSchema: AvroSchema = cellSchema.getDefaultReader() // Should be "long" cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWriters()(1) mustEqual readerSchema cellSchema.getWritten()(1) mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.LONG)) // Check that "int" is the 1st schema in the writers list. val intSchema: AvroSchema = cellSchema.getWriters()(0) (env.kijiSystem.getSchemaFor(env.instanceURI, intSchema).get mustEqual Schema.create(Schema.Type.INT)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "remove the default reader if it is also dropped as a reader schema" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "int") |);""".stripMargin) // This should also remove it as a default reader schema. client.executeUpdate("ALTER TABLE foo DROP READER SCHEMA \"int\" FOR COLUMN info:bar") // Verify our result state. // * default reader schema should be null // * the readers list should be empty. 
// * the writers and written lists should be { "int" } val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 0 cellSchema.getWriters().size mustEqual 1 // "int" cellSchema.getWritten().size mustEqual 1 cellSchema.getDefaultReader() must beNull val writerSchema: AvroSchema = cellSchema.getWriters()(0) cellSchema.getWritten()(0) mustEqual writerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, writerSchema).get mustEqual Schema.create(Schema.Type.INT)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "allow creation of empty schema lists for a column" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar ) |);""".stripMargin) // Verify intermediate state: no reader, writer, written schemas for info:bar. 
val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 0 cellSchema.getWriters().size mustEqual 0 cellSchema.getWritten().size mustEqual 0 cellSchema.getDefaultReader() must beNull // We should be able to add any schema we want. // Add to both the readers and writers lists. client.executeUpdate("ALTER TABLE foo ADD SCHEMA \"int\" FOR COLUMN info:bar") // Verify our result state. // readers, writers, written should all be "int". default_reader should still be null. val layout2: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily2: FamilyDesc = layout2.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col2: ColumnDesc = infoFamily2.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema2: CellSchema = col2.getColumnSchema() cellSchema2.getReaders().size mustEqual 1 cellSchema2.getWriters().size mustEqual 1 cellSchema2.getWritten().size mustEqual 1 // ADD SCHEMA will set the reader and writer, but not a default reader schema. 
cellSchema2.getDefaultReader() must beNull val reader: AvroSchema = cellSchema2.getReaders()(0) (env.kijiSystem.getSchemaFor(env.instanceURI, reader).get mustEqual Schema.create(Schema.Type.INT)) reader mustEqual cellSchema2.getWriters()(0) reader mustEqual cellSchema2.getWritten()(0) client.close() env.kijiSystem.shutdown() ok("Test completed") } "allow setting schema by class" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar ) |);""".stripMargin) client.executeUpdate(""" |ALTER TABLE foo ADD SCHEMA CLASS org.kiji.schema.shell.avro.XYRecord |FOR COLUMN info:bar |""".stripMargin) client.executeUpdate(""" |ALTER TABLE foo ADD DEFAULT READER SCHEMA CLASS org.kiji.schema.shell.avro.XYRecord |FOR COLUMN info:bar |""".stripMargin) val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 cellSchema.getWriters().size mustEqual 1 cellSchema.getWritten().size mustEqual 1 val reader: AvroSchema = cellSchema.getDefaultReader() cellSchema.getReaders().head mustEqual reader cellSchema.getWriters().head mustEqual reader cellSchema.getWritten().head mustEqual reader val readerSchemaName: String = cellSchema.getSpecificReaderSchemaClass().toString() readerSchemaName mustEqual classOf[XYRecord].getName() (env.kijiSystem.getSchemaFor(env.instanceURI, reader).get mustEqual XYRecord.SCHEMA$) client.close() 
env.kijiSystem.shutdown() ok("Test completed") } "allow Avro schema via DSL" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' (bar) |);""".stripMargin) client.executeUpdate(""" |ALTER TABLE foo ADD SCHEMA AVRO array<int> FOR COLUMN info:bar |""".stripMargin) client.executeUpdate(""" |ALTER TABLE foo ADD DEFAULT READER SCHEMA AVRO array<int> FOR COLUMN info:bar |""".stripMargin) val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 cellSchema.getWriters().size mustEqual 1 cellSchema.getWritten().size mustEqual 1 val reader: AvroSchema = cellSchema.getDefaultReader() cellSchema.getReaders().head mustEqual reader cellSchema.getWriters().head mustEqual reader cellSchema.getWritten().head mustEqual reader cellSchema.getSpecificReaderSchemaClass mustEqual null (env.kijiSystem.getSchemaFor(env.instanceURI, reader).get mustEqual Schema.createArray(Schema.create(Schema.Type.INT))) client.close() env.kijiSystem.shutdown() ok("Test completed") } "let the user add a schema redundantly" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic 
information' ( | bar "int") |);""".stripMargin) // If this schema is already present, it should be benign. client.executeUpdate("ALTER TABLE foo ADD SCHEMA \"int\" FOR COLUMN info:bar") // Test that the schema list is just "int" for readers and writers. val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 // Just "int" cellSchema.getWriters().size mustEqual 1 cellSchema.getWritten().size mustEqual 1 val readerSchema: AvroSchema = cellSchema.getDefaultReader() // Should be "int" cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWriters().head mustEqual readerSchema cellSchema.getWritten().head mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.INT)) client.close() env.kijiSystem.shutdown() ok("Test completed") } "let the user drop a schema that wasn't attached" in { val uri = getNewInstanceURI() val kijiSystem = getKijiSystem() val client = Client.newInstanceWithSystem(uri, kijiSystem) client.executeUpdate(""" |CREATE TABLE foo WITH DESCRIPTION 'some data' |ROW KEY FORMAT HASHED |WITH LOCALITY GROUP default WITH DESCRIPTION 'main storage' ( | FAMILY info WITH DESCRIPTION 'basic information' ( | bar "int") |);""".stripMargin) // If this schema is not present (e.g, redundant drop) it should be benign. client.executeUpdate("ALTER TABLE foo DROP SCHEMA \"string\" FOR COLUMN info:bar") // Test that the schema list is just "int" for readers and writers. 
val env = environment(uri, kijiSystem) val layout: TableLayoutDesc = env.kijiSystem.getTableLayout(uri, "foo").get.getDesc val infoFamily: FamilyDesc = layout.getLocalityGroups().head.getFamilies().find({ grp => grp.getName().toString() == "info" }).get val col: ColumnDesc = infoFamily.getColumns().find({ col => col.getName().toString() == "bar" }).get val cellSchema: CellSchema = col.getColumnSchema() cellSchema.getReaders().size mustEqual 1 // Just "int" cellSchema.getWriters().size mustEqual 1 cellSchema.getWritten().size mustEqual 1 val readerSchema: AvroSchema = cellSchema.getDefaultReader() // Should be "int" cellSchema.getReaders().head mustEqual readerSchema cellSchema.getWriters().head mustEqual readerSchema cellSchema.getWritten().head mustEqual readerSchema (env.kijiSystem.getSchemaFor(env.instanceURI, readerSchema).get mustEqual Schema.create(Schema.Type.INT)) client.close() env.kijiSystem.shutdown() ok("Test completed") } } }
kijiproject/kiji-schema-shell
src/test/scala/org/kiji/schema/shell/TestSchemaValidation.scala
Scala
apache-2.0
26,728
package au.com.dius.pact.server

import au.com.dius.pact.consumer.DefaultMockProvider
import au.com.dius.pact.model._
import com.typesafe.scalalogging.StrictLogging

import scala.collection.JavaConversions

/** HTTP handler that spins up a new mock provider server from a pact payload
  * posted by the client and registers the running server in the shared state.
  */
object Create extends StrictLogging {

  /** Creates and starts a mock provider for the given pact document.
    *
    * @param state       value of the `state` query parameter
    *                    (NOTE(review): not referenced in this body — confirm whether it is intentionally unused)
    * @param path        additional keys under which the new server is also registered in the state
    * @param requestBody pact document (JSON) describing the interactions to mock
    * @param oldState    current registry of running mock servers
    * @param config      server configuration (host, port bounds, TLS keystore, pact spec version)
    * @return a 201 response whose JSON body carries the chosen port, plus the updated registry
    */
  def create(state: String, path: List[String], requestBody: String, oldState: ServerState, config: Config): Result = {
    val pact = PactReader.loadPact(requestBody).asInstanceOf[RequestResponsePact]
    // Choose HTTPS (keystore-backed) when a keystore path is configured,
    // otherwise plain HTTP within the configured port range.
    val mockConfig : MockProviderConfig = {
      if(!config.keystorePath.isEmpty) {
        MockHttpsKeystoreProviderConfig
          .httpsKeystoreConfig(config.host, config.sslPort, config.keystorePath, config.keystorePassword,
            PactSpecVersion.fromInt(config.pactVersion))
      } else {
        MockProviderConfig.create(config.host, config.portLowerBound, config.portUpperBound,
          PactSpecVersion.fromInt(config.pactVersion))
      }
    }
    val server = DefaultMockProvider.apply(mockConfig)
    val port = server.config.getPort
    val portEntry = port.toString -> server

    // Not very scala...
    // Register the server under its port *and* under every requested path key.
    val newState = (oldState + portEntry) ++ (for (
      pathValue <- path
    ) yield (pathValue -> server))

    val body = OptionalBody.body("{\"port\": " + port + "}")
    // Start the mock provider only after the state has been assembled.
    server.start(pact)
    Result(new Response(201,
      JavaConversions.mapAsJavaMap(ResponseUtils.CrossSiteHeaders ++ Map("Content-Type" -> "application/json")),
      body), newState)
  }

  /** Entry point: requires `state` and `path` query parameters and a pact
    * request body, then delegates to [[create]]; anything missing yields a 400.
    */
  def apply(request: Request, oldState: ServerState, config: Config): Result = {
    def errorJson = OptionalBody.body("{\"error\": \"please provide state param and path param and pact body\"}")
    def clientError = Result(new Response(400, JavaConversions.mapAsJavaMap(ResponseUtils.CrossSiteHeaders), errorJson), oldState)

    logger.debug(s"path=${request.getPath}")
    logger.debug(s"query=${request.getQuery}")
    logger.debug(request.getBody.toString)

    // All of state, path and body must be present; a missing piece collapses
    // the for-comprehension to None, which falls through to the 400 response.
    val result = if (request.getQuery != null) {
      for {
        stateList <- CollectionUtils.javaLMapToScalaLMap(request.getQuery).get("state")
        state <- stateList.headOption
        paths <- CollectionUtils.javaLMapToScalaLMap(request.getQuery).get("path")
        body <- Option(request.getBody)
      } yield create(state, paths, body.getValue, oldState, config)
    } else None

    result getOrElse clientError
  }
}
algra/pact-jvm
pact-jvm-server/src/main/scala/au/com/dius/pact/server/Create.scala
Scala
apache-2.0
2,359
package org.jetbrains.plugins.scala.lang.psi.api.base import org.jetbrains.plugins.scala.lang.psi.types.ScType import org.jetbrains.plugins.scala.lang.psi.types.result._ /** * @author adkozlov */ package object types { implicit class ScTypeElementExt(val typeElement: ScTypeElement) extends AnyVal { def calcType: ScType = typeElement.`type`().getOrAny def getParamTypeText: String = if (typeElement.isRepeated) s"_root_.scala.collection.Seq[${typeElement.getText}]" else typeElement.getText } }
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/base/types/package.scala
Scala
apache-2.0
555
package com.mentatlabs.nsa package scalac package dsl package experimental trait ScalacYMacroDSL extends ScalacExperimentalDSL with ScalacVerboseDSL with ScalacNoDSL { object Ymacro { val unary_- = new { def -(d: debug.type) = new { def -(l: lite.type) = options.ScalacYMacroDebugLite def -(v: verbose.type) = options.ScalacYMacroDebugVerbose } def -(e: expand.type) = options.ScalacYMacroExpand def -(e: expand.WithString) = options.ScalacYMacroExpand(e.value) def -(n: no.type) = new { def -(e: expand.type) = options.ScalacYMacroNoExpand } } } }
mentat-labs/sbt-nsa
nsa-dsl/src/main/scala/com/mentatlabs/nsa/scalac/dsl/experimental/private/ScalacYMacroDSL.scala
Scala
bsd-3-clause
637
/* * Copyright 2012 Eike Kettner * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.eknet.guice.squire import org.scalatest.FunSuite import org.scalatest.matchers.ShouldMatchers import com.google.inject.TypeLiteral import com.google.inject.internal.MoreTypes import java.lang.reflect.Type /** * @author Eike Kettner eike.kettner@gmail.com * @since 26.11.12 22:12 */ class TypeLiteralTest extends FunSuite with ShouldMatchers { test ("test types of types") { val rtl = new TypeLiteral[List[Set[String]]](){} val tl = SquireBinder.typeLiteral[List[Set[String]]] tl should be (rtl) } }
eikek/publet
guice-squire/src/test/scala/org/eknet/guice/squire/TypeLiteralTest.scala
Scala
apache-2.0
1,132
/**
  * Copyright 2015 Thomson Reuters
  *
  * Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *   http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
  * an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  *
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
package cmwell.tools.neptune.export

import java.io._
import java.util
import java.util.concurrent.{Executors, TimeoutException}
import java.util.stream.Collectors
import java.util.{Collections, Vector}

import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectRequest}
import com.amazonaws.{AmazonServiceException, ClientConfiguration, Protocol, SdkClientException}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.slf4j.LoggerFactory

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{FiniteDuration, _}

/** Uploads export chunks to an S3 bucket, either from an in-memory string or
  * from a temporary file, optionally through an HTTP proxy.
  *
  * Fix vs. previous revision: failures are reported through the declared SLF4J
  * logger (with the chunk name for context) instead of `e.printStackTrace()`,
  * which bypassed the logging configuration and wrote to stderr. Rethrow
  * semantics are unchanged.
  */
object S3ObjectUploader{

  // Single-threaded pool: uploads are serialized so at most one S3 transfer
  // runs at a time; the implicit EC drives the Futures below.
  val executor = Executors.newFixedThreadPool(1)
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.fromExecutor(executor)
  protected lazy val logger = LoggerFactory.getLogger("s3_uploader")

  /** Builds a fresh S3 client for region us-east-1, routed through the given
    * proxy when host/port are supplied, using profile-file credentials.
    * NOTE: a new client is constructed on every upload call.
    */
  def init(proxyHost: Option[String], proxyPort: Option[Int]) = {
    val clientRegion = "us-east-1"
    val config = new ClientConfiguration
    config.setProtocol(Protocol.HTTPS)
    proxyHost.foreach(host => config.setProxyHost(host))
    proxyPort.foreach(port => config.setProxyPort(port))
    val s3Client = AmazonS3ClientBuilder.standard()
      .withRegion(clientRegion)
      .withClientConfiguration(config)
      .withCredentials(new ProfileCredentialsProvider())
      .build()
    s3Client
  }

  /** Uploads an in-memory chunk under `fileName` in `s3Directory` (bucket).
    * Any AWS failure is logged with context and rethrown to the caller.
    */
  def persistChunkToS3Bucket(chunkData: String, fileName: String, proxyHost: Option[String], proxyPort: Option[Int], s3Directory: String) = {
    try {
      init(proxyHost, proxyPort).putObject(s3Directory, fileName, chunkData)
    } catch {
      case e: AmazonServiceException =>
        // The request reached S3 but was rejected; log instead of printStackTrace.
        logger.error(s"S3 rejected upload of chunk '$fileName' to '$s3Directory'", e)
        throw e
      case e: SdkClientException =>
        // The request never reached S3 (network/client-side failure).
        logger.error(s"Could not reach S3 while uploading chunk '$fileName' to '$s3Directory'", e)
        throw e
    }
  }

  /** Uploads a temporary file and deletes it on success. The upload runs as a
    * Future bounded by a 5 minute timeout; on timeout it retries up to
    * `retryCount` times before giving up with an exception.
    * On failure the temp file is intentionally left on disk so a caller may retry.
    */
  def persistChunkToS3Bucket(tmpFile: File, proxyHost: Option[String], proxyPort: Option[Int], s3Directory: String, retryCount: Int = 3): Unit = {
    try {
      val s3UploadTask = Future { init(proxyHost, proxyPort).putObject(s3Directory, tmpFile.getName, tmpFile) }(ec)
      // Blocking here is deliberate: this tool serializes uploads and uses the
      // timeout to detect stalled transfers.
      Await.result(s3UploadTask, 5.minutes)
      tmpFile.delete()
    } catch {
      case e: TimeoutException =>
        if (retryCount > 0) {
          logger.error("S3 upload task run more than 5 minutes..Going to retry")
          persistChunkToS3Bucket(tmpFile, proxyHost, proxyPort, s3Directory, retryCount - 1)
        } else {
          throw new Exception("S3 upload task duration was more than 5 minutes")
        }
      case e: AmazonServiceException =>
        logger.error(s"S3 rejected upload of file '${tmpFile.getName}' to '$s3Directory'", e)
        throw e
      case e: SdkClientException =>
        logger.error(s"Could not reach S3 while uploading file '${tmpFile.getName}' to '$s3Directory'", e)
        throw e
    }
  }
}
dudi3001/CM-Well
tools/export-neptune-tool/src/main/scala/cmwell/tools/neptune/export/S3ObjectUploader.scala
Scala
apache-2.0
3,368
/*
 * This file is part of the \\BlueLaTeX project.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package gnieh.blue
package core
package impl
package paper

import http._
import couch._
import common._
import permission._

import java.util.{ Date, UUID }
import java.io.{ File, FileOutputStream, OutputStreamWriter, BufferedWriter }

import tiscaf._

import com.typesafe.config.Config

import org.osgi.framework.BundleContext

import scala.collection.JavaConverters._

import resource._

import scala.sys.process._

import scala.util.{ Try, Success }

import gnieh.sohva.control.CouchClient

/** Create a new paper.
 *  The currently authenticated user is added as author of this paper
 *
 *  @author Lucas Satabin
 */
class CreatePaperLet(
  val couch: CouchClient,
  config: Config,
  context: BundleContext,
  templates: Templates,
  logger: Logger) extends SyncBlueLet(config, logger) with SyncAuthenticatedLet {

  /** Handles the authenticated request: requires `paper_name` and
    * `paper_title` parameters, creates the paper documents in CouchDB, then
    * materializes the paper directory and template files on disk.
    * Replies 201 with the new paper id, 400 on missing parameters, 500 when
    * the paper directory cannot be created.
    */
  def authenticatedAct(user: UserInfo)(implicit talk: HTalk): Try[Any] =
    (talk.req.param("paper_name"), talk.req.param("paper_title")) match {
      case (Some(name), Some(title)) =>
        // Optional parameters with defaults.
        val template = talk.req.param("template").getOrElse("article")
        val visibility = talk.req.param("visibility").getOrElse("private")
        val configuration = new PaperConfiguration(config)
        val manager = entityManager("blue_papers")
        // Random hex identifier; the 'x' prefix keeps it a valid identifier.
        val newId = s"x${UUID.randomUUID.getMostSignificantBits.toHexString}"
        for {
          // create the paper into the database
          () <- manager.create(newId, None)
          // add the core component which contains the type, the title
          paper <- manager.saveComponent(newId, Paper(s"$newId:core", name, new Date))
          // add the permissions component to set the creator as author
          roles <- manager.saveComponent(newId, PaperRole(s"$newId:roles", UsersGroups(Set(user.name), Set()), UsersGroups(Set(), Set()), UsersGroups(Set(), Set())))
          p <- phase(newId, visibility)
          // NOTE(review): this binding shadows the `visibility` String above — confirm intentional
          visibility <- manager.saveComponent(newId, p)
          // NOTE(review): this binding shadows the `user: UserInfo` parameter with an Option[User]
          user <- entityManager("blue_users").getComponent[User](s"org.couchdb.user:${user.name}")
        } yield {
          if(configuration.paperDir(newId).mkdirs) {
            // if the template is not one of the standard styles,
            // then there should be an associated .sty file to be copied in `resources'
            // directory
            val templateName = template match {
              case "article" | "book" | "report" =>
                // built-in template, ignore it
                "generic"
              case "beamer" =>
                "beamer"
              case cls if configuration.cls(cls).exists =>
                // copy the file to the working directory
                logDebug(s"Copying class ${configuration.cls(cls)} to paper directory ${configuration.paperDir(newId)}")
                (configuration.cls(cls) #> new File(configuration.paperDir(newId), cls + ".cls")) ! CreationProcessLogger
                cls
              case cls =>
                // just log that the template was not found. It can however be uploaded later by the user
                logDebug(s"Class $cls was not found, the user will have to upload it later")
                "generic"
            }
            // write the template to the newly created paper
            // (`managed` closes the writer even on failure)
            for(fw <- managed(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(configuration.paperFile(newId)), "UTF-8")))) {
              fw.write(
                templates.layout(
                  s"$templateName.tex",
                  "class" -> template,
                  "title" -> title,
                  "id" -> newId,
                  "author" -> user.map(_.fullName).getOrElse("Your Name"),
                  "email" -> user.map(_.email).getOrElse("your@email.com"),
                  "affiliation" -> user.flatMap(_.affiliation).getOrElse("Institute")
                )
              )
            }
            // create empty bibfile
            configuration.bibFile(newId).createNewFile
            import OsgiUtils._
            // notifiy creation hooks; a failing hook is logged but does not
            // abort the response
            for(hook <- context.getAll[PaperCreated])
              Try(hook.afterCreate(newId, manager)) recover {
                case e => logError("Error in post paper creation hook", e)
              }
            talk.setStatus(HStatus.Created).writeJson(newId)
          } else {
            logError(s"Unable to create the paper directory: ${configuration.paperDir(newId)}")
            talk
              .setStatus(HStatus.InternalServerError)
              .writeJson(ErrorResponse("cannot_create_paper", "Something went wrong on the server side"))
          }
        }
      case (_, _) =>
        // missing parameter
        Success(
          talk
            .setStatus(HStatus.BadRequest)
            .writeJson(ErrorResponse("cannot_create_paper", "Some parameters are missing")))
    }

  /** Computes the initial "writing" phase permissions for the new paper from
    * the requested visibility, falling back to built-in permission sets and
    * ultimately to the "private" set when the visibility is unknown.
    * NOTE(review): looks up `UserPermissions` in the `blue_users` manager
    * using the *paper* id — confirm this is the intended keying.
    */
  private def phase(id: String, visibility: String)(implicit talk: HTalk): Try[PaperPhase] = {
    val manager = entityManager("blue_users")
    for(perms <- manager.getComponent[UserPermissions](id))
      yield perms match {
        case Some(perms) =>
          perms.permissions.get(visibility) match {
            case Some(p) =>
              PaperPhase(f"$id:phase", "writing", p.mapValues(_.map(Permission)), Nil)
            case None =>
              val builtin = config.getBuiltInPermissions
              if(builtin.contains(visibility))
                PaperPhase(f"$id:phase", "writing", builtin(visibility).mapValues(_.map(Permission)), Nil)
              else
                PaperPhase(f"$id:phase", "writing", builtin("private").mapValues(_.map(Permission)), Nil)
          }
        case None =>
          val builtin = config.getBuiltInPermissions
          if(builtin.contains(visibility))
            PaperPhase(f"$id:phase", "writing", builtin(visibility).mapValues(_.map(Permission)), Nil)
          else
            PaperPhase(f"$id:phase", "writing", builtin("private").mapValues(_.map(Permission)), Nil)
      }
  }

  /** Forwards the external LaTeX-class copy process output to the servlet log. */
  object CreationProcessLogger extends ProcessLogger {
    def out(s: => String) = logInfo(s)
    def err(s: => String) = logError(s)
    def buffer[T](f: => T) = f
  }
}
tdurieux/bluelatex
blue-core/src/main/scala/gnieh/blue/core/impl/paper/CreatePaperLet.scala
Scala
apache-2.0
6,835
package com.olvind.crud
package server

import com.olvind.stringifiers.Stringifier
import slick.lifted.ColumnOrdered

import scala.language.implicitConversions
import scala.util.{Failure, Success}

/**
 * Generic CRUD operations executed against a Slick-backed database.
 *
 * Every operation is expressed as a `CrudDbOp`, composing validation
 * (id/value decoding), permission checks and the actual DBIO action.
 * Rows and ids travel over the wire as strings (`StrRowId`, `StrValue`,
 * `StrTableRow`) and are decoded via per-column `Stringifier`s.
 */
trait crudActions extends tableRefs with integrationDb with columnPickers with slickHacks with dbOps {
  import driver.api._

  private [server] object crudAction {
    import slickHacks._

    // Translates a requested sort direction into Slick's asc/desc ordering of a column.
    private val sortOrder: SortOrder ⇒ Rep[Any] ⇒ ColumnOrdered[Any] =
      o ⇒ if (o =:= Asc) _.asc else _.desc

    /** Total number of rows in `q`, wrapped as a TableLength. */
    def length[E, U](q: Query[E, U, Seq]): DBIO[TableLength] =
      q.size.result map TableLength

    /**
     * Reads a page of rows from the table behind `ref`.
     *
     * Optional `paramsOpt` supplies filtering (case-insensitive substring
     * match on one column), sorting and paging. Note the composition order:
     * filter first, then sort, then page.
     */
    def readTable[ID, TABLE <: AbstractTable[_], LP, P]
                 (ref: TableRef[ID, TABLE, LP, P], paramsOpt: Option[QueryParams]): CrudDbOp[Seq[StrTableRow]] = {

      // Apply requested sorting, if any; otherwise leave the query unchanged.
      val sortedQ: ref.Q ⇒ ref.Q =
        q ⇒ paramsOpt.flatMap(_.sorting).fold(q) {
          case (colName, order) ⇒ q sortBy (ColumnPicker(ref.query, colName) _ andThen sortOrder(order))
        }

      // Apply the (single) column filter, if any: case-insensitive substring
      // match done in SQL by casting the column to String.
      val filteredQ: ref.Q ⇒ ref.Q =
        q ⇒ paramsOpt.flatMap(_.filter).fold(q) {
          f ⇒ q filter { table ⇒
            val col        = ColumnPicker(ref.query, f.columnInfo)(table)
            val valueLower = f.value.value.toLowerCase
            col.asColumnOf[String].toLowerCase.indexOf(valueLower) =!= -1
          }
        }

      // Apply paging (offset + page size), if any.
      val pagedQ: ref.Q ⇒ ref.Q =
        q ⇒ paramsOpt.fold(q)(
          view ⇒ q drop view.start take view.pageSize
        )

      val q = (filteredQ andThen sortedQ andThen pagedQ)(ref.query)

      // Encode each fetched row as strings; no row id is attached here (None).
      CrudDbOp fromDbio (q.result map (_ map (ref.metadata encodeRow None)))
    }

    /** Reads the single row identified by `idStr`, or None if absent. */
    def readRow[ID: FlatRepShape, TABLE <: AbstractTable[_], LP, P]
               (ref: TableRef[ID, TABLE, LP, P], idStr: StrRowId): CrudDbOp[Option[StrTableRow]] =
      for {
        id   ← decodeId(ref, idStr)
        pOpt ← CrudDbOp fromDbio (ref queryById id).result.headOption
      } yield pOpt map (ref.metadata encodeRow id.some)

    /** Reads, for every linked table registered on `ref`, the rows related to `idStr`. */
    def readLinked[ID: FlatRepShape, TABLE <: AbstractTable[_], LP, P]
                  (ref: TableRef[ID, TABLE, LP, P], idStr: StrRowId): CrudDbOp[Seq[StrLinkedRows]] =
      for {
        id   ← decodeId(ref, idStr)
        rows ← CrudDbOp fromDbio (DBIO sequence (ref.linked map (_ linkedRows id)))
      } yield rows

    /**
     * Updates one cell (`col`) of the row identified by `idStr` to `valueStr`.
     *
     * Validates that the column exists and the table is editable, decodes the
     * string value through the column's Stringifier, and fails unless exactly
     * one row was affected. Returns (old value if readable, new value).
     */
    def update[ID: FlatRepShape, TABLE <: AbstractTable[_], LP, P]
              (ref: TableRef[ID, TABLE, LP, P], idStr: StrRowId, col: ColumnRef, valueStr: StrValue): CrudDbOp[(Option[StrValue], StrValue)] =
      for {
        id          ← decodeId(ref, idStr)
        stringifier ← CrudDbOp fromOpt (ref.metadata stringifierFor col, XTechnicalMsg(s"table has no column ${col.name.value}"))
        _           ← CrudDbOp require (ref.base.isEditable, XTechnicalMsg("Table is not editable"))
        validValue  ← decode(idStr, col, stringifier, valueStr)
        rowQ        = ref.base queryById id
        updateQ     = rowQ map (ColumnPicker(rowQ, col) _ andThen ensureOptionalColumn(validValue))
        oldValueOpt ← CrudDbOp fromDbio updateQ.result.headOption
        n           ← CrudDbOp fromDbio (updateQ update validValue)
        _           ← ensureOneRowChanged(n)
      } yield (oldValueOpt map stringifier.encode map StrValue, StrValue(stringifier encode validValue))

    /**
     * Inserts a new row decoded from `strRow`.
     *
     * First attempts an insert that returns a database-generated id; if the
     * table has no auto-generated id that attempt fails and a plain insert is
     * done instead, extracting the id from the row itself when possible.
     * Returns the new row's encoded id, if one could be determined.
     */
    def create[ID: FlatRepShape, TABLE <: AbstractTable[_]]
              (ref: BaseTableRef[ID, TABLE], strRow: Map[ColumnRef, StrValue]): CrudDbOp[Option[StrRowId]] = {

      def createQ(row: TABLE#TableElementType): DBIO[Option[ID]] = {
        val insertAutoGen: DBIO[ID] = ref.query returning (ref.query map ref.idCol) += row
        val insert:        DBIO[Int] = ref.query += row

        // Fall back to a plain insert when the "returning" insert is unsupported.
        insertAutoGen.asTry flatMap {
          case Success(id) ⇒ DBIO successful id.some
          case Failure(_)  ⇒ insert map (n ⇒ ref.metadata extractIdFromRow row)
        }
      }

      for {
        _     ← CrudDbOp require (ref.isEditable, XTechnicalMsg("Table is not editable"))
        row   ← CrudDbOp(ref.metadata.decodeRow(None, strRow))
        idOpt ← CrudDbOp fromDbio createQ(row)
      } yield idOpt map ref.metadata.encodeId
    }

    /**
     * Deletes the row identified by `idStr`, requiring both editability and
     * delete permission, and failing unless exactly one row was removed.
     */
    def delete[ID: FlatRepShape, TABLE <: AbstractTable[_]]
              (ref: BaseTableRef[ID, TABLE], idStr: StrRowId): CrudDbOp[Unit] =
      for {
        id ← decodeId(ref, idStr)
        _  ← CrudDbOp require (ref.base.isEditable, XTechnicalMsg("Table is not editable"))
        _  ← CrudDbOp require (ref.canDelete, XTechnicalMsg("Can not delete from table"))
        n  ← CrudDbOp fromDbio (ref queryById id).delete
        _  ← ensureOneRowChanged(n)
      } yield ()

    // Guard used after updates/deletes: anything other than exactly one
    // affected row is an error (0 = nothing matched, >1 = rolled back).
    private def ensureOneRowChanged(n: Int): CrudDbOp[Unit] = n match {
      case 1   ⇒ CrudDbOp success (())
      case 0   ⇒ CrudDbOp failure XTechnicalMsg("No rows matched")
      case num ⇒ CrudDbOp failure XTechnicalMsg(s"Rolled back because matched $num rows")
    }

    // Decodes a string cell value via the column's Stringifier, turning a
    // decode failure into a per-column validation error for the client.
    private def decode[T](strRowId: StrRowId, columnRef: ColumnRef, S: Stringifier[T], v: StrValue): CrudDbOp[T] =
      CrudDbOp[T](
        S decode v.value match {
          case Left(fail) => XValidation(Some(strRowId), Seq((columnRef, Some(fail))))
          case Right(id)  => XSuccess(id)
        }
      )

    // Decodes a string row id using the table's id column and id Stringifier.
    private def decodeId[ID: FlatRepShape, TABLE <: AbstractTable[_], LP, P](ref: TableRef[ID, TABLE, LP, P], idStr: StrRowId): CrudDbOp[ID] =
      decode(idStr, ref.metadata.idCol, ref.metadata.idStringifier, idStr.asValue)
  }
}
elacin/slick-crud
crud/jvm/src/main/scala/com/olvind/crud/server/crudActions.scala
Scala
apache-2.0
5,714
/*
 * Accio is a platform to launch computer science experiments.
 * Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
 *
 * Accio is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Accio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Accio. If not, see <http://www.gnu.org/licenses/>.
 */

package fr.cnrs.liris.lumos.storage.mysql

import com.twitter.finagle.mysql.Parameter.{NullParameter, wrap}
import com.twitter.finagle.mysql._
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util.{Future, StorageUnit, Time}
import fr.cnrs.liris.lumos.domain._
import fr.cnrs.liris.lumos.domain.thrift.ThriftAdapter
import fr.cnrs.liris.lumos.storage.{JobQuery, JobStore}
import fr.cnrs.liris.util.scrooge.BinaryScroogeSerializer

import scala.collection.mutable

/**
 * MySQL-backed [[JobStore]].
 *
 * Jobs are stored as a Thrift-serialized blob in the `jobs` table; a few
 * fields (state, owner, create_time) are denormalized into columns for
 * querying, and labels are denormalized into the `jobs_labels` table.
 */
private[storage] final class MysqlJobStore(client: Client, statsReceiver: StatsReceiver)
  extends JobStore {

  // Tracks the serialized size of stored jobs, in kilobytes.
  private[this] val sizeStat = statsReceiver.stat("storage", "job", "content_size_kb")

  /**
   * Inserts a new job plus one row per label. Returns AlreadyExists when a
   * job with the same name is already stored.
   */
  override def create(job: Job): Future[Status] = {
    val content = encode(job)
    sizeStat.add(StorageUnit.fromBytes(content.length).inKilobytes)
    client
      .prepare(MysqlJobStore.InsertQuery)
      .apply(
        job.name,
        job.createTime.getMillis,
        job.status.state.name,
        job.owner.map(wrap(_)).getOrElse(NullParameter),
        content)
      .flatMap {
        case _: OK =>
          // Insert the denormalized labels only after the main row succeeded.
          val fs = job.labels.toSeq.map {
            case (k, v) => client.prepare(MysqlJobStore.InsertLabelQuery).apply(job.name, k, v)
          }
          Future.join(fs).map(_ => Status.Ok)
        case res => throw new RuntimeException(s"Unexpected MySQL result: $res")
      }
      .handle {
        // Error code 1062 corresponds to a duplicate entry, which means the object already exists.
        case ServerError(1062, _, _) => Status.AlreadyExists(job.name)
      }
  }

  /**
   * Overwrites an existing job's state and serialized content.
   * Returns NotFound when no row with that name exists.
   */
  override def replace(job: Job): Future[Status] = {
    // Create time, owner and labels are all immutable, so we do not update their denormalizations.
    // Only the state might be updated later on.
    val content = encode(job)
    sizeStat.add(StorageUnit.fromBytes(content.length).inKilobytes)
    client
      .prepare(MysqlJobStore.ReplaceQuery)
      .apply(job.status.state.name, content, job.name)
      .map {
        case ok: OK =>
          if (ok.affectedRows == 1) Status.Ok else Status.NotFound(job.name)
        case res => throw new RuntimeException(s"Unexpected MySQL result: $res")
      }
  }

  /** Deletes the job with the given name; NotFound when no row was affected. */
  override def delete(name: String): Future[Status] = {
    client
      .prepare(MysqlJobStore.DeleteQuery)
      .apply(name)
      .map {
        case ok: OK =>
          if (ok.affectedRows == 1) Status.Ok else Status.NotFound(name)
        case res => throw new RuntimeException(s"Unexpected MySQL result: $res")
      }
  }

  /**
   * Lists jobs matching `query`, newest first, with optional paging.
   *
   * The SQL is assembled dynamically; note that each `where` clause and its
   * `params` entries must be appended in the same order, since the statement
   * uses positional placeholders.
   */
  override def list(query: JobQuery, limit: Option[Int], offset: Option[Int]): Future[JobList] = {
    val where = mutable.ListBuffer.empty[String]
    val params = mutable.ListBuffer.empty[Parameter]
    query.owner.foreach { owner =>
      where += "owner = ?"
      params += owner
    }
    if (query.state.nonEmpty) {
      where += s"state in ${sqlSet(query.state)}"
      query.state.foreach(v => params += v.name)
    }
    // Translate each label-selector requirement into a sub-query on jobs_labels.
    query.labels.foreach { selector =>
      selector.requirements.foreach { req =>
        req.op match {
          case LabelSelector.Absent =>
            where += "(select count(1) from jobs_labels where name = t.name and label_key = ?) = 0"
            params += req.key
          case LabelSelector.Present =>
            where += "(select count(1) from jobs_labels where name = t.name and label_key = ?) > 0"
            params += req.key
          case LabelSelector.In =>
            // We assume that the selector is valid and hence `values` is not empty (otherwise the
            // SQL clause `in()` would be problematic).
            where += "(select label_value from jobs_labels where name = t.name and label_key = ?) " +
              s"in (${Seq.fill(req.values.size)("?").mkString(", ")})"
            params += req.key
            req.values.foreach(v => params += v)
          case LabelSelector.NotIn =>
            // We prefer re-using the same sub-query twice (`getValueQuery`) instead of performing
            // a count first (as in the absent case above), because we expect MySQL to optimize
            // when the same sub-query appears twice and only execute it once.
            val getValueQuery = "(select label_value from jobs_labels where name = t.name and label_key = ?)"
            // We assume that the selector is valid and hence `values` is not empty (otherwise the
            // SQL clause `in()` would be problematic).
            where += s"($getValueQuery is null or $getValueQuery not in ${sqlSet(req.values)})"
            // The key placeholder appears in both copies of the sub-query, hence twice.
            params += req.key
            params += req.key
            req.values.foreach(v => params += v)
        }
      }
    }
    var sql = s"select content from jobs t where ${if (where.nonEmpty) where.mkString(" and ") else "true"} order by create_time desc"
    (offset, limit) match {
      case (Some(o), Some(l)) => sql += s" limit $l offset $o"
      case (Some(o), None) => sql += s" limit ${Int.MaxValue} offset $o"
      case (None, Some(l)) => sql += s" limit $l"
      case (None, None) => // Do nothing.
    }
    // Second statement computes the total (unpaged) count for the same filter.
    val sql2 = s"select count(1) from jobs t where ${if (where.nonEmpty) where.mkString(" and ") else "true"}"
    Future.join(
      client.prepare(sql).select(params: _*)(decodeJob),
      client.prepare(sql2).select(params: _*)(decodeCount).map(_.head))
      .map { case (jobs, totalCount) => JobList(jobs, totalCount) }
  }

  /** Fetches a single job by name, decoding its serialized content. */
  override def get(name: String): Future[Option[Job]] = {
    client
      .prepare(MysqlJobStore.GetQuery)
      .select(name)(decodeJob)
      .map(_.headOption)
  }

  // Serializes a domain Job to its Thrift binary representation.
  private def encode(job: Job): Array[Byte] = {
    val obj = ThriftAdapter.toThrift(job)
    BinaryScroogeSerializer.toBytes(obj)
  }

  // Deserializes the `content` blob column back into a domain Job.
  private def decodeJob(row: Row): Job =
    row("content").get match {
      case raw: RawValue =>
        val obj = BinaryScroogeSerializer.fromBytes(raw.bytes, thrift.Job)
        ThriftAdapter.toDomain(obj)
      case v => throw new RuntimeException(s"Unexpected content value: $v")
    }

  // Reads a count(1) result, which MySQL may surface as int or long.
  private def decodeCount(row: Row): Long =
    row.values.head match {
      case IntValue(v) => v
      case LongValue(v) => v
      case v => throw new RuntimeException(s"Unexpected content value: $v")
    }

  /** Creates the backing tables if they do not exist yet. */
  override def startUp(): Future[Unit] = {
    val fs = MysqlJobStore.Ddl.map(ddl => client.query(ddl).unit)
    Future.join(fs)
  }

  override def close(deadline: Time): Future[Unit] = client.close(deadline)

  // Builds a "(?, ?, ...)" placeholder group with one slot per element.
  private def sqlSet(elements: Iterable[_]) = '(' + Seq.fill(elements.size)("?").mkString(", ") + ')'
}

object MysqlJobStore {
  // DDL executed at startup; idempotent via "if not exists".
  private val Ddl = Seq(
    "create table if not exists jobs(" +
      "unused_id int not null auto_increment," +
      "name varchar(255) not null," +
      "state varchar(15) not null," +
      "owner varchar(255) null," +
      "create_time bigint not null," +
      // Mediumblob is up to 16Mb.
      "content mediumblob not null," +
      "primary key (unused_id)," +
      "unique key uix_name(name)" +
      ") engine=InnoDB default charset=utf8",
    "create table if not exists jobs_labels(" +
      "unused_id int not null auto_increment," +
      "name varchar(255) not null," +
      "label_key varchar(255) not null," +
      "label_value varchar(255) not null," +
      "primary key (unused_id)," +
      "key uix_name_label_key(name, label_key)" +
      ") engine=InnoDB default charset=utf8")

  private val GetQuery = "select content from jobs where name = ?"
  private val DeleteQuery = "delete from jobs where name = ?"
  private val ReplaceQuery = "update jobs set state = ?, content = ? where name = ?"
  private val InsertQuery = "insert into jobs(name, create_time, state, owner, content) values(?, ?, ?, ?, ?)"
  private val InsertLabelQuery = "insert into jobs_labels(name, label_key, label_value) values(?, ?, ?)"
}
privamov/accio
accio/java/fr/cnrs/liris/lumos/storage/mysql/MysqlJobStore.scala
Scala
gpl-3.0
8,534
/*
 * sbt
 * Copyright 2011 - 2018, Lightbend, Inc.
 * Copyright 2008 - 2010, Mark Harrah
 * Licensed under Apache License 2.0 (see LICENSE)
 */

package sbt.internal.classpath

import java.io.File
import java.lang.management.ManagementFactory
import java.lang.ref.{ Reference, ReferenceQueue, SoftReference }
import java.net.URLClassLoader
import java.util.concurrent.atomic.{ AtomicInteger, AtomicReference }

import sbt.internal.inc.classpath.{ AbstractClassLoaderCache, ClassLoaderCache => IncClassLoaderCache }
import sbt.internal.inc.{ AnalyzingCompiler, ZincUtil }
import sbt.io.IO
import xsbti.ScalaProvider
import xsbti.compile.{ ClasspathOptions, ScalaInstance }

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.util.control.NonFatal

private object ClassLoaderCache {
  // Fix: this must be a `val`. As a `def`, every call allocated a brand new
  // AtomicInteger(0), so getAndIncrement() always returned 0 and every
  // cleanup thread got the same name instead of a unique id.
  private val threadID = new AtomicInteger(0)
}

/**
 * A cache of classloaders keyed by classpath (files + last-modified stamps)
 * and parent loader. A background thread closes loaders evicted by garbage
 * collection so their URL classpath handles are not leaked.
 */
private[sbt] class ClassLoaderCache(
    val parent: ClassLoader,
    private val miniProvider: Option[(File, ClassLoader)]
) extends AbstractClassLoaderCache {
  private[this] val parentHolder = new AtomicReference(parent)
  def commonParent = parentHolder.get()
  def setParent(parent: ClassLoader): Unit = parentHolder.set(parent)

  def this(commonParent: ClassLoader) = this(commonParent, None)
  def this(scalaProvider: ScalaProvider) =
    this(
      scalaProvider.launcher.topLoader, {
        // Best effort: reflectively grab the scala-library-only loader when
        // the launcher implementation exposes it; fall back to None otherwise.
        scalaProvider.jars.find(_.getName == "scala-library.jar").flatMap { lib =>
          val clazz = scalaProvider.getClass
          try {
            val loader = clazz.getDeclaredMethod("libraryLoaderOnly").invoke(scalaProvider)
            Some(lib -> loader.asInstanceOf[ClassLoader])
          } catch { case NonFatal(_) => None }
        }
      }
    )

  // Pre-built key for the scala-library loader supplied by the launcher, so
  // that lookups of that exact classpath reuse the launcher's loader.
  private val scalaProviderKey = miniProvider.map {
    case (f, cl) =>
      new Key((f -> IO.getModifiedTimeOrZero(f)) :: Nil, commonParent) {
        override def toClassLoader: ClassLoader = cl
      }
  }

  /**
   * Cache key: the classpath entries with their modification stamps plus the
   * parent loader. Two keys are equal only if both the stamps and the parent
   * match, so touching a jar naturally produces a fresh loader.
   */
  private class Key(val fileStamps: Seq[(File, Long)], val parent: ClassLoader) {
    def this(files: List[File], parent: ClassLoader) =
      this(files.map(f => f -> IO.getModifiedTimeOrZero(f)), parent)
    def this(files: List[File]) = this(files, commonParent)
    lazy val files: Seq[File] = fileStamps.map(_._1)
    lazy val maxStamp: Long = fileStamps.maxBy(_._2)._2
    class CachedClassLoader
        extends URLClassLoader(fileStamps.map(_._1.toURI.toURL).toArray, parent) {
      override def toString: String =
        s"CachedClassloader {\n parent: $parent\n urls:\n" + getURLs.mkString(" ", "\n", "\n}")
    }
    def toClassLoader: ClassLoader = new CachedClassLoader
    // Fix: the match was non-exhaustive (`case that: Key` only), so comparing
    // a Key against any non-Key value threw a MatchError instead of returning
    // false, violating the Object.equals contract.
    override def equals(o: Any): Boolean = o match {
      case that: Key => this.fileStamps == that.fileStamps && this.parent == that.parent
      case _         => false
    }
    override def hashCode(): Int = (fileStamps.hashCode * 31) ^ parent.hashCode
    override def toString: String = s"Key(${fileStamps mkString ","}, $parent)"
  }

  private[this] val delegate =
    new java.util.concurrent.ConcurrentHashMap[Key, Reference[ClassLoader]]()
  private[this] val referenceQueue = new ReferenceQueue[ClassLoader]

  // Drops stale entries: for keys sharing the same classpath+parent, only the
  // entry with the newest stamps survives; entries whose parent loader has
  // been invalidated are also removed (and their wrapped loaders invalidated).
  private[this] def clearExpiredLoaders(): Unit = lock.synchronized {
    val clear = (k: Key, ref: Reference[ClassLoader]) => {
      ref.get() match {
        case w: WrappedLoader => w.invalidate()
        case _                =>
      }
      delegate.remove(k)
      ()
    }
    def isInvalidated(classLoader: ClassLoader): Boolean = classLoader match {
      case w: WrappedLoader => w.invalidated()
      case _                => false
    }
    delegate.asScala.groupBy { case (k, _) => k.parent -> k.files.toSet }.foreach {
      case (_, pairs) if pairs.size > 1 =>
        val max = pairs.map(_._1.maxStamp).max
        pairs.foreach { case (k, v) => if (k.maxStamp != max) clear(k, v) }
      case _ =>
    }
    delegate.forEach((k, v) => if (isInvalidated(k.parent)) clear(k, v))
  }

  // Daemon thread draining the reference queue: closes collected loaders so
  // their URL classpath handles are released, then prunes expired entries.
  private[this] class CleanupThread(private[this] val id: Int)
      extends Thread(s"classloader-cache-cleanup-$id") {
    setDaemon(true)
    start()
    @tailrec
    override final def run(): Unit = {
      val stop =
        try {
          referenceQueue.remove(1000) match {
            case ClassLoaderReference(key, classLoader) =>
              close(classLoader)
              delegate.remove(key)
              ()
            case _ =>
          }
          clearExpiredLoaders()
          false
        } catch { case _: InterruptedException => true }
      if (!stop) run()
    }
  }

  /*
   * We need to manage the cache differently depending on whether or not sbt is started up with
   * -XX:MaxMetaspaceSize=XXX. The reason is that when the metaspace limit is reached, the jvm
   * will run a few Full GCs that will clear SoftReferences so that it can cleanup any classes
   * that only softly reachable. If the GC during this phase is able to collect a classloader, it
   * will free the metaspace (or at least some of it) previously occupied by the loader. This can
   * prevent sbt from crashing with an OOM: Metaspace. The issue with this is that when a loader
   * is collected in this way, it will leak handles to its url classpath. To prevent the resource
   * leak, we can store a reference to a wrapper loader. That reference, in turn, holds a
   * strong reference to the underlying loader. Under heap memory pressure, the jvm will clear the
   * soft reference for the wrapped loader and add it to the reference queue. We add a thread
   * that reads from the reference queue and closes the underlying URLClassLoader, preventing the
   * resource leak. When the system is under heap memory pressure, this eviction approach works
   * well. The problem is that we cannot prevent OOM: MetaSpace because the jvm doesn't give us
   * a long enough window to clear the ClassLoader references. The wrapper class will get cleared
   * during the Metaspace Full GC window, but, even though we quickly clear the strong reference
   * to the underlying classloader and close it, the jvm gives up and crashes with an OOM.
   *
   * To avoid these crashes, if the user starts with a limit on metaspace size via
   * -XX:MetaSpaceSize=XXX, we will just store direct soft references to the URLClassLoader and
   * leak url classpath handles when loaders are evicted by garbage collection. This is consistent
   * with the behavior of sbt versions < 1.3.0. In general, these leaks are probably not a big deal
   * except on windows where they prevent any files for which the leaked class loader has an open
   * handle from being modified. On linux and mac, we probably leak some file descriptors but it's
   * fairly uncommon for sbt to run out of file descriptors.
   */
  private[this] val metaspaceIsLimited = ManagementFactory.getMemoryPoolMXBeans.asScala
    .exists(b => (b.getName == "Metaspace") && (b.getUsage.getMax > 0))
  private[this] val mkReference: (Key, ClassLoader) => Reference[ClassLoader] =
    if (metaspaceIsLimited) { (_, cl) =>
      (new SoftReference[ClassLoader](cl, referenceQueue): Reference[ClassLoader])
    } else ClassLoaderReference.apply
  private[this] val cleanupThread = new CleanupThread(ClassLoaderCache.threadID.getAndIncrement())
  private[this] val lock = new Object
  private def close(classLoader: ClassLoader): Unit = classLoader match {
    case a: AutoCloseable => a.close()
    case _                =>
  }

  // Soft reference to a wrapper of the loader; the strong `classLoader` field
  // lets the cleanup thread close the real loader after the wrapper is collected.
  private case class ClassLoaderReference(key: Key, classLoader: ClassLoader)
      extends SoftReference[ClassLoader](
        new WrappedLoader(classLoader),
        referenceQueue
      )

  def apply(
      files: List[(File, Long)],
      parent: ClassLoader,
      mkLoader: () => ClassLoader
  ): ClassLoader = {
    val key = new Key(files, parent)
    get(key, mkLoader)
  }
  def apply(files: List[File], parent: ClassLoader): ClassLoader = {
    val key = new Key(files, parent)
    get(key, () => key.toClassLoader)
  }
  override def apply(files: List[File]): ClassLoader = {
    files match {
      // Scala 3 pairs (dotty/scala3-library + scala-library) need a parent
      // that can see jline's Terminal classes.
      case d :: _ :: Nil
          if d.getName.startsWith("dotty-library") || d.getName.startsWith("scala3-library") =>
        apply(files, classOf[org.jline.terminal.Terminal].getClassLoader)
      case _ =>
        val key = new Key(files)
        get(key, () => key.toClassLoader)
    }
  }
  override def cachedCustomClassloader(
      files: List[File],
      mkLoader: () => ClassLoader
  ): ClassLoader = {
    val key = new Key(files)
    get(key, mkLoader)
  }

  // Lookup-or-create: the scala-library loader from the launcher short-circuits
  // the cache; otherwise a cleared/absent reference triggers loader creation.
  private[this] def get(key: Key, f: () => ClassLoader): ClassLoader = {
    scalaProviderKey match {
      case Some(k) if k == key => k.toClassLoader
      case _ =>
        def addLoader(): ClassLoader = {
          val ref = mkReference(key, f())
          val loader = ref.get
          delegate.put(key, ref)
          clearExpiredLoaders()
          loader
        }
        lock.synchronized {
          delegate.get(key) match {
            case null => addLoader()
            case ref =>
              ref.get match {
                case null => addLoader()
                case l    => l
              }
          }
        }
    }
  }

  // Closes every cached loader still reachable and empties the map.
  private def clear(lock: Object): Unit = {
    delegate.asScala.foreach {
      case (_, ClassLoaderReference(_, classLoader)) => close(classLoader)
      case (_, r: Reference[ClassLoader]) =>
        r.get match {
          case null        =>
          case classLoader => close(classLoader)
        }
      case (_, _) =>
    }
    delegate.clear()
  }

  /**
   * Clears any ClassLoader instances from the internal cache and closes them. Calling this
   * method will not stop the cleanup thread. Call [[close]] to fully clean up this cache.
   */
  def clear(): Unit = lock.synchronized(clear(lock))

  /**
   * Completely shuts down this cache: stops the background cleanup thread,
   * then clears any ClassLoader instances from the internal cache and closes
   * them.
   */
  override def close(): Unit = lock.synchronized {
    cleanupThread.interrupt()
    cleanupThread.join()
    clear(lock)
  }
}

private[sbt] object AlternativeZincUtil {
  /** Builds an AnalyzingCompiler wired to a fixed compiler-bridge jar. */
  def scalaCompiler(
      scalaInstance: ScalaInstance,
      compilerBridgeJar: File,
      classpathOptions: ClasspathOptions,
      classLoaderCache: Option[IncClassLoaderCache]
  ): AnalyzingCompiler = {
    val bridgeProvider = ZincUtil.constantBridgeProvider(scalaInstance, compilerBridgeJar)
    new AnalyzingCompiler(
      scalaInstance,
      bridgeProvider,
      classpathOptions,
      _ => (),
      classLoaderCache
    )
  }
}
xuwei-k/xsbt
main-command/src/main/scala/sbt/internal/classpath/ClassLoaderCache.scala
Scala
apache-2.0
10,604
package com.ee.utils

import java.io.File
import org.specs2.mutable.Specification
import java.net.URL

/** Specs for the `com.ee.utils.file` helpers. */
class fileTest extends Specification {

  import com.ee.utils.file._

  "name and suffix" should {
    "return the name and suffix as a tuple" in {
      nameAndSuffix("file.txt") ===("file", "txt")
      nameAndSuffix("thing.file.txt") ===("thing.file", "txt")
      nameAndSuffix(".gitignore") ===(".gitignore", "")
      nameAndSuffix("some_bad_file_name.") ===("some_bad_file_name", "")
      nameAndSuffix("some_bad_file_name...") ===("some_bad_file_name..", "")
    }
  }

  "distinct files" should {

    def toFiles(files: List[String]): List[File] = {
      files.map(new File(_))
    }

    def relativize(files: List[File]): List[String] = {
      val root = new File(".").getAbsolutePath
      files.map(_.getCanonicalPath.replace(root.substring(0, root.length() - 1), ""))
    }

    "list only distinct files" in {
      val root = "test/public/com/ee/utils/file"
      val paths = List(root + "/testOne", root + "/testOne/one.js", root + "/one.js")
      val out = distinctFiles(toFiles(paths): _*)
      //converting a string to a File makes the path os-dependent, that is why we do it on both sides
      toFiles(relativize(out)) === toFiles(List(root + "/testOne/one.js", root + "/one.js"))
    }
  }

  "common root folder" should {
    "empty for empty args" in commonRootFolder() === ""
    // Fix: this example used `==` (a bare Boolean) instead of the `===`
    // matcher used everywhere else, which loses the descriptive failure
    // message on mismatch.
    "return parent for single query" in commonRootFolder("a/b") === "a"
    "a/b" in commonRootFolder("a/b/c.txt", "a/b/c/d.txt") === "a/b"
    "empty for no common root" in commonRootFolder("z/b/c.txt", "a/b/c/d.txt") === ""
    "parent" in commonRootFolder("a/a.txt") === "a"
    "parent" in commonRootFolder("a/b.b/a.txt") === "a/b.b"
    "parent" in commonRootFolder("a/b.b/a.txt", "a/b.b/c.c/t.txt") === "a/b.b"

    "n paths" in {
      commonRootFolder(
        "a/b/c/d/e/f.txt",
        "a/b/c/d.txt",
        "a/b/c/d/e/f/g",
        "a/b/c/d/z/f/g/x.txt"
      ) === "a/b/c"

      commonRootFolder(
        "/a/b/c/d/e/f.txt",
        "/a/b/c/d.txt",
        "/a/b/c/d/e/f/g",
        "/a/b/c/d/z/f/g/x.txt"
      ) === "/a/b/c"
    }

    "leading slash is different to no slash" in commonRootFolder(
      "/a/b/c.txt",
      "a/b/c.txt"
    ) === ""
  }
}
edeustace/assets-loader
plugin/test/com/ee/utils/fileTest.scala
Scala
mit
2,306
object Solution {

  /**
   * Index of a character whose removal turns `s` into a palindrome,
   * or -1 when `s` already is one. Per the problem statement, every
   * non-palindromic input is guaranteed to have such an index.
   */
  def palindromeIndex(s: String): Int = {
    def isPal(t: String): Boolean = t == t.reverse

    if (isPal(s)) -1
    else {
      val reversed = s.reverse
      // First position where the string disagrees with its mirror image;
      // the character to drop must sit at this index or at its mirror.
      val mismatch = s.zip(reversed).indexWhere { case (a, b) => a != b }
      val dropped = s.substring(0, mismatch) + s.substring(mismatch + 1)
      if (isPal(dropped)) mismatch else s.length - 1 - mismatch
    }
  }

  /** Reads a case count, then one string per case, printing each answer. */
  def main(args: Array[String]): Unit = {
    val cases = readInt
    (1 to cases).foreach { _ =>
      println(palindromeIndex(readLine))
    }
  }
}
advancedxy/hackerrank
algorithms/strings/PalinromeIndex.scala
Scala
mit
498
/*
 * This file is part of CubeLoader.
 * Copyright (c) 2016 - 2017, KitsuneAlex, All rights reserved.
 *
 * CubeLoader is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CubeLoader is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with CubeLoader. If not, see <http://www.gnu.org/licenses/lgpl>.
 */

package de.keri.cubelib.item.armor

import de.keri.cubelib.item.TDefaultItemImpl
import net.minecraft.client.renderer.texture.{TextureAtlasSprite, TextureMap}
import net.minecraft.creativetab.CreativeTabs
import net.minecraft.inventory.EntityEquipmentSlot
import net.minecraft.item.ItemArmor.ArmorMaterial
import net.minecraft.item.{ItemArmor, ItemStack}
import net.minecraft.util.{NonNullList, ResourceLocation}
import net.minecraftforge.fml.relauncher.{Side, SideOnly}

/**
 * Base class for boot items: an [[ItemArmor]] occupying the FEET slot that
 * wires texture registration and mod-id bookkeeping into [[TDefaultItemImpl]].
 */
class ItemBootsBase(itemName: String, material: ArmorMaterial)
  extends ItemArmor(material, 0, EntityEquipmentSlot.FEET)
    with TDefaultItemImpl {

  // Mod id this item was registered under; assigned once via setModid.
  private var registeredModid: String = _

  // Item sprite, resolved client-side during texture stitching.
  @SideOnly(Side.CLIENT)
  private var bootsSprite: TextureAtlasSprite = _

  override def getSubItems(tab: CreativeTabs, items: NonNullList[ItemStack]): Unit = {
    // Only a single variant (meta 0) is exposed, and only on our own tab.
    if (tab == getCreativeTab) {
      items.add(new ItemStack(this, 1, 0))
    }
  }

  @SideOnly(Side.CLIENT)
  override def registerTextures(map: TextureMap): Unit = {
    val spriteLocation = new ResourceLocation(registeredModid, s"items/armor/$itemName")
    bootsSprite = map.registerSprite(spriteLocation)
  }

  @SideOnly(Side.CLIENT)
  override def getTexture(meta: Int): TextureAtlasSprite = bootsSprite

  @SideOnly(Side.CLIENT)
  override def getTexture(stack: ItemStack): TextureAtlasSprite = null

  override def getModid: String = registeredModid

  override def getItemName: String = itemName

  override def setModid(modid: String): Unit = registeredModid = modid
}
TeamMD5/CubeLoader
src/main/scala/de/keri/cubelib/item/armor/ItemBootsBase.scala
Scala
gpl-3.0
2,214
/* sbt -- Simple Build Tool
 * Copyright 2010 Mark Harrah
 */
package sbt.internal.util
package complete

import History.number
import java.io.File

/**
 * A read-only view of the shell's command history.
 *
 * Note: the legacy `extends NotNull` marker was dropped — `scala.NotNull`
 * was removed from the language and never had any runtime effect.
 *
 * @param lines the recorded commands, oldest first
 * @param path  the backing history file, if any
 * @param error callback used to report user errors (e.g. an out-of-range
 *              history index) without throwing
 */
final class History private (val lines: IndexedSeq[String], val path: Option[File], error: String => Unit) {
  // Most lookups search from the most recent entry backwards.
  private def reversed = lines.reverse

  /** All recorded commands, oldest first. */
  def all: Seq[String] = lines

  /** Number of recorded commands. */
  def size = lines.length

  /** The most recent command, if any. */
  def !! : Option[String] = !-(1)

  /**
   * The command at absolute index `i` (0-based, oldest first). Reports via
   * the `error` callback and returns None when `i` is out of range.
   */
  def apply(i: Int): Option[String] =
    if (0 <= i && i < size) Some(lines(i))
    else {
      // Fix: previously called sys.error here, which both ignored the
      // injected `error` callback (leaving it entirely unused) and threw
      // an exception instead of degrading gracefully.
      error("Invalid history index: " + i)
      None
    }

  /** Same as [[apply]]: the command at absolute index `i`. */
  def !(i: Int): Option[String] = apply(i)

  /**
   * Interprets `s` either as a history index (`3` / `-2`, the latter counting
   * back from the most recent entry) or as a prefix of a previous command.
   */
  def !(s: String): Option[String] =
    number(s) match {
      case Some(n) => if (n < 0) !-(-n) else apply(n)
      case None    => nonEmpty(s) { reversed.find(_.startsWith(s)) }
    }

  /** The nth most recent command (1 = most recent). */
  def !-(n: Int): Option[String] = apply(size - n - 1)

  /** The most recent previous command (excluding the current one) containing `s`. */
  def !?(s: String): Option[String] = nonEmpty(s) { reversed.drop(1).find(_.contains(s)) }

  // Guards string-based lookups against an empty argument, reporting through
  // the error callback (fix: was sys.error) instead of throwing.
  private def nonEmpty[T](s: String)(act: => Option[T]): Option[T] =
    if (s.isEmpty) {
      error("No action specified to history command")
      None
    } else act

  /**
   * Formats the last `show` entries of the most recent `historySize` commands,
   * each prefixed with its absolute position within that window.
   */
  def list(historySize: Int, show: Int): Seq[String] =
    lines.toList.drop(scala.math.max(0, lines.size - historySize)).zipWithIndex.map {
      case (line, number) => " " + number + " " + line
    }.takeRight(show max 1)
}

object History {
  /**
   * Builds a History from recorded lines.
   *
   * Fix: the supplied `error` callback is now actually forwarded to the
   * instance — it was previously discarded in favor of a hard-coded
   * `sys.error`, making the parameter dead code.
   */
  def apply(lines: Seq[String], path: Option[File], error: String => Unit): History =
    new History(lines.toIndexedSeq, path, error)

  /** Parses `s` as an Int, returning None when it is not a number. */
  def number(s: String): Option[Int] =
    try { Some(s.toInt) }
    catch { case _: NumberFormatException => None }
}
Duhemm/util
internal/util-complete/src/main/scala/sbt/internal/util/complete/History.scala
Scala
bsd-3-clause
1,571
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.api.scala import org.apache.flink.annotation.{Public, PublicEvolving} import org.apache.flink.api.common.functions.{AggregateFunction, FoldFunction, ReduceFunction} import org.apache.flink.api.common.typeinfo.TypeInformation import org.apache.flink.streaming.api.datastream.{AllWindowedStream => JavaAllWStream} import org.apache.flink.streaming.api.functions.aggregation.AggregationFunction.AggregationType import org.apache.flink.streaming.api.functions.aggregation.{ComparableAggregator, SumAggregator} import org.apache.flink.streaming.api.scala.function.{AllWindowFunction, ProcessAllWindowFunction} import org.apache.flink.streaming.api.scala.function.util.{ScalaAllWindowFunction, ScalaAllWindowFunctionWrapper, ScalaFoldFunction, ScalaProcessAllWindowFunctionWrapper, ScalaReduceFunction} import org.apache.flink.streaming.api.windowing.evictors.Evictor import org.apache.flink.streaming.api.windowing.time.Time import org.apache.flink.streaming.api.windowing.triggers.Trigger import org.apache.flink.streaming.api.windowing.windows.Window import org.apache.flink.util.{Collector, OutputTag} import org.apache.flink.util.Preconditions.checkNotNull /** * A 
[[AllWindowedStream]] represents a data stream where the stream of * elements is split into windows based on a * [[org.apache.flink.streaming.api.windowing.assigners.WindowAssigner]]. Window emission * is triggered based on a [[Trigger]]. * * If an [[Evictor]] is specified it will be * used to evict elements from the window after * evaluation was triggered by the [[Trigger]] but before the actual evaluation of the window. * When using an evictor window performance will degrade significantly, since * pre-aggregation of window results cannot be used. * * Note that the [[AllWindowedStream()]] is purely and API construct, during runtime * the [[AllWindowedStream()]] will be collapsed together with the * operation over the window into one single operation. * * @tparam T The type of elements in the stream. * @tparam W The type of [[Window]] that the * [[org.apache.flink.streaming.api.windowing.assigners.WindowAssigner]] * assigns the elements to. */ @Public class AllWindowedStream[T, W <: Window](javaStream: JavaAllWStream[T, W]) { /** * Sets the allowed lateness to a user-specified value. * If not explicitly set, the allowed lateness is [[0L]]. * Setting the allowed lateness is only valid for event-time windows. * If a value different than 0 is provided with a processing-time * [[org.apache.flink.streaming.api.windowing.assigners.WindowAssigner]], * then an exception is thrown. */ @PublicEvolving def allowedLateness(lateness: Time): AllWindowedStream[T, W] = { javaStream.allowedLateness(lateness) this } /** * Send late arriving data to the side output identified by the given [[OutputTag]]. Data * is considered late after the watermark has passed the end of the window plus the allowed * lateness set using [[allowedLateness(Time)]]. * * You can get the stream of late data using [[DataStream.getSideOutput()]] on the [[DataStream]] * resulting from the windowed operation with the same [[OutputTag]]. 
*/ @PublicEvolving def sideOutputLateData(outputTag: OutputTag[T]): AllWindowedStream[T, W] = { javaStream.sideOutputLateData(outputTag) this } /** * Sets the [[Trigger]] that should be used to trigger window emission. */ @PublicEvolving def trigger(trigger: Trigger[_ >: T, _ >: W]): AllWindowedStream[T, W] = { javaStream.trigger(trigger) this } /** * Sets the [[Evictor]] that should be used to evict elements from a window before emission. * * Note: When using an evictor window performance will degrade significantly, since * pre-aggregation of window results cannot be used. */ @PublicEvolving def evictor(evictor: Evictor[_ >: T, _ >: W]): AllWindowedStream[T, W] = { javaStream.evictor(evictor) this } // ------------------------------------------------------------------------ // Operations on the windows // ------------------------------------------------------------------------ // ---------------------------- reduce() ------------------------------------ /** * Applies a reduce function to the window. The window function is called for each evaluation * of the window for each key individually. The output of the reduce function is interpreted * as a regular non-windowed stream. * * This window will try and pre-aggregate data as much as the window policies permit. For example, * tumbling time windows can perfectly pre-aggregate the data, meaning that only one element per * key is stored. Sliding time windows will pre-aggregate on the granularity of the slide * interval, so a few elements are stored per key (one per slide interval). * Custom windows may not be able to pre-aggregate, or may need to store extra values in an * aggregation tree. * * @param function The reduce function. * @return The data stream that is the result of applying the reduce function to the window. */ def reduce(function: ReduceFunction[T]): DataStream[T] = { asScalaStream(javaStream.reduce(clean(function))) } /** * Applies a reduce function to the window. 
The window function is called for each evaluation * of the window for each key individually. The output of the reduce function is interpreted * as a regular non-windowed stream. * * This window will try and pre-aggregate data as much as the window policies permit. For example, * tumbling time windows can perfectly pre-aggregate the data, meaning that only one element per * key is stored. Sliding time windows will pre-aggregate on the granularity of the slide * interval, so a few elements are stored per key (one per slide interval). * Custom windows may not be able to pre-aggregate, or may need to store extra values in an * aggregation tree. * * @param function The reduce function. * @return The data stream that is the result of applying the reduce function to the window. */ def reduce(function: (T, T) => T): DataStream[T] = { if (function == null) { throw new NullPointerException("Reduce function must not be null.") } val cleanFun = clean(function) val reducer = new ScalaReduceFunction[T](cleanFun) reduce(reducer) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation reducer. * * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. 
*/ def reduce[R: TypeInformation]( preAggregator: ReduceFunction[T], windowFunction: AllWindowFunction[T, R, W]): DataStream[R] = { val cleanedReducer = clean(preAggregator) val cleanedWindowFunction = clean(windowFunction) val applyFunction = new ScalaAllWindowFunctionWrapper[T, R, W](cleanedWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.reduce(cleanedReducer, applyFunction, returnType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation reducer. * * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. */ def reduce[R: TypeInformation]( preAggregator: (T, T) => T, windowFunction: (W, Iterable[T], Collector[R]) => Unit): DataStream[R] = { if (preAggregator == null) { throw new NullPointerException("Reduce function must not be null.") } if (windowFunction == null) { throw new NullPointerException("WindowApply function must not be null.") } val cleanReducer = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val reducer = new ScalaReduceFunction[T](cleanReducer) val applyFunction = new ScalaAllWindowFunction[T, R, W](cleanWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.reduce(reducer, applyFunction, returnType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation reducer. 
* * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The process window function. * @return The data stream that is the result of applying the window function to the window. */ @PublicEvolving def reduce[R: TypeInformation]( preAggregator: ReduceFunction[T], windowFunction: ProcessAllWindowFunction[T, R, W]): DataStream[R] = { val cleanedReducer = clean(preAggregator) val cleanedWindowFunction = clean(windowFunction) val applyFunction = new ScalaProcessAllWindowFunctionWrapper[T, R, W](cleanedWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.reduce(cleanedReducer, applyFunction, returnType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation reducer. * * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The process window function. * @return The data stream that is the result of applying the window function to the window. 
*/ @PublicEvolving def reduce[R: TypeInformation]( preAggregator: (T, T) => T, windowFunction: ProcessAllWindowFunction[T, R, W]): DataStream[R] = { if (preAggregator == null) { throw new NullPointerException("Reduce function must not be null.") } if (windowFunction == null) { throw new NullPointerException("WindowApply function must not be null.") } val cleanReducer = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val reducer = new ScalaReduceFunction[T](cleanReducer) val applyFunction = new ScalaProcessAllWindowFunctionWrapper[T, R, W](cleanWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.reduce(reducer, applyFunction, returnType)) } // --------------------------- aggregate() ---------------------------------- /** * Applies the given aggregation function to each window. The aggregation function * is called for each element, aggregating values incrementally and keeping the state to * one accumulator per window. * * @param aggregateFunction The aggregation function. * @return The data stream that is the result of applying the fold function to the window. */ @PublicEvolving def aggregate[ACC: TypeInformation, R: TypeInformation]( aggregateFunction: AggregateFunction[T, ACC, R]): DataStream[R] = { checkNotNull(aggregateFunction, "AggregationFunction must not be null") val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]] val resultType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.aggregate( clean(aggregateFunction), accumulatorType, resultType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given aggregation function. 
* * @param preAggregator The aggregation function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. */ @PublicEvolving def aggregate[ACC: TypeInformation, V: TypeInformation, R: TypeInformation]( preAggregator: AggregateFunction[T, ACC, V], windowFunction: AllWindowFunction[V, R, W]): DataStream[R] = { checkNotNull(preAggregator, "AggregationFunction must not be null") checkNotNull(windowFunction, "Window function must not be null") val cleanedPreAggregator = clean(preAggregator) val cleanedWindowFunction = clean(windowFunction) val applyFunction = new ScalaAllWindowFunctionWrapper[V, R, W](cleanedWindowFunction) val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]] val aggregationResultType: TypeInformation[V] = implicitly[TypeInformation[V]] val resultType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.aggregate( cleanedPreAggregator, applyFunction, accumulatorType, aggregationResultType, resultType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given aggregation function. * * @param preAggregator The aggregation function that is used for pre-aggregation * @param windowFunction The process window function. * @return The data stream that is the result of applying the window function to the window. 
*/ @PublicEvolving def aggregate[ACC: TypeInformation, V: TypeInformation, R: TypeInformation] (preAggregator: AggregateFunction[T, ACC, V], windowFunction: ProcessAllWindowFunction[V, R, W]): DataStream[R] = { checkNotNull(preAggregator, "AggregationFunction must not be null") checkNotNull(windowFunction, "Window function must not be null") val cleanedPreAggregator = clean(preAggregator) val cleanedWindowFunction = clean(windowFunction) val applyFunction = new ScalaProcessAllWindowFunctionWrapper[V, R, W](cleanedWindowFunction) val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]] val aggregationResultType: TypeInformation[V] = implicitly[TypeInformation[V]] val resultType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.aggregate( cleanedPreAggregator, applyFunction, accumulatorType, aggregationResultType, resultType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given aggregation function. * * @param preAggregator The aggregation function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. 
*/ @PublicEvolving def aggregate[ACC: TypeInformation, V: TypeInformation, R: TypeInformation]( preAggregator: AggregateFunction[T, ACC, V], windowFunction: (W, Iterable[V], Collector[R]) => Unit): DataStream[R] = { checkNotNull(preAggregator, "AggregationFunction must not be null") checkNotNull(windowFunction, "Window function must not be null") val cleanPreAggregator = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val applyFunction = new ScalaAllWindowFunction[V, R, W](cleanWindowFunction) val accumulatorType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]] val aggregationResultType: TypeInformation[V] = implicitly[TypeInformation[V]] val resultType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.aggregate( cleanPreAggregator, applyFunction, accumulatorType, aggregationResultType, resultType)) } // ----------------------------- fold() ------------------------------------- /** * Applies the given fold function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the reduce function is * interpreted as a regular non-windowed stream. * * @param function The fold function. * @return The data stream that is the result of applying the fold function to the window. */ def fold[R: TypeInformation]( initialValue: R, function: FoldFunction[T,R]): DataStream[R] = { if (function == null) { throw new NullPointerException("Fold function must not be null.") } val resultType : TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.fold(initialValue, function, resultType)) } /** * Applies the given fold function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the reduce function is * interpreted as a regular non-windowed stream. * * @param function The fold function. * @return The data stream that is the result of applying the fold function to the window. 
*/ def fold[R: TypeInformation](initialValue: R)(function: (R, T) => R): DataStream[R] = { if (function == null) { throw new NullPointerException("Fold function must not be null.") } val cleanFun = clean(function) val folder = new ScalaFoldFunction[T,R](cleanFun) fold(initialValue, folder) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation folder. * * @param initialValue Initial value of the fold * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. */ def fold[ACC: TypeInformation, R: TypeInformation]( initialValue: ACC, preAggregator: FoldFunction[T, ACC], windowFunction: AllWindowFunction[ACC, R, W]): DataStream[R] = { val cleanFolder = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val applyFunction = new ScalaAllWindowFunctionWrapper[ACC, R, W](cleanWindowFunction) asScalaStream(javaStream.fold( initialValue, cleanFolder, applyFunction, implicitly[TypeInformation[ACC]], implicitly[TypeInformation[R]])) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation folder. * * @param initialValue Initial value of the fold * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The process window function. * @return The data stream that is the result of applying the window function to the window. 
*/ @PublicEvolving def fold[ACC: TypeInformation, R: TypeInformation]( initialValue: ACC, preAggregator: FoldFunction[T, ACC], windowFunction: ProcessAllWindowFunction[ACC, R, W]): DataStream[R] = { val cleanFolder = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val applyFunction = new ScalaProcessAllWindowFunctionWrapper[ACC, R, W](cleanWindowFunction) asScalaStream(javaStream.fold( initialValue, cleanFolder, applyFunction, implicitly[TypeInformation[ACC]], implicitly[TypeInformation[R]])) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation folder. * * @param initialValue Initial value of the fold * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. */ def fold[ACC: TypeInformation, R: TypeInformation]( initialValue: ACC, preAggregator: (ACC, T) => ACC, windowFunction: (W, Iterable[ACC], Collector[R]) => Unit): DataStream[R] = { if (preAggregator == null) { throw new NullPointerException("Reduce function must not be null.") } if (windowFunction == null) { throw new NullPointerException("WindowApply function must not be null.") } val cleanFolder = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val folder = new ScalaFoldFunction[T, ACC](cleanFolder) val applyFunction = new ScalaAllWindowFunction[ACC, R, W](cleanWindowFunction) val accType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]] val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.fold(initialValue, folder, applyFunction, accType, returnType)) } /** * Applies the given window function to each window. 
The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation folder. * * @param initialValue Initial value of the fold * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. */ @PublicEvolving def fold[ACC: TypeInformation, R: TypeInformation]( initialValue: ACC, preAggregator: (ACC, T) => ACC, windowFunction: ProcessAllWindowFunction[ACC, R, W]): DataStream[R] = { if (preAggregator == null) { throw new NullPointerException("Reduce function must not be null.") } if (windowFunction == null) { throw new NullPointerException("WindowApply function must not be null.") } val cleanFolder = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val folder = new ScalaFoldFunction[T, ACC](cleanFolder) val applyFunction = new ScalaProcessAllWindowFunctionWrapper[ACC, R, W](cleanWindowFunction) val accType: TypeInformation[ACC] = implicitly[TypeInformation[ACC]] val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.fold(initialValue, folder, applyFunction, accType, returnType)) } // ---------------------------- apply() ------------------------------------- /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Not that this function requires that all data in the windows is buffered until the window * is evaluated, as the function provides no means of pre-aggregation. * * @param function The process window function. * @return The data stream that is the result of applying the window function to the window. 
*/ @PublicEvolving def process[R: TypeInformation]( function: ProcessAllWindowFunction[T, R, W]): DataStream[R] = { val cleanedFunction = clean(function) val javaFunction = new ScalaProcessAllWindowFunctionWrapper[T, R, W](cleanedFunction) asScalaStream(javaStream.process(javaFunction, implicitly[TypeInformation[R]])) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Note that this function requires that all data in the windows is buffered until the window * is evaluated, as the function provides no means of pre-aggregation. * * @param function The window function. * @return The data stream that is the result of applying the window function to the window. */ def apply[R: TypeInformation]( function: AllWindowFunction[T, R, W]): DataStream[R] = { val cleanedFunction = clean(function) val javaFunction = new ScalaAllWindowFunctionWrapper[T, R, W](cleanedFunction) asScalaStream(javaStream.apply(javaFunction, implicitly[TypeInformation[R]])) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Note that this function requires that all data in the windows is buffered until the window * is evaluated, as the function provides no means of pre-aggregation. * * @param function The window function. * @return The data stream that is the result of applying the window function to the window. 
*/ def apply[R: TypeInformation]( function: (W, Iterable[T], Collector[R]) => Unit): DataStream[R] = { val cleanedFunction = clean(function) val applyFunction = new ScalaAllWindowFunction[T, R, W](cleanedFunction) asScalaStream(javaStream.apply(applyFunction, implicitly[TypeInformation[R]])) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation reducer. * * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. * @deprecated Use [[reduce(ReduceFunction, AllWindowFunction)]] instead. */ @deprecated def apply[R: TypeInformation]( preAggregator: ReduceFunction[T], windowFunction: AllWindowFunction[T, R, W]): DataStream[R] = { val cleanedReducer = clean(preAggregator) val cleanedWindowFunction = clean(windowFunction) val applyFunction = new ScalaAllWindowFunctionWrapper[T, R, W](cleanedWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.apply(cleanedReducer, applyFunction, returnType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation reducer. * * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. * @deprecated Use [[reduce(ReduceFunction, AllWindowFunction)]] instead. 
*/ @deprecated def apply[R: TypeInformation]( preAggregator: (T, T) => T, windowFunction: (W, Iterable[T], Collector[R]) => Unit): DataStream[R] = { if (preAggregator == null) { throw new NullPointerException("Reduce function must not be null.") } if (windowFunction == null) { throw new NullPointerException("WindowApply function must not be null.") } val cleanReducer = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val reducer = new ScalaReduceFunction[T](cleanReducer) val applyFunction = new ScalaAllWindowFunction[T, R, W](cleanWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.apply(reducer, applyFunction, returnType)) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation folder. * * @param initialValue Initial value of the fold * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. * @deprecated Use [[fold(R, FoldFunction, AllWindowFunction)]] instead. */ @deprecated def apply[R: TypeInformation]( initialValue: R, preAggregator: FoldFunction[T, R], windowFunction: AllWindowFunction[R, R, W]): DataStream[R] = { val cleanFolder = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val applyFunction = new ScalaAllWindowFunctionWrapper[R, R, W](cleanWindowFunction) asScalaStream(javaStream.apply( initialValue, cleanFolder, applyFunction, implicitly[TypeInformation[R]])) } /** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. 
The output of the window function is * interpreted as a regular non-windowed stream. * * Arriving data is pre-aggregated using the given pre-aggregation folder. * * @param initialValue Initial value of the fold * @param preAggregator The reduce function that is used for pre-aggregation * @param windowFunction The window function. * @return The data stream that is the result of applying the window function to the window. * @deprecated Use [[fold(R, FoldFunction, AllWindowFunction)]] instead. */ @deprecated def apply[R: TypeInformation]( initialValue: R, preAggregator: (R, T) => R, windowFunction: (W, Iterable[R], Collector[R]) => Unit): DataStream[R] = { if (preAggregator == null) { throw new NullPointerException("Reduce function must not be null.") } if (windowFunction == null) { throw new NullPointerException("WindowApply function must not be null.") } val cleanFolder = clean(preAggregator) val cleanWindowFunction = clean(windowFunction) val folder = new ScalaFoldFunction[T, R](cleanFolder) val applyFunction = new ScalaAllWindowFunction[R, R, W](cleanWindowFunction) val returnType: TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(javaStream.apply(initialValue, folder, applyFunction, returnType)) } // ------------------------------------------------------------------------ // Aggregations on the keyed windows // ------------------------------------------------------------------------ /** * Applies an aggregation that gives the maximum of the elements in the window at * the given position. */ def max(position: Int): DataStream[T] = aggregate(AggregationType.MAX, position) /** * Applies an aggregation that gives the maximum of the elements in the window at * the given field. */ def max(field: String): DataStream[T] = aggregate(AggregationType.MAX, field) /** * Applies an aggregation that gives the minimum of the elements in the window at * the given position. 
*/ def min(position: Int): DataStream[T] = aggregate(AggregationType.MIN, position) /** * Applies an aggregation that that gives the minimum of the elements in the window at * the given field. */ def min(field: String): DataStream[T] = aggregate(AggregationType.MIN, field) /** * Applies an aggregation that sums the elements in the window at the given position. */ def sum(position: Int): DataStream[T] = aggregate(AggregationType.SUM, position) /** * Applies an aggregation that sums the elements in the window at the given field. */ def sum(field: String): DataStream[T] = aggregate(AggregationType.SUM, field) /** * Applies an aggregation that that gives the maximum element of the window by * the given position. When equality, returns the first. */ def maxBy(position: Int): DataStream[T] = aggregate(AggregationType.MAXBY, position) /** * Applies an aggregation that that gives the maximum element of the window by * the given field. When equality, returns the first. */ def maxBy(field: String): DataStream[T] = aggregate(AggregationType.MAXBY, field) /** * Applies an aggregation that that gives the minimum element of the window by * the given position. When equality, returns the first. */ def minBy(position: Int): DataStream[T] = aggregate(AggregationType.MINBY, position) /** * Applies an aggregation that that gives the minimum element of the window by * the given field. When equality, returns the first. 
*/ def minBy(field: String): DataStream[T] = aggregate(AggregationType.MINBY, field) private def aggregate(aggregationType: AggregationType, field: String): DataStream[T] = { val position = fieldNames2Indices(getInputType(), Array(field))(0) aggregate(aggregationType, position) } def aggregate(aggregationType: AggregationType, position: Int): DataStream[T] = { val jStream = javaStream.asInstanceOf[JavaAllWStream[Product, W]] val reducer = aggregationType match { case AggregationType.SUM => new SumAggregator(position, jStream.getInputType, jStream.getExecutionEnvironment.getConfig) case _ => new ComparableAggregator( position, jStream.getInputType, aggregationType, true, jStream.getExecutionEnvironment.getConfig) } new DataStream[Product](jStream.reduce(reducer)).asInstanceOf[DataStream[T]] } // ------------------------------------------------------------------------ // Utilities // ------------------------------------------------------------------------ /** * Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning * is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]]. */ private[flink] def clean[F <: AnyRef](f: F): F = { new StreamExecutionEnvironment(javaStream.getExecutionEnvironment).scalaClean(f) } /** * Gets the output type. */ private def getInputType(): TypeInformation[T] = javaStream.getInputType }
DieBauer/flink
flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/AllWindowedStream.scala
Scala
apache-2.0
36,535
/*
 * Copyright 2021 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.accounts.frs102.boxes

import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._

/**
 * Accounts box AC115A ("Intangible assets - Goodwill - Cost - Additions") for FRS 102 filings.
 *
 * Wraps an optional integer amount supplied as user input. Validation accepts an absent
 * value and otherwise requires a monetary amount of at least zero; the detailed money
 * rules live in `validateMoney`, which is provided by the mixed-in validation traits
 * (defined elsewhere in the project).
 */
case class AC115A(value: Option[Int]) extends CtBoxIdentifier(name = "Intangible assets - Goodwill - Cost - Additions")
  with CtOptionalInteger
  with Input
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with Validators {

  // Single non-negative money check on the optional value, collected into the
  // standard error set. The box retriever is not consulted for this box.
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] =
    collectErrors(validateMoney(value, min = 0))
}
hmrc/ct-calculations
src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC115A.scala
Scala
apache-2.0
1,140
/**
 * Licensed to the Minutemen Group under one or more contributor license
 * agreements. See the COPYRIGHT file distributed with this work for
 * additional information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You may
 * obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * A framework agnostic authentication library for Scala that supports several authentication
 * methods, including OAuth1, OAuth2, OpenID, Credentials or custom authentication schemes.
 *
 * This package object exists solely to attach the library-level Scaladoc above to the
 * `silhouette` package; it declares no members of its own.
 */
package object silhouette
datalek/silhouette
silhouette/src/main/scala/silhouette/package.scala
Scala
apache-2.0
986
package lmxml package transforms package json import util.parsing.json._ object JSTransform { type Filter = (Any => Boolean) JSON.perThreadNumberParser = { (input: String) => if (input.contains(".")) input.toDouble else input.toInt } object Filters { def onJSON(fun: JSONType => Boolean) = fun.asInstanceOf[Filter] def onArray(fun: List[Any] => Boolean) = onJSON(j => fun(j.asInstanceOf[JSONArray].list)) def onObject(fun: Map[String, Any] => Boolean) = onJSON(j => // required for scala 2.8.x fun(j.asInstanceOf[JSONObject].obj.asInstanceOf[Map[String, Any]]) ) def onValue(fun: String => Boolean) = fun.asInstanceOf[Filter] } def apply(filters: (String, Filter)*) = new JSTransform(Map(filters :_*)) def apply(jsonStr: String): Option[Transform] = apply().get(jsonStr) } case class JSTransform(filters: Map[String, JSTransform.Filter]) { def parse(jsonStr: String) = get(jsonStr).getOrElse(Transform()) def get(jsonStr: String) = { val complete = (pros: Seq[(String,Processor)]) => Transform(pros: _*) val fullyTransform = transform _ andThen complete JSON.parseRaw(jsonStr).map(fullyTransform) } def tupleFilter(key: String, value: Any) = key -> filters.get(key).map(applyFilter(_, value)).getOrElse(element(value)) def applyFilter(filter: JSTransform.Filter, value: Any) = if (filter(value)) element(value) else Empty def transform(parsed: Any): Seq[(String, Processor)] = parsed match { case JSONObject(obj) => obj.toList.flatMap(transform) case (key: String, value) => Seq(tupleFilter(key, value)) case _ => Seq("element" -> element(parsed)) } def element(parsed: Any): Processor = parsed match { case obj: JSONObject => Values(transform(obj)) case JSONArray(list) => Foreach(list)(transform) case _ => Value(parsed) } }
philcali/lmxml
json/src/main/scala/json.scala
Scala
mit
1,874
package com.twitter.finagle.netty4.channel import com.twitter.finagle.Failure import com.twitter.finagle.stats.StatsReceiver import com.twitter.util.{Duration, Monitor, Stopwatch} import io.netty.buffer.ByteBuf import io.netty.channel.{ChannelDuplexHandler, ChannelHandlerContext, ChannelPromise} import io.netty.channel.ChannelHandler.Sharable import io.netty.util.AttributeKey import java.io.IOException import java.util.concurrent.atomic.AtomicLong import java.util.logging.{Level, Logger} private[channel] case class ChannelStats(bytesRead: AtomicLong, bytesWritten: AtomicLong) private[netty4] object ChannelStatsHandler { private[channel] val ConnectionStatsKey = AttributeKey.valueOf[ChannelStats]("channel_stats") private[channel] val ConnectionDurationKey = AttributeKey.valueOf[Stopwatch.Elapsed]("connection_duration") private[channel] val ChannelWasWritableKey = AttributeKey.valueOf[Boolean]("channel_has_been_writable") private[channel] val ChannelWritableDurationKey = AttributeKey.valueOf[Stopwatch.Elapsed]("channel_writable_duration") } /** * A [[io.netty.channel.ChannelDuplexHandler]] that tracks channel/connection * statistics. The handler is meant to be shared by all * [[io.netty.channel.Channel Channels]] within a Finagle client or * server in order to consolidate statistics across a number of channels. 
*/ @Sharable private[netty4] class ChannelStatsHandler(statsReceiver: StatsReceiver) extends ChannelDuplexHandler { import ChannelStatsHandler._ private[this] val log = Logger.getLogger(getClass.getName) private[this] val connectionCount: AtomicLong = new AtomicLong() private[this] val connects = statsReceiver.counter("connects") private[this] val connectionDuration = statsReceiver.stat("connection_duration") private[this] val connectionReceivedBytes = statsReceiver.stat("connection_received_bytes") private[this] val connectionSentBytes = statsReceiver.stat("connection_sent_bytes") private[this] val receivedBytes = statsReceiver.counter("received_bytes") private[this] val sentBytes = statsReceiver.counter("sent_bytes") private[this] val writable = statsReceiver.counter("socket_writable_ms") private[this] val unwritable = statsReceiver.counter("socket_unwritable_ms") private[this] val exceptions = statsReceiver.scope("exn") private[this] val closesCount = statsReceiver.counter("closes") private[this] val connections = statsReceiver.addGauge("connections") { connectionCount.get() } override def channelActive(ctx: ChannelHandlerContext): Unit = { ctx.attr(ChannelWasWritableKey).set(true) //netty channels start in writable state ctx.attr(ChannelWritableDurationKey).set(Stopwatch.start()) ctx.attr(ConnectionStatsKey).set(ChannelStats(new AtomicLong(0), new AtomicLong(0))) connects.incr() connectionCount.incrementAndGet() ctx.attr(ConnectionDurationKey).set(Stopwatch.start()) super.channelActive(ctx) } override def write(ctx: ChannelHandlerContext, msg: Object, p: ChannelPromise) { val channelWriteCount = ctx.attr(ConnectionStatsKey).get.bytesWritten msg match { case buffer: ByteBuf => val readableBytes = buffer.readableBytes channelWriteCount.getAndAdd(readableBytes) sentBytes.incr(readableBytes) case _ => log.warning("ChannelStatsHandler received non-channelbuffer write") } super.write(ctx, msg, p) } override def channelRead(ctx: ChannelHandlerContext, msg: Object) { 
msg match { case buffer: ByteBuf => val channelReadCount = ctx.attr(ConnectionStatsKey).get.bytesRead val readableBytes = buffer.readableBytes channelReadCount.getAndAdd(readableBytes) receivedBytes.incr(readableBytes) case _ => log.warning("ChannelStatsHandler received non-channelbuffer read") } super.channelRead(ctx, msg) } override def close(ctx: ChannelHandlerContext, p: ChannelPromise) { closesCount.incr() super.close(ctx, p) } override def channelInactive(ctx: ChannelHandlerContext) { val channelStats = ctx.attr(ConnectionStatsKey).get connectionReceivedBytes.add(channelStats.bytesRead.get) connectionSentBytes.add(channelStats.bytesWritten.get) val elapsed = ctx.attr(ConnectionDurationKey).get() connectionDuration.add(elapsed().inMilliseconds) connectionCount.decrementAndGet() super.channelInactive(ctx) } override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable) { exceptions.counter(cause.getClass.getName).incr() // If no Monitor is active, then log the exception so we don't fail silently. if (!Monitor.isActive) { val level = cause match { case t: IOException => Level.FINE case f: Failure => f.logLevel case _ => Level.WARNING } log.log(level, "ChannelStatsHandler caught an exception", cause) } super.exceptionCaught(ctx, cause) } override def channelWritabilityChanged(ctx: ChannelHandlerContext): Unit = { val isWritable = ctx.channel.isWritable() val wasWritableAttr = ctx.attr(ChannelWasWritableKey) if (isWritable != wasWritableAttr.get) { val writableDuration = ctx.attr(ChannelWritableDurationKey) val elapsed: Duration = writableDuration.get().apply() val stat = if (wasWritableAttr.get) writable else unwritable stat.incr(elapsed.inMilliseconds.toInt) wasWritableAttr.set(isWritable) writableDuration.set(Stopwatch.start()) } super.channelWritabilityChanged(ctx) } }
sveinnfannar/finagle
finagle-netty4/src/main/scala/com/twitter/finagle/netty4/channel/ChannelStatsHandler.scala
Scala
apache-2.0
5,664
package org.scalatest.examples.funsuite import org.scalatest.FunSuite class SetSuite extends FunSuite { test("An empty Set should have size 0") { assert(Set.empty.size === 0) } test("Invoking head on an empty Set should produce NoSuchElementException") { intercept[NoSuchElementException] { Set.empty.head } } }
hubertp/scalatest
examples/src/main/scala/org/scalatest/examples/funsuite/SetSuite.scala
Scala
apache-2.0
342
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.apollo.broker.protocol import org.apache.activemq.apollo.broker.Connector import org.apache.activemq.apollo.util.ClassFinder import org.fusesource.hawtbuf.Buffer import org.fusesource.hawtdispatch.transport.ProtocolCodec object ProtocolCodecFactory { abstract trait Provider { def id: String /** * @return an instance of the wire format. * */ def createProtocolCodec(connector: Connector): ProtocolCodec /** * @return true if this wire format factory is identifiable. An identifiable * protocol will first write a easy to identify header to the stream */ def isIdentifiable: Boolean /** * @return Returns the maximum length of the header used to discriminate the wire format if it * { @link #isIdentifiable()} * @throws UnsupportedOperationException If { @link #isIdentifiable()} is false */ def maxIdentificaionLength: Int /** * Called to test if this protocol matches the identification header. * * @param buffer The byte buffer representing the header data read so far. * @return true if the Buffer matches the protocol format header. 
*/ def matchesIdentification(buffer: Buffer): Boolean } final val providers: ClassFinder[ProtocolCodecFactory.Provider] = new ClassFinder[ProtocolCodecFactory.Provider]("META-INF/services/org.apache.activemq.apollo/protocol-codec-factory.index", classOf[ProtocolCodecFactory.Provider]) /** * Gets the provider. */ def get(name: String): ProtocolCodecFactory.Provider = { import scala.collection.JavaConversions._ for (provider <- providers.jsingletons) { if (name == provider.id) { return provider } } return null } }
chirino/activemq-apollo
apollo-broker/src/main/scala/org/apache/activemq/apollo/broker/protocol/ProtocolCodecFactory.scala
Scala
apache-2.0
2,572
class Test { def rawr(string: String): String = string } implicit final class RawrExt(val t: Test) { def rawr(int: Int): Int = int } val t = new Test val r0 = t.rawr(5) class TestP { def rawr[A](list: List[A]): List[A] = list } implicit final class RawrExtP(val t: TestP) { def rawr(int: Int): Int = int } val tt = new TestP val r1 = tt.rawr(5) // This doesn't compile
som-snytt/dotty
tests/pending/pos/overload-generic.scala
Scala
apache-2.0
384
package fringe import chisel3._ import chisel3.util._ import templates._ class ScatterBuffer( val streamW: Int, val d: Int, val streamV: Int, val burstSize: Int, val addrWidth: Int, val sizeWidth: Int, val readResp: DRAMReadResponse ) extends Module { class MetaData extends Bundle { val valid = Bool() override def cloneType(): this.type = { new MetaData().asInstanceOf[this.type] } } class ScatterData extends Bundle { val data = UInt(streamW.W) val meta = new MetaData override def cloneType(): this.type = { new ScatterData().asInstanceOf[this.type] } } val countWidth = 16 val v = readResp.rdata.getWidth / streamW val fData = Module(new FIFOCore(new ScatterData, d, v, true)) fData.io.config.chainRead := false.B fData.io.config.chainWrite := false.B val fCmd = Module(new FIFOCore(new Command(addrWidth, sizeWidth, 0), fData.bankSize, 1, true)) fCmd.io.config.chainRead := false.B fCmd.io.config.chainWrite := false.B val fCount = Module(new FIFOCore(UInt(countWidth.W), fData.bankSize, 1, true)) fCount.io.config.chainRead := false.B fCount.io.config.chainWrite := false.B class ScatterBufferIO extends Bundle { class WData extends Bundle { val data = Vec(v, UInt(streamW.W)) val count = UInt(countWidth.W) val cmd = new Command(addrWidth, sizeWidth, 0) override def cloneType(): this.type = { new WData().asInstanceOf[this.type] } } val fifo = new FIFOBaseIO(new WData, fData.bankSize, 1) val rresp = Input(Valid(readResp)) val hit = Output(Bool()) val complete = Output(Bool()) } val io = IO(new ScatterBufferIO) val cmdAddr = Wire(new BurstAddr(addrWidth, streamW, burstSize)) cmdAddr.bits := io.fifo.enq(0).cmd.addr fData.io.enq.zipWithIndex.foreach { case (e, i) => e.meta.valid := (cmdAddr.wordOffset === i.U) e.data := io.fifo.enq(0).data(i) } fCmd.io.enq(0) := io.fifo.enq(0).cmd fCount.io.enq(0) := 1.U val enqVld = io.fifo.enqVld & ~io.hit fData.io.enqVld := enqVld fCmd.io.enqVld := enqVld fCount.io.enqVld := enqVld val (issueHits, respHits) = fCmd.io.banks match { case Some(b) 
=> b.zipWithIndex.map { case (bank, i) => val addr = Wire(new BurstAddr(addrWidth, streamW, burstSize)) addr.bits := bank(0).rdata.addr val valid = bank(0).valid val issueHit = valid & (addr.burstTag === cmdAddr.burstTag) val respHit = valid & (addr.burstTag === io.rresp.bits.tag.uid) & io.rresp.valid (issueHit, respHit) }.unzip case None => throw new Exception } io.hit := issueHits.reduce { _|_ } fData.io.banks match { case Some(b) => b.zipWithIndex.foreach { case (bank, i) => val count = fCount.io.banks.get.apply(i).apply(0) val wen = issueHits(i) & io.fifo.enqVld count.wen := wen count.wdata := count.rdata + 1.U bank.zipWithIndex.foreach { case (d, j) => val writeWord = UIntToOH(cmdAddr.wordOffset)(j) & wen val writeResp = respHits(i) & ~d.rdata.meta.valid d.wen := writeWord | writeResp val rdata = Utils.vecWidthConvert(io.rresp.bits.rdata, streamW) d.wdata.data := Mux(writeWord, io.fifo.enq(0).data(j), rdata(j)) d.wdata.meta.valid := true.B } } case None => throw new Exception } io.fifo.deq(0).data := fData.io.deq.map { _.data } io.fifo.deq(0).cmd := fCmd.io.deq(0) io.fifo.deq(0).count := fCount.io.deq(0) io.fifo.full := fData.io.full io.complete := fData.io.deq.map { _.meta.valid }.reduce { _&_ } & ~fData.io.empty io.fifo.empty := fData.io.empty val deqVld = io.fifo.deqVld & io.complete fData.io.deqVld := deqVld fCmd.io.deqVld := deqVld fCount.io.deqVld := deqVld }
stanford-ppl/spatial-lang
spatial/core/resources/chiselgen/template-level/fringeHW/ScatterBuffer.scala
Scala
mit
3,825
/* * Copyright (c) 2014. Regents of the University of California * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.berkeley.cs.amplab.avocado.postprocessing import org.apache.commons.configuration.HierarchicalConfiguration import org.apache.spark.rdd.RDD import edu.berkeley.cs.amplab.adam.models.ADAMVariantContext import edu.berkeley.cs.amplab.avocado.stats.AvocadoConfigAndStats object Postprocessor { private val stages = List[PostprocessingStage](FilterStrandBias) assert(stages.map(_.stageName).length == stages.map(_.stageName).distinct.length, "Postprocessing stages have duplicated names.") def apply (rdd: RDD[ADAMVariantContext], stageName: String, stageAlgorithm: String, stats: AvocadoConfigAndStats, config: HierarchicalConfiguration): RDD[ADAMVariantContext] = { val stage = stages.find(_.stageName == stageAlgorithm) stage match { case Some(s) => { val c = config.configurationAt(stageName) s.apply(rdd, stats, c) } case None => throw new IllegalArgumentException("Postprocessing stage " + stageAlgorithm + "does not exist.") } } }
fnothaft/avocado
avocado-core/src/main/scala/edu/berkeley/cs/amplab/avocado/postprocessing/Postprocessor.scala
Scala
apache-2.0
1,710
object Test { def main(args: Array[String]): Unit = { def z: Int = 5 Macro.ff(z, 5) } }
som-snytt/dotty
tests/pos-macros/i3898b/quoted_2.scala
Scala
apache-2.0
100
package unluac.parse import java.math.BigInteger import java.nio.ByteBuffer import java.nio.ByteOrder case class BIntegerType(intSize: Int) extends BObjectType[BInteger] { def raw_parse(buffer: ByteBuffer): BInteger = { var value: BInteger = null intSize match { case 0 => value = new BInteger(0) case 1 => value = new BInteger(buffer.get) case 2 => value = new BInteger(buffer.getShort) case 4 => value = new BInteger(buffer.getInt) case _ => { val bytes: Array[Byte] = new Array[Byte](intSize) var start: Int = 0 var delta: Int = 1 if (buffer.order eq ByteOrder.LITTLE_ENDIAN) { start = intSize - 1 delta = -1 } { var i: Int = start while (i >= 0 && i < intSize) { { bytes(i) = buffer.get } i += delta } } value = new BInteger(new BigInteger(bytes)) } } value } def parse(buffer: ByteBuffer, header: BHeader): BInteger = { val value: BInteger = raw_parse(buffer) if (header.debug) { System.out.println("-- parsed <integer> " + value.asInt) } value } }
danielwegener/unluac-scala
shared/src/main/scala/unluac/parse/BIntegerType.scala
Scala
mit
1,235
package test import org.specs2.mutable._ import play.api.test._ import play.api.test.Helpers._ class ApplicationSpec extends SpecificationWithJUnit { "Application" should { "send 404 on a bad request" in { running(FakeApplication()) { route(FakeRequest(GET, "/boum")) must beNone } } "render an empty form on index" in { running(FakeApplication()) { val home = route(FakeRequest(GET, "/")).get status(home) must equalTo(OK) contentType(home) must beSome.which(_ == "text/html") } } "send BadRequest on form error" in { running(FakeApplication()) { val home = route(FakeRequest(GET, "/hello?name=Bob&repeat=xx")).get status(home) must equalTo(BAD_REQUEST) contentType(home) must beSome.which(_ == "text/html") } } "say hello" in { running(FakeApplication()) { val home = route(FakeRequest(GET, "/hello?name=Bob&repeat=10")).get status(home) must equalTo(OK) contentType(home) must beSome.which(_ == "text/html") } } } }
play2-maven-plugin/play2-maven-test-projects
play21/war/helloworld-war-servlet-3.0/test/ApplicationSpec.scala
Scala
apache-2.0
1,119
package nya.kitsunyan.littlechenbot.command.common import nya.kitsunyan.littlechenbot.database.LocaleConfigurationData import nya.kitsunyan.littlechenbot.util._ import info.mukel.telegrambot4s.api._ import info.mukel.telegrambot4s.methods._ import info.mukel.telegrambot4s.models._ import scala.concurrent.Future import scala.language.implicitConversions trait Command extends BotBase with AkkaDefaults with GlobalExecutionContext { case class Bot(nickname: String, id: Long) val bot: Future[Bot] val workspace: Option[Long] val botOwner: Option[Long] implicit val binaries: Binaries case class FilterChat(soft: Boolean, hard: Boolean, filtered: Boolean = false) def filterChat(message: Message): FilterChat = FilterChat(true, true) case class Description(commands: List[String], text: String) def prependDescription(list: List[Description], locale: Locale): List[Description] = list def handleException(causalMessage: Option[Message])(e: Throwable): Unit def handleError(during: Option[String])(causalMessage: Message) (implicit message: Message, arguments: Arguments, locale: Locale): PartialFunction[Throwable, Future[Status]] = { case e: RecoverException => e.future case e: CommandException => replyQuote(e.getMessage, e.parseMode) Future.successful(Status.Fail) case e: Exception => handleErrorCommon(e, causalMessage, during) Future.successful(Status.Fail) } def handleErrorCommon(e: Exception, causalMessage: Message, during: Option[String]) (implicit message: Message, locale: Locale): Future[Any] = { handleException(Some(causalMessage))(e) val anExceptionWasThrown = during .map(locale.AN_EXCEPTION_WAS_THROWN_FORMAT.format(_)) .getOrElse(locale.AN_EXCEPTION_WAS_THROWN) replyQuote(s"$anExceptionWasThrown\\n${userMessageForException(e)}") } def userMessageForException(e: Throwable): String = { e match { case e: UserMessageException => e.userMessage.getOrElse(e.getCause.getClass.getName) case e: java.io.IOException => val message = Option(e.getMessage) .filter(s => 
!s.contains("http://") && !s.contains("https://")) .filter("\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+".r.findFirstIn(_).isEmpty) .map(": " + _) .getOrElse("") s"${e.getClass.getName}$message" case e => e.getClass.getName } } private def getLocale(chatId: Long): Future[Locale] = { LocaleConfigurationData.get(chatId) .map(_.getOrElse(Locale.English)) } private def filterCommands(commands: List[String], botNickname: String, text: String): Option[String] = { commands.foldLeft[Option[String]](None) { (a, command) => a orElse { val shortCommand = "/" + command val longCommand = shortCommand + "@" + botNickname Seq(shortCommand, longCommand).map { fullCommand => if (text.equals(fullCommand)) { Some("") } else if (text.startsWith(fullCommand) && text.charAt(fullCommand.length) <= ' ') { Some(text.substring(fullCommand.length + 1)) } else { None } }.reduceLeft(_ orElse _) } } } final def filterMessage(message: ExtendedMessage, commands: List[String], success: (Message, Arguments, Locale) => Future[Status], fail: (ExtendedMessage, FilterChat) => Future[Status], filterChat: FilterChat, allow: FilterChat => Boolean): Future[Status] = { bot.flatMap { bot => filterCommands(commands, bot.nickname, message.commandText).map { commandLine => if (allow(filterChat)) { getLocale(message.initial.chat.id).flatMap(success(message.initial, Arguments(commandLine), _)) } else { fail(message, filterChat.copy(filtered = true)) } }.getOrElse(fail(message, filterChat)) } } class RecoverException(val future: Future[Status]) extends Exception class CommandException(message: String, val parseMode: Option[ParseMode.ParseMode] = None) extends Exception(message) case class ExtendedMessage(initial: Message, firstCommand: Boolean, commandText: String) sealed trait Status object Status { case object Cancel extends Status private[Command] case class SuccessMatch(arguments: Arguments) extends Status private[Command] case class FailMatch(arguments: Arguments) extends Status def Success(implicit arguments: 
Arguments): Status = SuccessMatch(arguments) def Fail(implicit arguments: Arguments): Status = FailMatch(arguments) } final override def receiveMessage(message: Message): Unit = { onMessageExtend(message, false, message.text orElse message.caption) } private def onMessageExtend(message: Message, firstCommand: Boolean, commandText: Option[String]): Future[Status] = { commandText .map(_.trim) .filter(_.nonEmpty) .map(t => handleMessage(ExtendedMessage(message, firstCommand, t), filterChat(message)) .recover((handleException(Some(message))(_)) -> Status.Cancel)).getOrElse(Future.successful(Status.Cancel)) .flatMap { case Status.SuccessMatch(arguments) => arguments.nextCommand match { case Some((Arguments.NextMode.OnSuccess, commandText)) => onMessageExtend(message, false, Some(commandText)) case _ => Future.successful(Status.Cancel) } case Status.FailMatch(arguments) => arguments.nextCommand match { case Some((Arguments.NextMode.OnFail, commandText)) => onMessageExtend(message, false, Some(commandText)) case _ => Future.successful(Status.Cancel) } case Status.Cancel => Future.successful(Status.Cancel) } } def handleMessage(message: ExtendedMessage, filterChat: FilterChat): Future[Status] = { if (filterChat.hard && !filterChat.soft && filterChat.filtered) { getLocale(message.initial.chat.id) .flatMap(handleNotPermittedWarning(message.initial, _)) .statusMap(Status.Cancel) } else { Future.successful(Status.Cancel) } } def handleNotPermittedWarning(implicit message: Message, locale: Locale): Future[Any] = Future.unit def checkArguments(arguments: Arguments, maxFreeValues: Int, possibleArguments: String*) (implicit locale: Locale): Future[Unit] = { val invalidArguments = arguments.keySet.diff(possibleArguments.toSet[String]) if (invalidArguments.nonEmpty || arguments.free.length > maxFreeValues) { val printInvalidArgument = clearMarkup(invalidArguments.find(_.nonEmpty) 
.getOrElse(arguments.free.flatMap(_.asString.flatMap(_.split("\\n").headOption)).find(_.nonEmpty).getOrElse(""))) Future.failed(new CommandException(s"${locale.INVALID_ARGUMENT_FS}: $printInvalidArgument.")) } else { Future.unit } } def replyQuote(text: String, parseMode: Option[ParseMode.ParseMode] = None) (implicit message: Message): Future[Message] = { request(SendMessage(message.source, text, parseMode, replyToMessageId = Some(message.messageId))) } def reply(text: String, parseMode: Option[ParseMode.ParseMode] = None) (implicit message: Message): Future[Message] = { request(SendMessage(message.source, text, parseMode)) } def replyMan(description: String, list: List[(List[String], Option[String], String)]) (implicit message: Message): Future[Message] = { val argumentsListText = list.map { case (parameters, values, description) => val parametersFull = "`" + parameters.reduce(_ + "`, `" + _) + "`" val maxLength = 30 val space = " " val (_, descriptionFull) = description.split(" +").foldLeft((maxLength, "")) { (acc, value) => val (line, result) = acc val length = value.length + (if (line > 0) 1 else 0) if (line + length > maxLength && line > 0) { (value.length, s"$result\\n$space$value") } else { (line + length, s"$result $value") } } val valuesFull = values.map(v => s" `[$v]`").getOrElse("") s"$parametersFull$valuesFull$descriptionFull" }.foldLeft("")(_ + "\\n" + _) val text = if (argumentsListText.nonEmpty) s"$description\\n$argumentsListText" else description replyQuote(text, Some(ParseMode.Markdown)) } def clearMarkup(text: String): String = { text.replaceAll("[`*_\\\\[\\\\]()]", "") } def trimCaption(caption: String): String = { // 200 is maximum caption length for Telegram if (caption.length > 200) { val index = caption.lastIndexOf('\\n', 200) if (index >= 0) { caption.substring(0, index) } else { caption } } else { caption } } object WorkspaceRequest { def apply(command: String)(id: Int): String = { s"[request:$command:$id]" } def parse(command: 
String)(message: String)(implicit locale: Locale): Option[Int] = { "\\\\[request:(\\\\w+?):(-?\\\\d+)\\\\]".r.findFirstMatchIn(message) .map(_.subgroups) .map(g => (g(0), g(1).toInt)) .map { case (parsedCommand, id) => if (parsedCommand == command) { id } else { throw new CommandException(locale.DIFFERENT_COMMANDS_FORMAT.format(s"`/$command`", s"`/$parsedCommand`"), Some(ParseMode.Markdown)) } } } } implicit def anyThrowable[T](function: Throwable => T): PartialFunction[Throwable, T] = { case e => function(e) } implicit def anyThrowableWithFallback[T](functionAndFallback: (Throwable => Unit, T)): PartialFunction[Throwable, T] = { case e => functionAndFallback match { case (function, result) => function(e) result } } class RecoverableFuture[A, B, R](future: Future[(A, B)], callback: (A, B) => Future[R]) { def recoverWith[T >: R](defaultValue: A)(recover: A => PartialFunction[Throwable, Future[T]]): Future[T] = { future.flatMap { case (a, b) => callback(a, b).recoverWith(recover(a)) }.recoverWith(recover(defaultValue)) } } class ScopeFuture[A, B](future: Future[(A, B)]) { def scopeFlatMap[R](callback: (A, B) => Future[R]): RecoverableFuture[A, B, R] = { new RecoverableFuture(future, callback) } } implicit def scopeFuture[A, B](future: Future[(A, B)]): ScopeFuture[A, B] = { new ScopeFuture(future) } class UnitFuture(future: Future[Unit]) { def unitMap[T](f: => T): Future[T] = future.map(_ => f) def unitFlatMap[T](f: => Future[T]): Future[T] = future.flatMap(_ => f) } implicit def unitFuture(future: Future[Unit]): UnitFuture = { new UnitFuture(future) } class StatusFuture[T](future: Future[T]) { def statusMap(status: Status): Future[Status] = future.map(_ => status) } implicit def statusFuture[T](future: Future[T]): StatusFuture[T] = { new StatusFuture(future) } }
kitsunyan/Little-Chen-Bot
src/nya/kitsunyan/littlechenbot/command/common/Command.scala
Scala
mit
10,820
package jp.leafytree.android.hello class FlavorScala { def name() = { "flavor2Scala" } }
saturday06/gradle-android-scala-plugin
src/integTest/simpleFlavor/src/flavor2/scala/jp/leafytree/android/hello/FlavorScala.scala
Scala
apache-2.0
98
package fr.hsyl20.sme.tutorial import com.jme3.app.SimpleApplication import com.jme3.material.Material import com.jme3.math.ColorRGBA import com.jme3.math.Vector3f import com.jme3.scene.Geometry import com.jme3.scene.shape.Box /** Sample 4 - how to trigger repeating actions from the main update loop. * In this example, we make the player character rotate. */ class HelloLoop extends SimpleApplication { protected var player:Geometry = null override def simpleInitApp: Unit = { val b = new Box(Vector3f.ZERO, 1, 1, 1) player = new Geometry("blue cube", b) val mat = new Material(assetManager, "Common/MatDefs/Misc/Unshaded.j3md") mat.setColor("Color", ColorRGBA.Blue) player.setMaterial(mat) rootNode.attachChild(player) } /* This is the update loop */ override def simpleUpdate(tpf:Float): Unit = { // make the player rotate player.rotate(0, 2*tpf, 0) } } object HelloLoop { def main(args:Array[String]): Unit = { import java.util.logging.{Logger,Level} Logger.getLogger("").setLevel(Level.WARNING); val app = new HelloLoop app.start } }
hsyl20/SME
src/main/scala/tutorial/HelloLoop.scala
Scala
gpl-3.0
1,115
package domain.model /** * GraPHPizer source code analytics engine * Copyright (C) 2015 Martin Helmich <kontakt@martin-helmich.de> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ import java.util.UUID import org.joda.time.Instant case class Snapshot(id: UUID, timestamp: Instant, size: Long) { }
martin-helmich/graphpizer-server
app/domain/model/Snapshot.scala
Scala
gpl-3.0
907
package com.microsoft.partnercatalyst.fortis.spark.transforms.locations.dto import com.microsoft.partnercatalyst.fortis.spark.dto.Location case class FeatureServiceResponse(features: List[FeatureServiceFeature]) case class FeatureServiceFeature(id: String, name: String, layer: String, centroid: Option[List[Double]] = None) object FeatureServiceFeature { val DefaultLatitude = -1d val DefaultLongitude = -1d def toLocation(feature: FeatureServiceFeature): Location = { Location( wofId = feature.id, name = feature.name, layer = feature.layer, longitude = feature.centroid.map(_.head).getOrElse(DefaultLongitude), latitude = feature.centroid.map(_.tail.head).getOrElse(DefaultLatitude)) } }
CatalystCode/project-fortis-spark
src/main/scala/com/microsoft/partnercatalyst/fortis/spark/transforms/locations/dto/Json.scala
Scala
mit
737
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.hive.test.TestHive import org.apache.spark.sql.hive.test.TestHive._ case class Data(a: Int, B: Int, n: Nested) case class Nested(a: Int, B: Int) /** * A set of test cases expressed in Hive QL that are not covered by the tests included in the hive distribution. 
*/ class HiveResolutionSuite extends HiveComparisonTest { createQueryTest("table.attr", "SELECT src.key FROM src ORDER BY key LIMIT 1") createQueryTest("database.table", "SELECT key FROM default.src ORDER BY key LIMIT 1") createQueryTest("database.table table.attr", "SELECT src.key FROM default.src ORDER BY key LIMIT 1") createQueryTest("alias.attr", "SELECT a.key FROM src a ORDER BY key LIMIT 1") createQueryTest("subquery-alias.attr", "SELECT a.key FROM (SELECT * FROM src ORDER BY key LIMIT 1) a") createQueryTest("quoted alias.attr", "SELECT `a`.`key` FROM src a ORDER BY key LIMIT 1") createQueryTest("attr", "SELECT key FROM src a ORDER BY key LIMIT 1") createQueryTest("alias.star", "SELECT a.* FROM src a ORDER BY key LIMIT 1") test("case insensitivity with scala reflection") { // Test resolution with Scala Reflection TestHive.sparkContext.parallelize(Data(1, 2, Nested(1,2)) :: Nil) .registerAsTable("caseSensitivityTest") hql("SELECT a, b, A, B, n.a, n.b, n.A, n.B FROM caseSensitivityTest") } /** * Negative examples. Currently only left here for documentation purposes. * TODO(marmbrus): Test that catalyst fails on these queries. */ /* SemanticException [Error 10009]: Line 1:7 Invalid table alias 'src' createQueryTest("table.*", "SELECT src.* FROM src a ORDER BY key LIMIT 1") */ /* Invalid table alias or column reference 'src': (possible column names are: key, value) createQueryTest("tableName.attr from aliased subquery", "SELECT src.key FROM (SELECT * FROM src ORDER BY key LIMIT 1) a") */ }
zhangjunfang/eclipse-dir
spark/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
Scala
bsd-2-clause
2,768
import java.nio.file.{DirectoryStream, Files, Path, Paths}
import java.util.stream.Stream

import org.json4s.jackson.JsonMethods._
import org.json4s.DefaultFormats

import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

/**
 * Counts GitHub event types across every file in the local `github` directory.
 *
 * Each file (one JSON event per line) is processed in its own Future; the
 * per-file count maps are then merged into a single total, which is printed
 * to stdout sorted by insertion order.
 */
object GithubControlledFutures {

  // json4s needs an implicit Formats in scope for `extract`.
  implicit val formats: DefaultFormats.type = DefaultFormats

  // Directory containing the GitHub archive files.
  val path: Path = Paths.get("github")

  def main(args: Array[String]): Unit = {
    val ghDirStream: DirectoryStream[Path] = Files.newDirectoryStream(path)
    // FIX: the original closed the stream only on the happy path; an exception
    // while listing files or awaiting the result leaked the DirectoryStream.
    try {
      // Materialize the listing before the stream is closed.
      val filesList: Seq[Path] = ghDirStream.asScala.toSeq

      // One Future per file; Future.sequence fails fast if any file fails.
      val eventCounts: Future[Seq[Map[String, BigDecimal]]] =
        Future.sequence(filesList.map(eventCountForFile))

      val finalCountsFuture: Future[Map[String, BigDecimal]] = eventCounts.map {
        _.foldLeft(Map[String, BigDecimal]())(addMaps)
      }

      // Blocking here is acceptable: this is the edge of the program.
      val result: Map[String, BigDecimal] = Await.result(finalCountsFuture, 5.minute)
      result.foreach {
        case (k, v) => println(s"$v - $k")
      }
    } finally {
      // Make sure that dirStream is closed, even on failure.
      ghDirStream.close()
    }
  }

  /**
   * Asynchronously counts event types in one file.
   *
   * The underlying java.util.stream is closed in `andThen`, i.e. regardless of
   * whether the counting Future succeeds or fails.
   */
  def eventCountForFile(p: Path): Future[Map[String, BigDecimal]] = {
    val lineStream: Stream[String] = Files.lines(p)
    Future {
      println(s"Working on file $p")
      val lines = lineStream.iterator().asScala
      lines.foldLeft(Map[String, BigDecimal]()) {
        (state: Map[String, BigDecimal], current: String) =>
          // Each line is a JSON event; its "type" field names the event kind.
          val eventName: String = (parse(current) \ "type").extract[String]
          addMaps(state, Map(eventName -> 1))
      }
    } andThen {
      case _ =>
        println(s"Closing stream for $p")
        lineStream.close()
    }
  }

  /** Merges two count maps, summing values for keys present in both. */
  def addMaps(m1: Map[String, BigDecimal], m2: Map[String, BigDecimal]): Map[String, BigDecimal] =
    m1 ++ m2.map { case (k, v) => k -> (v + m1.getOrElse(k, 0)) }
}
vijaykiran/ghstats
src/main/scala/GithubControlledFutures.scala
Scala
apache-2.0
1,830
/*
 * Copyright 2006-2014 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package net.liftweb
package db

import java.sql.Connection

import net.liftweb.common._
import net.liftweb.util.ConnectionIdentifier

/**
 * Vend JDBC connections.
 */
trait ConnectionManager {

  /**
   * Obtain a connection for the given identifier.
   *
   * @param name the connection identifier to vend a connection for
   * @return a `Full` connection, or `Empty`/`Failure` if none can be provided
   */
  def newConnection(name: ConnectionIdentifier): Box[Connection]

  /**
   * Return a connection to the manager (e.g. close it or put it back in a pool).
   *
   * Explicit `: Unit` result type added — procedure syntax is deprecated in
   * current Scala and the explicit type documents the side-effecting contract.
   */
  def releaseConnection(conn: Connection): Unit

  /**
   * Optionally vend a `SuperConnection`; implementations that do not support
   * this keep the default of `Empty`.
   */
  def newSuperConnection(name: ConnectionIdentifier): Box[SuperConnection] = Empty
}
lzpfmh/framework-2
persistence/db/src/main/scala/net/liftweb/db/ConnectionManager.scala
Scala
apache-2.0
1,002
package fpinscala.streamingio // IO aka IO3.Free[Par,_] import fpinscala.iomonad.{IO,TailRec,Monad,Free,unsafePerformIO} import scala.language.higherKinds import scala.language.postfixOps import scala.language.implicitConversions import scala.collection.immutable.{Stream => CollectionStream} import fpinscala.laziness.{Stream => FPStream} object GeneralizedStreamTransducers { /* Our generalized process type is parameterized on the protocol used for communicating with the driver. This works similarly to the `IO` type we defined in chapter 13. The `Await` constructor emits a request of type `F[A]`, and receives a response of type `Either[Throwable,A]`: trait Process[F,A] case class Await[F[_],A,O]( req: F[A], recv: Either[Throwable,A] => Process[F,O]) extends Process[F,O] case class Halt[F[_],O](err: Throwable) extends Process[F,O] case class Emit[F[_],O](head: O, tail: Process[F,O]) extends Process[F,O] The `Await` constructor may now receive a successful result or an error. The `Halt` constructor now has a _reason_ for termination, which may be either normal termination indicated by the special exception `End`, forceful terimation, indicated by the special exception `Kill`, or some other error. We'll use the improved `Await` and `Halt` cases together to ensure that all resources get released, even in the event of exceptions. */ import fpinscala.iomonad.IO3 import fpinscala.iomonad.Task // translate an IO into a Task // we have MonadCatch[Task] already val ioMonad = new MonadCatch[IO] { // val monadWithoutCatch = IO3.freeMonad[IO] def unit[A](a: => A): IO[A] = IO(a) // defined in package object iomonad //monadWithoutCatch.unit(a) def flatMap[A,B](a: IO[A])(f: A => IO[B]): IO[B] = a.flatMap(f) //monadWithoutCatch.flatMap(a)(f) /* recall IO[A] = Free[Par,A] IO does not have built-in containment or handling of failure. It can only by handled by adding these features to type A, i.e. 
Option[A] */ def attempt[A](a: IO[A]): IO[Either[Throwable,A]] = a.map((a: A) => Right(a)) def fail[A](t: Throwable): IO[A] = throw t // not graceful! note this eludes the typechecker } trait Process[F[_],O] { import Process._ /* * Many of the same operations can be defined for this generalized * `Process` type, regardless of the choice of `F`. */ def map[O2](f: O => O2): Process[F,O2] = this match { case Await(req,recv) => Await(req, recv andThen (_ map f)) case Emit(h, t) => Try { Emit(f(h), t map f) } case Halt(err) => Halt(err) } def ++(p: => Process[F,O]): Process[F,O] = this.onHalt { case End => Try(p) // we consult `p` only on normal termination case err => Halt(err) } /* * Like `++`, but _always_ runs `p`, even if `this` halts with an error. */ def onComplete(p: => Process[F,O]): Process[F,O] = this.onHalt { case End => p.asFinalizer case err => p.asFinalizer ++ Halt(err) // we always run `p`, but preserve any errors } def asFinalizer: Process[F,O] = this match { case Emit(h, t) => Emit(h, t.asFinalizer) case Halt(e) => Halt(e) case Await(req,recv) => await(req) { case Left(Kill) => this.asFinalizer case x => recv(x) } } def onHalt(f: Throwable => Process[F,O]): Process[F,O] = this match { case Halt(e) => Try(f(e)) case Emit(h, t) => Emit(h, t.onHalt(f)) case Await(req,recv) => Await(req, recv andThen (_.onHalt(f))) } /* * Anywhere we _call_ `f`, we catch exceptions and convert them to `Halt`. * See the helper function `Try` defined below. 
*/ def flatMap[O2](f: O => Process[F,O2]): Process[F,O2] = this match { case Halt(err: Throwable) => Halt(err) case Emit(o: O, t: Process[F,O]) => Try(f(o)) ++ t.flatMap(f) case Await( req: F[_], recv: Function1[Either[Throwable,_], Process[F,O]] ) => Await(req, recv andThen (_ flatMap f)) } def repeat: Process[F,O] = this ++ this.repeat def repeatNonempty: Process[F,O] = { val cycle = (this.map(o => Some(o): Option[O]) ++ emit(None)).repeat // cut off the cycle when we see two `None` values in a row, as this // implies `this` has produced no values during an iteration val trimmed = cycle |> window2 |> (takeWhile { case (Some(None), None) => false case _ => true }) trimmed.map(_._2).flatMap { case None => Halt(End) case Some(o) => emit(o) } } //with IO3.freeMonad[IO] /* Exercise 10: This function is defined only if given a `MonadCatch[F]`. Unlike the simple `runLog` interpreter defined in the companion object below, this is not tail recursive and responsibility for stack safety is placed on the `Monad` instance. This will replace the other runLog definition, provided a monad MonadCatch[IO] --- runLog(implicit monadCatch: MonadCatch[IO]): IO[IndexedSeq[O]] How about a simpler example... a Par that produces output runLog(implicit monadCatch: MonadCatch[Par]): Par[IndexedSeq[O]] Other examples than IO will make more sense after understanding the possibilities of this parametrized Process. " In order to make Process extensible, we’ll parameterize on the protocol used for issuing requests of the driver. 
" */ def runLog(implicit monadCatch: MonadCatch[F]): F[IndexedSeq[O]] = this match { case Await( req: F[_], recv: Function1[Either[Throwable,_],Process[F,O]] ) => { val attempted = monadCatch.attempt(req) // F[Either[Throwable,_]] val fNext = monadCatch.map(attempted){ (either: Either[Throwable,_])=> recv(either) } // F[Process[F,O]] //next.runLog(monadCatch): F[IndexedSeq[O]] val fIndexedSeq = monadCatch.flatMap(fNext){ (proc: Process[F,O]) => proc.runLog(monadCatch) } fIndexedSeq } case Emit(h, t) => { // merge h: O and IndexedSeq[O] val fO: F[O] = monadCatch.unit(h) val fIndexedSeqO: F[IndexedSeq[O]] = t.runLog(monadCatch) val merged = monadCatch.map2(fO,fIndexedSeqO){ (o: O, iso: IndexedSeq[O]) => iso :+ o } merged } case Halt(err) => { throw err // caught by case Await? // err match { // case End => monadCatch.unit(IndexedSeq[O]()) } } /* * We define `Process1` as a type alias - see the companion object * for `Process` below. Using that, we can then define `|>` once * more. The definition is extremely similar to our previous * definition. We again use the helper function, `feed`, to take * care of the case where `this` is emitting values while `p2` * is awaiting these values. * * The one subtlety is we make sure that if `p2` halts, we * `kill` this process, giving it a chance to run any cleanup * actions (like closing file handles, etc). 
*/ def |>[O2](p2: Process1[O,O2]): Process[F,O2] = { p2 match { case Halt(e) => this.kill onHalt { e2 => Halt(e) ++ Halt(e2) } case Emit(h, t) => Emit(h, this |> t) case Await(req,recv) => this match { case Halt(err) => Halt(err) |> recv(Left(err)) case Emit(h,t) => t |> Try(recv(Right(h))) case Await(req0,recv0) => await(req0)(recv0 andThen (_ |> p2)) } } } @annotation.tailrec final def kill[O2]: Process[F,O2] = this match { case Await(req,recv) => recv(Left(Kill)).drain.onHalt { case Kill => Halt(End) // we convert the `Kill` exception back to normal termination case e => Halt(e) } case Halt(e) => Halt(e) case Emit(h, t) => t.kill } /** Alias for `this |> p2`. */ def pipe[O2](p2: Process1[O,O2]): Process[F,O2] = this |> p2 final def drain[O2]: Process[F,O2] = this match { case Halt(e) => Halt(e) case Emit(h, t) => t.drain case Await(req,recv) => Await(req, recv andThen (_.drain)) } def filter(f: O => Boolean): Process[F,O] = this |> Process.filter(f) def take(n: Int): Process[F,O] = this |> Process.take(n) def once: Process[F,O] = take(1) /* * Use a `Tee` to interleave or combine the outputs of `this` and * `p2`. This can be used for zipping, interleaving, and so forth. * Nothing requires that the `Tee` read elements from each * `Process` in lockstep. It could read fifty elements from one * side, then two elements from the other, then combine or * interleave these values in some way, etc. * * This definition uses two helper functions, `feedL` and `feedR`, * which feed the `Tee` in a tail-recursive loop as long as * it is awaiting input. 
*/ def tee[O2,O3](p2: Process[F,O2])(t: Tee[O,O2,O3]): Process[F,O3] = { t match { case Halt(e) => this.kill onComplete p2.kill onComplete Halt(e) case Emit(h,t) => Emit(h, (this tee p2)(t)) case Await(side, recv) => side.get match { case Left(isO) => this match { case Halt(e) => p2.kill onComplete Halt(e) case Emit(o,ot) => (ot tee p2)(Try(recv(Right(o)))) case Await(reqL, recvL) => await(reqL)(recvL andThen (this2 => this2.tee(p2)(t))) } case Right(isO2) => p2 match { case Halt(e) => this.kill onComplete Halt(e) case Emit(o2,ot) => (this tee ot)(Try(recv(Right(o2)))) case Await(reqR, recvR) => await(reqR)(recvR andThen (p3 => this.tee(p3)(t))) } } } } def zipWith[O2,O3](p2: Process[F,O2])(f: (O,O2) => O3): Process[F,O3] = (this tee p2)(Process.zipWith(f)) def zip[O2](p2: Process[F,O2]): Process[F,(O,O2)] = zipWith(p2)((_,_)) def to[O2](sink: Sink[F,O]): Process[F,Unit] = join { (this zipWith sink)((o,f) => f(o)) } def through[O2](p2: Channel[F, O, O2]): Process[F,O2] = join { (this zipWith p2)((o,f) => f(o)) } } object Process { case class Await[F[_],A,O]( req: F[A], recv: Either[Throwable,A] => Process[F,O]) extends Process[F,O] case class Emit[F[_],O]( head: O, tail: Process[F,O]) extends Process[F,O] case class Halt[F[_],O](err: Throwable) extends Process[F,O] def emit[F[_],O]( head: O, tail: Process[F,O] = Halt[F,O](End)): Process[F,O] = Emit(head, tail) def await[F[_],A,O](req: F[A])(recv: Either[Throwable,A] => Process[F,O]): Process[F,O] = Await(req, recv) import fpinscala.iomonad.Monad def monad[F[_]]: Monad[({ type f[x] = Process[F,x]})#f] = new Monad[({ type f[x] = Process[F,x]})#f] { def unit[O](o: => O): Process[F,O] = emit(o) def flatMap[O,O2](p: Process[F,O])(f: O => Process[F,O2]): Process[F,O2] = p flatMap f } // enable monadic syntax for `Process` type implicit def toMonadic[F[_],O](a: Process[F,O]) = monad[F].toMonadic(a) /** * Helper function to safely produce `p`, or gracefully halt * with an error if an exception is thrown. 
Replace any exception with a Halt. */ def Try[F[_],O](p: => Process[F,O]): Process[F,O] = try p catch { case e: Throwable => Halt(e) } /* * Safely produce `p`, or run `cleanup` and halt gracefully with the * exception thrown while evaluating `p`. */ def TryOr[F[_],O](p: => Process[F,O])(cleanup: Process[F,O]): Process[F,O] = try p catch { case e: Throwable => cleanup ++ Halt(e) } /* * Safely produce `p`, or run `cleanup` or `fallback` if an exception * occurs while evaluating `p`. */ def TryAwait[F[_],O](p: => Process[F,O])(fallback: Process[F,O], cleanup: Process[F,O]): Process[F,O] = try p catch { case End => fallback case e: Throwable => cleanup ++ Halt(e) } /* Our generalized `Process` type can represent sources! */ import fpinscala.iomonad.IO /* Special exception indicating normal termination */ case object End extends Exception /* Special exception indicating forceful termination */ case object Kill extends Exception /* * A `Process[F,O]` where `F` is a monad like `IO` can be thought of * as a source. */ /* type IO[A] = IO3.IO[A] def IO[A](a: => A): IO[A] = IO3.IO[A](a) * Here is a simple tail recursive function to collect all the * output of a `Process[IO,O]`. Notice we are using the fact * that `IO` can be `run` to produce either a result or an * exception. Makes sure that resource, in this case an Executor, is closed. */ def runLog[O](src: Process[IO,O]): IO[IndexedSeq[O]] = IO { val E = java.util.concurrent.Executors.newFixedThreadPool(4) @annotation.tailrec def go(cur: Process[IO,O], acc: IndexedSeq[O]): IndexedSeq[O] = cur match { case Emit(h,t) => go(t, acc :+ h) case Halt(End) => acc case Halt(err) => throw err // caught below? case Await(req,recv) => val next = try recv(Right(fpinscala.iomonad.unsafePerformIO(req)(E))) catch { case err: Throwable => recv(Left(err)) } go(next, acc) } try go(src, IndexedSeq()) finally E.shutdown } /* * We can write a version of collect that works for any `Monad`. * See the definition in the body of `Process`. 
*/ import java.io.{BufferedReader,FileReader} val p: Process[IO, String] = await(IO(new BufferedReader(new FileReader("resources/lines.txt")))) { case Right(b) => lazy val next: Process[IO,String] = await(IO(b.readLine)) { case Left(e) => await(IO(b.close))(_ => Halt(e)) case Right(line) => Emit(line, next) } next case Left(e) => Halt(e) } /* * Generic combinator for producing a `Process[IO,O]` from some * effectful `O` source. The source is tied to some resource, * `R` (like a file handle) that we want to ensure is released. * See `lines` below for an example use. */ def resource[R,O](acquire: IO[R])( use: R => Process[IO,O])( release: R => Process[IO,O]): Process[IO,O] = eval(acquire) flatMap { r => use(r).onComplete(release(r)) } /* * Like `resource`, but `release` is a single `IO` action. */ def resource_[R,O](acquire: IO[R])( use: R => Process[IO,O])( release: R => IO[Unit]): Process[IO,O] = resource(acquire)(use)(release andThen (eval_[IO,Unit,O])) /* * Create a `Process[IO,O]` from the lines of a file, using * the `resource` combinator above to ensure the file is closed * when processing the stream of lines is finished. */ def lines(filename: String): Process[IO,String] = resource { IO(io.Source.fromFile(filename)) } { src => lazy val iter = src.getLines // a stateful iterator def step = if (iter.hasNext) Some(iter.next) else None lazy val lines: Process[IO,String] = eval(IO(step)).flatMap { case None => Halt(End) case Some(line) => Emit(line, lines) } lines } { src => eval_ { IO(src.close) } } /* Exercise 11: Implement `eval`, `eval_`, and use these to implement `lines`. similar to val 'p' above */ def eval[F[_],A](a: F[A]): Process[F,A] = await(a){ (either: Either[Throwable,A]) => either match { case Right(a) => { lazy val next: Process[F,A] = emit(a) next } case Left(err) => { Halt(err) } } } /* Evaluate the action purely for its effects. 
*/ def eval_[F[_],A,B](a: F[A]): Process[F,B] = await(a){ (either: Either[Throwable,A]) => Halt[F,B](End)} /* Helper function with better type inference. */ def evalIO[A](a: IO[A]): Process[IO,A] = eval[IO,A](a) /* * We now have nice, resource safe effectful sources, but we don't * have any way to transform them or filter them. Luckily we can * still represent the single-input `Process` type we introduced * earlier, which we'll now call `Process1`. */ case class Is[I]() { sealed trait f[X] val Get = new f[I] {} } def Get[I] = Is[I]().Get type Process1[I,O] = Process[Is[I]#f, O] /* Some helper functions to improve type inference. */ def await1[I,O]( recv: I => Process1[I,O], fallback: => Process1[I,O] = halt1[I,O]): Process1[I, O] = Await(Get[I], (e: Either[Throwable,I]) => e match { case Left(End) => fallback case Left(err) => Halt(err) case Right(i) => Try(recv(i)) }) def emit1[I,O](h: O, tl: Process1[I,O] = halt1[I,O]): Process1[I,O] = emit(h, tl) def halt1[I,O]: Process1[I,O] = Halt[Is[I]#f, O](End) def lift[I,O](f: I => O): Process1[I,O] = await1[I,O]((i:I) => emit(f(i))) repeat def filter[I](f: I => Boolean): Process1[I,I] = await1[I,I](i => if (f(i)) emit(i) else halt1) repeat // we can define take, takeWhile, and so on as before def take[I](n: Int): Process1[I,I] = if (n <= 0) halt1 else await1[I,I](i => emit(i, take(n-1))) def takeWhile[I](f: I => Boolean): Process1[I,I] = await1(i => if (f(i)) emit(i, takeWhile(f)) else halt1) def dropWhile[I](f: I => Boolean): Process1[I,I] = await1(i => if (f(i)) dropWhile(f) else emit(i,id)) def id[I]: Process1[I,I] = await1((i: I) => emit(i, id)) def window2[I]: Process1[I,(Option[I],I)] = { def go(prev: Option[I]): Process1[I,(Option[I],I)] = await1[I,(Option[I],I)](i => emit(prev -> i) ++ go(Some(i))) go(None) } /** Emits `sep` in between each input received. 
*/ def intersperse[I](sep: I): Process1[I,I] = await1[I,I](i => emit1(i) ++ id.flatMap(i => emit1(sep) ++ emit1(i))) /* We sometimes need to construct a `Process` that will pull values from multiple input sources. For instance, suppose we want to 'zip' together two files, `f1.txt` and `f2.txt`, combining corresponding lines in some way. Using the same trick we used for `Process1`, we can create a two-input `Process` which can request values from either the 'left' stream or the 'right' stream. We'll call this a `Tee`, after the letter 'T', which looks like a little diagram of two inputs being combined into one output. Note that MonadCatch[T] is not defined. Cannot use runLog? */ case class T[I,I2]() { sealed trait f[X] { def get: Either[I => X, I2 => X] } val L = new f[I] { def get = Left(identity) } val R = new f[I2] { def get = Right(identity) } } def L[I,I2] = T[I,I2]().L def R[I,I2] = T[I,I2]().R type Tee[I,I2,O] = Process[T[I,I2]#f, O] /* Again some helper functions to improve type inference. */ def haltT[I,I2,O]: Tee[I,I2,O] = Halt[T[I,I2]#f,O](End) def awaitL[I,I2,O](recv: I => Tee[I,I2,O], fallback: => Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] = await[T[I,I2]#f,I,O](L) { case Left(End) => fallback case Left(err) => Halt(err) case Right(a) => Try(recv(a)) } def awaitR[I,I2,O](recv: I2 => Tee[I,I2,O], fallback: => Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] = await[T[I,I2]#f,I2,O](R) { case Left(End) => fallback case Left(err) => Halt(err) case Right(a) => Try(recv(a)) } def emitT[I,I2,O](h: O, tl: Tee[I,I2,O] = haltT[I,I2,O]): Tee[I,I2,O] = emit(h, tl) def zipWith[I,I2,O](f: (I,I2) => O): Tee[I,I2,O] = awaitL[I,I2,O](i => awaitR (i2 => emitT(f(i,i2)))) repeat def zip[I,I2]: Tee[I,I2,(I,I2)] = zipWith((_,_)) /* Ignores all input from left. */ def passR[I,I2]: Tee[I,I2,I2] = awaitR(emitT(_, passR)) /* Ignores input from the right. */ def passL[I,I2]: Tee[I,I2,I] = awaitL(emitT(_, passL)) /* Alternate pulling values from the left and the right inputs. 
*/ def interleaveT[I]: Tee[I,I,I] = awaitL[I,I,I](i => awaitR (i2 => emitT(i) ++ emitT(i2))) repeat /* Our `Process` type can also represent effectful sinks (like a file). A `Sink` is simply a source of effectful functions! See the definition of `to` in `Process` for an example of how to feed a `Process` to a `Sink`. */ type Sink[F[_],O] = Process[F, O => Process[F,Unit]] import java.io.FileWriter /* A `Sink` which writes input strings to the given file. */ def fileW(file: String, append: Boolean = false): Sink[IO,String] = resource[FileWriter, String => Process[IO,Unit]] { IO { new FileWriter(file, append) }} { w => constant { (s: String) => eval[IO,Unit](IO(w.write(s))) }} { w => eval_(IO(w.close)) } /* The infinite, constant stream. */ def constant[A](a: A): Process[IO,A] = eval(IO(a)).flatMap { a => Emit(a, constant(a)) } /* Exercise 12: Implement `join`. Notice this is the standard monadic combinator! */ // use monad def join[F[_],A](p: Process[F,Process[F,A]]): Process[F,A] = { val md = monad[F] md.join(p) } // p.flatMap { // (procFA: Process[F,A]) => procFA // } /* * An example use of the combinators we have so far: incrementally * convert the lines of a file from fahrenheit to celsius. */ import fpinscala.iomonad.IO0.fahrenheitToCelsius val converter: Process[IO,Unit] = lines("fahrenheit.txt"). filter(line => !line.startsWith("#") && !line.trim.isEmpty). map(line => fahrenheitToCelsius(line.toDouble).toString). pipe(intersperse("\\n")). to(fileW("celsius.txt")). drain /* More generally, we can feed a `Process` through an effectful channel which returns a value other than `Unit`. */ type Channel[F[_],I,O] = Process[F, I => Process[F,O]] /* * Here is an example, a JDBC query runner which returns the * stream of rows from the result set of the query. We have * the channel take a `Connection => PreparedStatement` as * input, so code that uses this channel does not need to be * responsible for knowing how to obtain a `Connection`. 
*/ import java.sql.{Connection, PreparedStatement, ResultSet} def query(conn: IO[Connection]): Channel[IO, Connection => PreparedStatement, Map[String,Any]] = resource_ { conn } { conn => constant { (q: Connection => PreparedStatement) => resource_ { IO { val rs = q(conn).executeQuery val ncols = rs.getMetaData.getColumnCount val cols = (1 to ncols).map(rs.getMetaData.getColumnName) (rs, cols) }} { case (rs, cols) => def step = if (!rs.next) None else Some(cols.map(c => (c, rs.getObject(c): Any)).toMap) lazy val rows: Process[IO,Map[String,Any]] = eval(IO(step)).flatMap { case None => Halt(End) case Some(row) => Emit(row, rows) } rows } { p => IO { p._1.close } } // close the ResultSet }} { c => IO(c.close) } /* * We can allocate resources dynamically when defining a `Process`. * As an example, this program reads a list of filenames to process * _from another file_, opening each file, processing it and closing * it promptly. */ val convertAll: Process[IO,Unit] = (for { out <- fileW("celsius.txt").once file <- lines("fahrenheits.txt") _ <- lines(file). map(line => fahrenheitToCelsius(line.toDouble)). flatMap(celsius => out(celsius.toString)) } yield ()) drain /* * Just by switching the order of the `flatMap` calls, we can output * to multiple files. */ val convertMultisink: Process[IO,Unit] = (for { file <- lines("fahrenheits.txt") _ <- lines(file). map(line => fahrenheitToCelsius(line.toDouble)). map(_ toString). to(fileW(file + ".celsius")) } yield ()) drain /* * We can attach filters or other transformations at any point in the * program, for example: */ val convertMultisink2: Process[IO,Unit] = (for { file <- lines("fahrenheits.txt") _ <- lines(file). filter(!_.startsWith("#")). map(line => fahrenheitToCelsius(line.toDouble)). filter(_ > 0). // ignore below zero temperatures map(_ toString). 
to(fileW(file + ".celsius")) } yield ()) drain } } object ProcessTest extends App { import GeneralizedStreamTransducers._ import fpinscala.iomonad.IO import Process._ val p = eval(IO { println("woot"); 1 }).repeat val p2 = eval(IO { println("cleanup"); 2 } ).onHalt { case Kill => println { "cleanup was killed, instead of bring run" }; Halt(Kill) case e => Halt(e) } println { Process.runLog { p2.onComplete(p2).onComplete(p2).take(1).take(1) } } println { Process.runLog(converter) } // println { Process.collect(Process.convertAll) } } object GeneralizedStreamTransducerTests extends App { import GeneralizedStreamTransducers._ import fpinscala.iomonad.IO import fpinscala.iomonad.IO3 import Process._ import fpinscala.parallelism.Nonblocking.Par // val numberFile = new java.io.File("/home/peterbecich/scala/fpinscala/exercises/src/main/scala/fpinscala/streamingio/numbers.txt") import java.io.{BufferedReader,FileReader} // can't think of a way to turn p into Process[Task,String] val p: Process[IO, String] = lines("resources/numbers.txt") // await(IO(new BufferedReader(new FileReader("resources/numbers.txt")))) { // case Right(b) => // lazy val next: Process[IO,String] = await(IO(b.readLine)) { // case Left(e) => await(IO(b.close))(_ => Halt(e)) // case Right(line) => Emit(line, next) // } // next // case Left(e) => Halt(e) // } // val numbersOut = runLog(p) // println(numbersOut) val io = p.runLog(ioMonad) println("IO to be run:") println(io) //val func0 = IO3.translate(io) // later, merge Monad in IOMonad package with Monad in Monads package val par = IO3.run(io)(IO3.parMonad) println("par to be run:") println(par) import java.util.concurrent.ExecutorService import java.util.concurrent.Executors val service = Executors.newFixedThreadPool(4) val numbers = Par.run(service)(par) println("numbers:") println(numbers) service.shutdown() }
peterbecich/fpinscala
exercises/src/main/scala/fpinscala/streamingio/StreamingIO.scala
Scala
mit
27,160
package io.soheila.um.vos.accounts

import play.api.libs.json.Json

/**
 * Value object carrying the sign-up form data.
 *
 * @param firstName The first name of a user.
 * @param lastName The last name of a user.
 * @param email The email of the user.
 * @param password The password of the user.
 */
case class SignUpVO(
  firstName: String,
  lastName: String,
  email: String,
  password: String
)

/**
 * The companion object.
 */
object SignUpVO {

  /**
   * Converts a [[SignUpVO]] to JSON and vice versa.
   * (The original comment referred to `[Date]`, which this type does not contain.)
   */
  implicit val jsonFormat = Json.format[SignUpVO]
}
esfand-r/soheila-um
src/main/scala/io/soheila/um/vos/accounts/SignUpVO.scala
Scala
apache-2.0
544
package de.tuda.stg

import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.util.Log
import rescala._
import reandroidthings._

/**
 * Entry activity: wires a REScala temperature-sensor signal to an
 * alphanumeric display, and releases both when the activity is destroyed.
 */
class MainActivity extends AppCompatActivity {
  private val TAG = "Barometer4Android"

  // ReAndroidThings helpers take an Android Context implicitly.
  implicit val context = this

  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
    Log.d(TAG, "Started ReSensor MainActivity")

    // Sensor framework must be initialized before any sensor is requested.
    ReSensorManager.init(this.getApplicationContext)

    // get temperature sensor (requires cast)
    val temperatureSensor: ReSensor[Float] =
      ReSensorManager.getSensor(ReSensor.TypeDynamicSensorMetaTemperatureDescriptor)

    // Display is driven by the sensor's valueChanged event stream
    // (presumably fired on each new reading — confirm in ReSensor docs).
    ReAlphaNumericDisplay.init(temperatureSensor.valueChanged)
  }

  override def onDestroy(): Unit = {
    super.onDestroy()
    Log.d(TAG, "Destroying ReSensor MainActivity")
    destroy()
  }

  // Releases sensor and display resources; invoked from onDestroy.
  def destroy(): Unit = {
    // remove sensors
    ReSensorManager.removeSensors()

    // turn off display
    ReAlphaNumericDisplay.destroy()
  }
}
volkc/REScala
Examples/Barometer4Android/src/main/scala/de/tuda/stg/MainActivity.scala
Scala
apache-2.0
1,005
/* Copyright 2014  Nest Labs

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.  */

package nest.sparkle.loader.kafka

import java.nio.charset.StandardCharsets

import kafka.serializer.Decoder

/** Kafka `Decoder` instances for common payload types. */
object KafkaDecoders {
  object Implicits {

    /**
     * Decodes Kafka message bytes as UTF-8 text.
     *
     * FIX: the original used `new String(bytes)`, which decodes with the JVM's
     * platform default charset and therefore yields different strings on hosts
     * with different locales. UTF-8 is pinned for deterministic decoding,
     * matching Kafka's own string serializer convention.
     */
    implicit object StringDecoder extends Decoder[String] {
      def fromBytes(bytes: Array[Byte]): String = new String(bytes, StandardCharsets.UTF_8)
    }
  }
}
mighdoll/sparkle
kafka/src/main/scala/nest/sparkle/loader/kafka/KafkaDecoders.scala
Scala
apache-2.0
840
package blended.streams.transaction

import java.util.Date

import blended.streams.message.{FlowEnvelope, FlowMessage}
import blended.streams.transaction.FlowTransaction.{envelope2Transaction, transaction2envelope}
import blended.streams.worklist._
import blended.streams.{FlowHeaderConfig, transaction}
import blended.testsupport.scalatest.LoggingFreeSpec
import com.typesafe.config.ConfigFactory
import org.scalatest.matchers.should.Matchers

import scala.jdk.CollectionConverters._
import scala.util.Try

/**
 * Specification of the [[FlowTransaction]] state machine: state transitions
 * driven by worklist updates, terminal-state immutability, and the
 * envelope round trip.
 */
class FlowTransactionSpec extends LoggingFreeSpec
  with Matchers {

  private val branchCount = 10

  // Single envelope shared by all sample transactions, so they all carry
  // the same transaction id (main.id).
  val main = FlowEnvelope(FlowMessage.noProps)

  // create a sample transaction with n started branches
  private def sampleTransAction(branchCount : Int, state: WorklistState = WorklistStateStarted) : Try[FlowTransaction] = Try {

    val branches : Seq[String] = 1.to(branchCount).map { i => s"$i" }

    val event = FlowTransaction.startEvent(Some(main))
    val now : Date = new Date()

    val t = FlowTransaction(
      created = now,
      lastUpdate = now,
      id = event.transactionId,
      creationProps = event.properties
    )

    // updateTransaction returns a NEW transaction; the sample is the updated one.
    t.updateTransaction(FlowTransactionUpdate(t.tid, FlowMessage.noProps, state, branches:_*))
  }

  // Header configuration used for the envelope <-> transaction round trip.
  private val cfg : FlowHeaderConfig = FlowHeaderConfig.create(ConfigFactory.parseMap(
    Map(
      "prefix" -> "App",
      "transactionId" -> "AppFlowTransId",
      "transactionState" -> "AppFlowTransState",
      "branchId" -> "AppFlowBranch"
    ).asJava
  ))

  "A FlowTransaction should" - {

    "a started transaction that recieves another started event as update should be in updated state" in {
      val t = FlowTransaction(None)
      t.worklist should be (empty)
      t.state should be (FlowTransactionStateStarted)

      val evt : FlowTransactionEvent = FlowTransactionStarted(t.tid, t.creationProps)
      val t2 : FlowTransaction = t.updateTransaction(evt)

      t2.created should be (t.created)
      t2.state should be (FlowTransactionStateUpdated)
    }

    "have an empty worklist after being created" in {
      val t = FlowTransaction(None)
      t.worklist should be (empty)
      t.state should be (FlowTransactionStateStarted)
    }

    "reflect the envelope id as transaction id if created with an envelope" in {
      FlowTransaction(Some(main)).tid should be(main.id)
    }

    "a started transaction with n started branches should be in Updated state" in {
      val t = sampleTransAction(branchCount).get
      t.state should be (FlowTransactionStateUpdated)
      t.worklist should have size branchCount
    }

    "a started transaction with c/n branches completed should be in Updated state for c < n" in {
      val t = sampleTransAction(branchCount).get

      val u = t.updateTransaction(
        FlowTransactionUpdate(t.tid, FlowMessage.noProps, WorklistStateCompleted, "5")
      )

      u.state should be (FlowTransactionStateUpdated)
      u.worklist should have size branchCount
    }

    "a branch within a started transaction requires a started AND a completion update to complete" in {
      val t = sampleTransAction(1).get

      val u = t.updateTransaction(
        FlowTransactionUpdate(t.tid, FlowMessage.noProps, WorklistStateCompleted, "1")
      )

      u.state should be (FlowTransactionStateCompleted)
      u.worklist should have size 1

      val t2 = sampleTransAction(1, WorklistStateCompleted).get
      t2.state should be (FlowTransactionStateUpdated)

      // Consistency fix: update t2 with t2's own id (t.tid happened to have the
      // same value only because both samples share the `main` envelope).
      val u2 = t2.updateTransaction(
        FlowTransactionUpdate(t2.tid, FlowMessage.noProps, WorklistStateStarted, "1")
      )

      u2.state should be (FlowTransactionStateCompleted)
      u2.worklist should have size 1
    }

    "a started transaction with all branches completed should be in completed state" in {
      val t = sampleTransAction(branchCount).get

      val branches = 1.to(t.worklist.size).map(i => s"$i")

      val u = t.updateTransaction(
        FlowTransactionUpdate(t.tid, FlowMessage.noProps, WorklistStateCompleted, branches:_*)
      )

      u.state should be (FlowTransactionStateCompleted)
      u.worklist should have size branchCount
    }

    "a started transaction with one branch having failed should be in failed state" in {
      val t = sampleTransAction(branchCount).get

      val u = t.updateTransaction(
        FlowTransactionUpdate(t.tid, FlowMessage.noProps, WorklistStateFailed, "5")
      )

      u.state should be (FlowTransactionStateFailed)
      u.worklist should have size 10
    }

    "a started transaction with one branch having timed out should be in failed state" in {
      val t = sampleTransAction(branchCount).get

      val u = t.updateTransaction(
        FlowTransactionUpdate(t.tid, FlowMessage.noProps, WorklistStateTimeout, "5")
      )

      u.state should be (FlowTransactionStateFailed)
      u.worklist should have size 10
    }

    "complete upon an update with complete regardless of the current worklist" in {
      val env = FlowEnvelope(FlowMessage.noProps)
      val t = FlowTransaction(Some(env))

      val u = t.updateTransaction(FlowTransactionCompleted(t.tid, FlowMessage.noProps))

      u.state should be (FlowTransactionStateCompleted)
      u.worklist should be (empty)
    }

    "remain unchanged once it has reached 'completed'" in {
      val env : FlowEnvelope = FlowEnvelope(FlowMessage.noProps)
      val t : FlowTransaction = FlowTransaction(Some(env))

      val u : FlowTransaction = t.updateTransaction(FlowTransactionCompleted(t.tid, FlowMessage.noProps))
      u.state should be (FlowTransactionStateCompleted)

      // 'completed' is terminal: no further event may change the state.
      u.updateTransaction(FlowTransactionFailed(u.tid, u.creationProps, None)).state should be(FlowTransactionStateCompleted)
      u.updateTransaction(FlowTransactionCompleted(u.tid, u.creationProps)).state should be(FlowTransactionStateCompleted)
      u.updateTransaction(FlowTransactionStarted(u.tid, u.creationProps)).state should be(FlowTransactionStateCompleted)
      u.updateTransaction(FlowTransactionUpdate(u.tid, u.creationProps, WorklistStateFailed)).state should be (FlowTransactionStateCompleted)
    }

    "remain unchanged once it has reached 'failed'" in {
      val env : FlowEnvelope = FlowEnvelope(FlowMessage.noProps)
      val t : FlowTransaction = FlowTransaction(Some(env))

      val u : FlowTransaction = t.updateTransaction(FlowTransactionFailed(t.tid, FlowMessage.noProps, None))
      u.state should be (FlowTransactionStateFailed)

      // 'failed' is terminal as well.
      u.updateTransaction(FlowTransactionFailed(u.tid, u.creationProps, None)).state should be(FlowTransactionStateFailed)
      u.updateTransaction(FlowTransactionCompleted(u.tid, u.creationProps)).state should be(FlowTransactionStateFailed)
      u.updateTransaction(FlowTransactionStarted(u.tid, u.creationProps)).state should be(FlowTransactionStateFailed)
      u.updateTransaction(FlowTransactionUpdate(u.tid, u.creationProps, WorklistStateFailed)).state should be (FlowTransactionStateFailed)
    }

    "can be transformed into a FlowEnvelope and vice versa" in {

      def singleTest(t: FlowTransaction): Unit = {
        val envelope = transaction2envelope(cfg)(t)
        val t2 = envelope2Transaction(cfg)(envelope)

        t2.tid should be(t.tid)
        t2.state should be(t.state)
        t2.created should be(t.created)
        t2.lastUpdate should be(t.lastUpdate)
        assert(t.worklist.forall { case (k, v) => t2.worklist(k) == v })
      }

      singleTest(FlowTransaction(Some(FlowEnvelope())))
      singleTest(sampleTransAction(branchCount).get)

      // Fix: updateTransaction returns a new instance; the original code
      // discarded the result and round-tripped the unmodified transaction.
      val t = sampleTransAction(branchCount).get
      singleTest(t.updateTransaction(FlowTransactionUpdate(t.tid, FlowMessage.noProps, WorklistStateCompleted, "5")))

      // Fix: this case previously updated (and discarded the update of) `t`
      // instead of `t2`, so the completed round trip was never exercised.
      val t2 = sampleTransAction(branchCount).get
      singleTest(t2.updateTransaction(transaction.FlowTransactionCompleted(t2.tid, FlowMessage.noProps)))
    }
  }
}
woq-blended/blended
blended.streams/src/test/scala/blended/streams/transaction/FlowTransactionSpec.scala
Scala
apache-2.0
7,895
/*
 * Copyright 2010-2011 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package net.liftweb
package http
package provider
package servlet
package containers

import javax.servlet.http.HttpServletRequest

import net.liftweb.common._
import net.liftweb.http._
import net.liftweb.http.provider._
import net.liftweb.http.provider.servlet._
import net.liftweb.util._
import Helpers._

/**
 * Detects, via reflection, whether the Jetty 7 Continuation API is on the
 * classpath, and caches the reflective handles needed to drive it. Using
 * reflection avoids a compile-time dependency on Jetty.
 */
object Jetty7AsyncProvider extends AsyncProviderMeta {
  // contSupport below gets inferred as a Class[?0] existential.
  import scala.language.existentials

  // One-shot probe: either all handles resolve and hasContinuations_? is true,
  // or any reflective failure leaves every handle null and support disabled.
  private val (hasContinuations_?, contSupport, getContinuation, getAttribute, setAttribute,
               suspendMeth, setTimeout, resumeMeth, isExpired, isResumed) = {
    try {
      val cc = Class.forName("org.eclipse.jetty.continuation.ContinuationSupport")
      val meth = cc.getMethod("getContinuation", classOf[javax.servlet.ServletRequest])
      val cci = Class.forName("org.eclipse.jetty.continuation.Continuation")
      val getAttribute = cci.getMethod("getAttribute", classOf[String])
      val setAttribute = cci.getMethod("setAttribute", classOf[String], classOf[AnyRef])
      val suspend = cci.getMethod("suspend")
      val setTimeout = cci.getMethod("setTimeout", java.lang.Long.TYPE)
      val resume = cci.getMethod("resume")
      val isExpired = cci.getMethod("isExpired")
      val isResumed = cci.getMethod("isResumed")
      (true, (cc), (meth), (getAttribute), (setAttribute), (suspend), setTimeout, resume, isExpired, isResumed)
    } catch {
      // Jetty continuations not available (or incompatible): disable support.
      case e: Exception => (false, null, null, null, null, null, null, null, null, null)
    }
  }

  // True iff the reflective probe above succeeded.
  def suspendResumeSupport_? : Boolean = hasContinuations_?

  /**
   * return a function that vends the ServletAsyncProvider
   */
  def providerFunction: Box[HTTPRequest => ServletAsyncProvider] =
    Full(req => new Jetty7AsyncProvider(req)).
    filter(i => suspendResumeSupport_?)
}

/**
 * Jetty7AsyncProvider
 *
 * Implemented by using Jetty 7 Continuation API
 *
 */
class Jetty7AsyncProvider(req: HTTPRequest) extends ServletAsyncProvider {
  import Jetty7AsyncProvider._

  // Underlying servlet request; all continuation calls are keyed off it.
  private val servletReq = (req.asInstanceOf[HTTPRequestServlet]).req

  def suspendResumeSupport_? : Boolean = hasContinuations_?

  // Retrieves (and clears) the comet state stashed on the continuation by
  // resume(), so a re-dispatched request can pick up where it left off.
  // Returns None when continuations are unsupported or running on GAE.
  def resumeInfo: Option[(Req, LiftResponse)] =
    if (!hasContinuations_?) None
    else if (Props.inGAE) None
    else {
      val cont = getContinuation.invoke(contSupport, servletReq)
      val ret = getAttribute.invoke(cont, "__liftCometState")
      try {
        // Clear the attribute so the state is consumed exactly once.
        setAttribute.invoke(cont, "__liftCometState", null)
        ret match {
          case (r: Req, lr: LiftResponse) => Some(r -> lr)
          case _ => None
        }
      }
      catch {
        case e: Exception => None
      }
    }

  // Suspends the request for up to `timeout` millis — NOTE(review): units are
  // presumably milliseconds per Jetty's Continuation.setTimeout; confirm.
  // Reports TIMED_OUT / RESUMED when re-dispatched after a previous suspend.
  def suspend(timeout: Long): RetryState.Value = {
    val cont = getContinuation.invoke(contSupport, servletReq)

    val expired = isExpired.invoke(cont).asInstanceOf[Boolean]
    val resumed = isResumed.invoke(cont).asInstanceOf[Boolean]

    if (expired)
      RetryState.TIMED_OUT
    else if (resumed)
      RetryState.RESUMED
    else {
      setTimeout.invoke(cont, new java.lang.Long(timeout))
      suspendMeth.invoke(cont)
      RetryState.SUSPENDED
    }
  }

  // Stashes the (request, response) pair on the continuation and resumes it;
  // on failure the attribute is cleared and false is returned.
  def resume(what: (Req, LiftResponse)): Boolean = {
    val cont = getContinuation.invoke(contSupport, servletReq)
    try {
      setAttribute.invoke(cont, "__liftCometState", what)
      resumeMeth.invoke(cont)
      true
    } catch {
      case e: Exception => setAttribute.invoke(cont, "__liftCometState", null)
      false
    }
  }
}
lzpfmh/framework-2
web/webkit/src/main/scala/net/liftweb/http/provider/servlet/containers/Jetty7AsyncProvider.scala
Scala
apache-2.0
4,237
package com.twitter.diffy.proxy

import javax.inject.Singleton
import com.google.inject.Provides
import com.twitter.diffy.analysis._
import com.twitter.diffy.lifter.Message
import com.twitter.finagle._
import com.twitter.inject.TwitterModule
import com.twitter.logging.Logger
import com.twitter.util._

/** Guice module that picks the proxy implementation from the configured protocol. */
object DifferenceProxyModule extends TwitterModule {
  @Provides
  @Singleton
  def providesDifferenceProxy(
    settings: Settings,
    collector: InMemoryDifferenceCollector,
    joinedDifferences: JoinedDifferences,
    analyzer: DifferenceAnalyzer
  ): DifferenceProxy =
    // NOTE(review): any protocol value other than "thrift"/"http" throws a
    // MatchError here — confirm settings validation happens upstream.
    settings.protocol match {
      case "thrift" => ThriftDifferenceProxy(settings, collector, joinedDifferences, analyzer)
      case "http" => SimpleHttpDifferenceProxy(settings, collector, joinedDifferences, analyzer)
    }
}

object DifferenceProxy {
  // Sentinel failure: the proxy never forwards a real backend response (see proxy below).
  object NoResponseException extends Exception("No responses provided by diffy")
  val NoResponseExceptionFuture = Future.exception(NoResponseException)
  val log = Logger(classOf[DifferenceProxy])
}

/**
 * Core of diffy: multicasts each incoming request to primary, candidate and
 * secondary backends, lifts the responses into [[Message]]s and hands them to
 * the difference analyzer. The caller always receives a failed future.
 */
trait DifferenceProxy {
  import DifferenceProxy._

  type Req
  type Rep
  type Srv <: ClientService[Req, Rep]

  val server: ListeningServer
  val settings: Settings

  // Timestamp of the last analyzer reset; updated by clear().
  var lastReset: Time = Time.now

  def serviceFactory(serverset: String, label: String): Srv

  def liftRequest(req: Req): Future[Message]
  def liftResponse(rep: Try[Rep]): Future[Message]

  // Clients for services
  val candidate = serviceFactory(settings.candidate.path, "candidate")
  val primary = serviceFactory(settings.primary.path, "primary")
  val secondary = serviceFactory(settings.secondary.path, "secondary")

  val collector: InMemoryDifferenceCollector

  val joinedDifferences: JoinedDifferences

  val analyzer: DifferenceAnalyzer

  // Sends the request to primary, candidate and secondary IN THAT ORDER
  // (sequentially, per the service's name); response order below relies on it.
  private[this] lazy val multicastHandler =
    new SequentialMulticastService(Seq(primary.client, candidate.client, secondary.client))

  def proxy = new Service[Req, Rep] {
    override def apply(req: Req): Future[Rep] = {
      // The respond/foreach calls below are side-effecting observers; the
      // analysis runs asynchronously and the client always gets the
      // NoResponseException failure as its reply.
      val rawResponses =
        multicastHandler(req) respond {
          case Return(_) => log.debug("success networking")
          case Throw(t) => log.debug(t, "error networking")
        }

      val responses: Future[Seq[Message]] =
        rawResponses flatMap { reps =>
          Future.collect(reps map liftResponse) respond {
            case Return(rs) => log.debug(s"success lifting ${rs.head.endpoint}")
            case Throw(t) => log.debug(t, "error lifting")
          }
        }

      responses foreach {
        // Order matches the multicast sequence above: primary, candidate, secondary.
        case Seq(primaryResponse, candidateResponse, secondaryResponse) =>
          liftRequest(req) respond {
            case Return(m) => log.debug(s"success lifting request for ${m.endpoint}")
            case Throw(t) => log.debug(t, "error lifting request")
          } foreach { req =>
            // NB: this `req` is the lifted Message and shadows the outer raw request.
            analyzer(req, candidateResponse, primaryResponse, secondaryResponse)
          }
      }
      NoResponseExceptionFuture
    }
  }

  /** Resets the analyzer state and records the reset time. */
  def clear() = {
    lastReset = Time.now
    analyzer.clear()
  }
}
camiloribeiro/diffy
src/main/scala/com/twitter/diffy/proxy/DifferenceProxy.scala
Scala
apache-2.0
3,047
package io.rout

import com.twitter.concurrent.AsyncStream
import com.twitter.finagle.http.{Response, Status, Version}
import com.twitter.io.{Buf, Charsets}
import shapeless._

import io.rout.contentTypes._

/**
 * Represents a conversion from `A` to [[Response]].
 */
trait ToResponse[A] {
  // Phantom type naming the content type the response is rendered as.
  type ContentType <: String

  def apply(a: A): Response
}

// Lowest-priority instances: streaming (AsyncStream) conversions. Kept in a
// separate trait so the value-level instances in the subtrait win on ties.
trait LowPriorityToResponseInstances {
  /** Aux alias: exposes the ContentType member as a second type parameter. */
  type Aux[A, CT] = ToResponse[A] { type ContentType = CT }

  /** Lifts a plain function into a ToResponse instance with content type CT. */
  def instance[A, CT <: String](fn: A => Response): Aux[A, CT] = new ToResponse[A] {
    type ContentType = CT
    def apply(a: A): Response = fn(a)
  }

  // Builds a chunked response: each stream element is written through
  // `writer`, the writer is closed when the stream terminates, and the
  // content-type header is taken from the singleton-string witness.
  private[this] def asyncStreamResponseBuilder[A, CT <: String](writer: A => Buf)(implicit
    w: Witness.Aux[CT]
  ): Aux[AsyncStream[A], CT] = instance { as =>
    val rep = Response()
    rep.setChunked(true)

    val writable = rep.writer
    as.foreachF(chunk => writable.write(writer(chunk))).ensure(writable.close())

    rep.contentType = w.value
    rep
  }

  // Raw Buf streams are written through unchanged.
  implicit def asyncBufToResponse[CT <: String](implicit
    w: Witness.Aux[CT]
  ): Aux[AsyncStream[Buf], CT] = asyncStreamResponseBuilder(identity)

  // Separator appended after every streamed element (newline-delimited output).
  private[this] val newLine: Buf = Buf.Utf8("\n")

  implicit def jsonAsyncStreamToResponse[A](implicit
    e: Encode.ApplicationJson[A],
    w: Witness.Aux[Application.Json]
  ): Aux[AsyncStream[A], Application.Json] =
    asyncStreamResponseBuilder(a => e(a).concat(newLine))

  implicit def htmlAsyncStreamToResponse[A](implicit
    e: Encode.TextHtml[A]
  ): Aux[AsyncStream[A], Text.Html] =
    asyncStreamResponseBuilder(a => e(a).concat(newLine))
}

trait HighPriorityToResponseInstances extends LowPriorityToResponseInstances {
  // Body and content-type are only set for non-empty payloads, so empty
  // results yield a bare response without a content-type header.
  private[this] def bufToResponse(buf: Buf, ct: String): Response = {
    val rep = Response()

    if (!buf.isEmpty) {
      rep.content = buf
      rep.contentType = ct
    }

    rep
  }

  implicit def valueToResponse[A, CT <: String](implicit
    e: Encode.Aux[A, CT],
    w: Witness.Aux[CT]
  ): Aux[A, CT] = instance(a => bufToResponse(e(a), w.value))

  // Renders an Output: payloads via the value's own instance, failures via the
  // exception encoder, Empty as a bare response — then copies the Output's
  // status, headers and cookies onto the result.
  implicit def outputToResponse[A, CT <: String](implicit
    tr: ToResponse.Aux[A, CT],
    e: Encode.Aux[Exception, CT],
    w: Witness.Aux[CT]
  ): Aux[Output[A], CT] = instance { o =>
    val rep = o match {
      case Output.Payload(v, _) => tr(v)
      case Output.Failure(x, _) => bufToResponse(e(x), w.value)
      case Output.Empty(_) => Response()
    }
    rep.status = o.status
    o.headers.foreach { case (k, v) => rep.headerMap.set(k, v) }
    o.cookies.foreach(rep.cookies.add)
    rep
  }
}

object ToResponse extends HighPriorityToResponseInstances {
  // Terminal coproduct case: CNil is uninhabited, so this can only be selected
  // by the derivation machinery, never invoked with a real value; 404 is the
  // conventional fallback.
  implicit def cnilToResponse[CT <: String]: Aux[CNil, CT] =
    instance(_ => Response(Version.Http10, Status.NotFound))

  // Folds over the coproduct: head instance for Inl, recurse for Inr.
  implicit def coproductToResponse[H, T <: Coproduct, CT <: String](implicit
    trH: ToResponse.Aux[H, CT],
    trT: ToResponse.Aux[T, CT]
  ): Aux[H :+: T, CT] = instance {
    case Inl(h) => trH(h)
    case Inr(t) => trT(t)
  }
}
teodimoff/rOut
core/src/io/rout/internal/ToResponse.scala
Scala
apache-2.0
2,943
package org.vaadin.addons.rinne

import com.vaadin.shared.ui.combobox.FilteringMode
import org.scalatest.FunSpec

/** Checks the scala-friendly property accessors of the VComboBox wrapper. */
class VComboBoxSpec extends FunSpec {

  describe("VComboBox") {

    describe("should allow to set") {

      it("inputPrompt") {
        val cb = new VComboBox
        // Option-valued: unset prompt reads back as None.
        assert(cb.inputPrompt === None)

        // Both a bare value and a Some(...) may be assigned.
        cb.inputPrompt = "test"
        assert(cb.inputPrompt === Some("test"))

        cb.inputPrompt = Some("test2")
        assert(cb.inputPrompt === Some("test2"))
      }

      it("textInputAllowed") {
        val cb = new VComboBox
        assert(cb.textInputAllowed === true)

        cb.textInputAllowed = false
        assert(cb.textInputAllowed === false)
      }

      it("filteringMode") {
        val cb = new VComboBox
        // Vaadin's default filtering mode is STARTSWITH.
        assert(cb.filteringMode === FilteringMode.STARTSWITH)

        cb.filteringMode = FilteringMode.CONTAINS
        assert(cb.filteringMode === FilteringMode.CONTAINS)
      }

      it("scrollToSelectedItem") {
        val cb = new VComboBox
        assert(cb.scrollToSelectedItem === true)

        cb.scrollToSelectedItem = false
        assert(cb.scrollToSelectedItem === false)
      }
    }

    // NOTE(review): these assertions sit directly in the describe block (not
    // inside an `it`), so they execute at suite registration time — confirm
    // whether that is intentional.
    describe("should addItemWithCaption") {
      val cb = new VComboBox
      cb.addItemWithCaption(Int.box(1), "test")
      assert(cb.getItemCaption(Int.box(1)) === "test")
    }
  }
}
LukaszByczynski/rinne
src/test/scala/org/vaadin/addons/rinne/VComboBoxSpec.scala
Scala
apache-2.0
1,471
package akka.analytics.cassandra

import scala.concurrent.duration._

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.analytics.cassandra.ProcessingSpec._
import akka.persistence.PersistentActor
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.apache.spark.SparkConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers, WordSpecLike}

object ProcessingSpec {
  // Journal + snapshot store pointed at the embedded Cassandra on port 9142.
  // target-partition-size = 3 forces the 10 test events across several
  // partitions (the expected JournalKey partitions below depend on it).
  val akkaConfig = ConfigFactory.parseString(
    """
      |akka.persistence.journal.plugin = "cassandra-journal"
      |akka.persistence.snapshot-store.plugin = "cassandra-snapshot-store"
      |cassandra-journal.port = 9142
      |cassandra-journal.target-partition-size = 3
      |cassandra-snapshot-store.port = 9142
    """.stripMargin)

  // Local 4-thread Spark wired to the same embedded Cassandra instance.
  val sparkConfig = new SparkConf()
    .setAppName("CassandraExample")
    .setMaster("local[4]")
    .set("spark.cassandra.connection.host", "127.0.0.1")
    .set("spark.cassandra.connection.port", "9142")

  // Persists each received message and echoes the persisted event to `probe`,
  // which lets the test wait until all events are actually journaled.
  class ExampleActor(probe: ActorRef) extends PersistentActor {
    override val persistenceId: String = "test"

    override def receiveCommand: Receive = {
      case msg => persist(msg) {
        case evt => probe ! evt
      }
    }

    // Recovery is irrelevant here; replayed events are dropped.
    override def receiveRecover: Receive = {
      case _ =>
    }
  }
}

/**
 * Shared skeleton for journal-reading specs: concrete subclasses provide the
 * Spark context type `SC` and how to materialize the journal as
 * (JournalKey, event) pairs.
 */
abstract class ProcessingSpec[SC] extends TestKit(ActorSystem("test", akkaConfig))
  with WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {

  // Spark context (or equivalent) under test, supplied by the subclass.
  def underTest: SC

  // Reads the full journal back through Spark.
  def execute(sc: SC): Array[(JournalKey, Any)]

  override protected def beforeAll(): Unit = {
    // Embedded Cassandra can be slow to come up; allow up to a minute.
    CassandraServer.start(60.seconds)
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    CassandraServer.stop()
  }

  "akka-analytics-cassandra" must {
    "expose journaled events as RDD" in {
      val actor = system.actorOf(Props(new ExampleActor(testActor)))
      val num = 10

      // Write 10 events and wait for each persist confirmation.
      1 to num foreach { i => actor ! s"A-${i}" }
      1 to num foreach { i => expectMsg(20.seconds, s"A-${i}") }

      val actual = execute(underTest)
      // (i - 1) / 3 mirrors target-partition-size = 3 configured above;
      // the third component is the 1-based sequence number.
      val expected = 1 to num map { i => (JournalKey("test", (i - 1) / 3, i), s"A-${i}") }

      actual should be(expected)
    }
  }
}
zapletal-martin/akka-analytics
akka-analytics-cassandra/src/test/scala/akka/analytics/cassandra/ProcessingSpec.scala
Scala
apache-2.0
2,211
package edu.cmu.lti.oaqa.cse.scala.configuration

import java.util.Map.Entry

import Implicits._
import net.liftweb.json.JsonAST.JValue
import net.liftweb.json.JsonAST.JInt
import net.liftweb.json.JsonAST.JString
import net.liftweb.json.JsonAST.JBool
import net.liftweb.json.JsonAST.JDouble
import Parameters.Parameter
import edu.cmu.lti.oaqa.cse.scala.configuration.Parameters._

// Marker traits forming the configuration AST.
sealed trait ConfExpr
sealed trait ExecutableDescriptor
sealed trait ExecutableConf extends ExecutableDescriptor with ConfExpr

/** Root of a parsed configuration: header, collection reader and pipeline phases. */
case class ConfigurationDescriptor(configuration: Configuration,
  `collection-reader`: CollectionReaderDescriptor,
  pipeline: List[PhaseDescriptor]) extends ConfExpr
//, pipeline: List[PhaseDescriptor], consumers: List[ConsumerDescriptor])

/** Configuration header; name and author default when omitted. */
case class Configuration(name: String = "default-config", author: String = "default-author") extends ConfExpr

/** Describes the collection reader class plus its parameter map. */
case class CollectionReaderDescriptor(`class`: String, params: Map[String, Parameter] = Map())
  extends ParameterizedDescriptor(`class`, params) with ExecutableConf

/** A pipeline phase: a name and the alternative components to explore. */
case class PhaseDescriptor(name: String, options: List[ComponentDescriptor]) extends ExecutableConf

/** A single pipeline component: implementation class plus parameters. */
case class ComponentDescriptor(`class`: String,
  params: Map[String, Parameter] = Map() /*, `persistence-provider`: ComponentDescriptor = emptyComponent*/ )
  extends ParameterizedDescriptor(`class`, params) with ExecutableConf

case class ScoreDescriptor(cost: Double, benefit: Double)

/**
 * Common typed-lookup behaviour for descriptors carrying a `class` name and a
 * parameter map.
 *
 * Fix: constructor parameter renamed from the misspelled `parmeters` to
 * `parameters`. The class is sealed, so all subclasses live in this file and
 * pass the argument positionally — the rename is source-compatible.
 */
sealed abstract class ParameterizedDescriptor(`class`: String, parameters: Map[String, Any]) {

  /**
   * Looks up `key`, restricted to values of type `T`.
   * Returns None (after logging to stdout) when the key is absent or has the
   * wrong type. NOTE(review): `m.erasure` is the pre-2.10 Manifest API —
   * consider `m.runtimeClass` if the build targets a modern Scala.
   */
  def get[T](key: String)(implicit m: scala.reflect.Manifest[T]): Option[T] =
    parameters.restrictTo[T].get(key) match {
      case None => { println("key: " + key + " of type: " + m.erasure + " does not exist!"); None }
      case s => s
    }

  /** Untyped lookup: any parameter value under `key`. */
  def get(key: String) = get[Any](key)

  //typesafe get methods, guaranteed to get a parameter of a given type
  // or nothing if it is not found
  def getInt(key: String) = get[IntegerParameter](key)
  def getDouble(key: String) = get[DoubleParameter](key)
  def getString(key: String) = get[StringParameter](key)
  def getBoolean(key: String) = get[BooleanParameter](key)
  def getMap(key: String) = get[Map[String, Parameter]](key)
  def getList(key: String) = get[List[Parameter]](key)
}
oaqa/bagpipes-old
src/main/scala/edu/cmu/lti/oaqa/cse/scala/configuration/Configuration.scala
Scala
apache-2.0
2,237
package mesosphere.raml.backend.treehugger

import mesosphere.raml.ir.StringT
import treehugger.forest.Tree

/** Code-generation visitor for [[StringT]] nodes. */
object StringVisitor {

  /** Returns the trees generated for `s` — none at present: string types need no extra definitions. */
  def visit(s: StringT): Seq[Tree] = Nil
}
gsantovena/marathon
type-generator/src/main/scala/mesosphere/raml/backend/treehugger/StringVisitor.scala
Scala
apache-2.0
190
/*
 * Copyright 2013 Folker Bernitt
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package de.bernitt.scalamaildir

/**
 * Character classification and alphabet for a modified base64 encoding
 * (standard base64 with ',' in place of '/' as the final alphabet character).
 */
trait Base64Util {

  /** The 64-character target alphabet: A-Z, a-z, 0-9, '+' and ','. */
  val TARGET_CHARS: Array[Char] = Array(
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
    'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
    'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', ',')

  /**
   * A character is "plain" (needs no encoding) when it is printable ASCII
   * (between ' ' and '\u007F' inclusive) and none of '&', '/', '.'.
   * Rewritten as a single boolean expression — the original used an early
   * `return`, which is non-idiomatic Scala.
   */
  def isPlainChar(char: Char): Boolean =
    char != '&' && char != '/' && char != '.' &&
      char >= ' ' && char <= '\u007F'

  /** '&' introduces an escape sequence. */
  def isEscapeChar(chr: Char): Boolean = chr == '&'

  /** '-' terminates an escape sequence. */
  def isFinishEscapeChar(chr: Char): Boolean = chr == '-'
}
fbernitt/mailbackup
src/main/scala/de/bernitt/scalamaildir/Base64Util.scala
Scala
apache-2.0
1,292
package org.scalacheck.ops

import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.FreeSpec

/** Exercises the implicit conversions between ScalaCheck's Gen and Arbitrary. */
class ArbitraryAsGenSpec extends FreeSpec {

  private val className = classOf[ArbitraryAsGen].getSimpleName

  s"$className should implicitly convert a Gen to Arbitrary" in {
    // Compiles only if the Gen => Arbitrary implicit conversion applies.
    def acceptArbitrary(arb: Arbitrary[Char]): Unit = ()
    acceptArbitrary(Gen.numChar)
  }

  // Name is referenced by the assertCompiles string below — do not rename.
  private def implicitGen(implicit gen: Gen[Char]): Unit = ()

  s"$className should convert an implicit Arbitrary to an implicit Gen" in {
    // Known gap: documented as pending until the reverse conversion works.
    pendingUntilFixed {
      assertCompiles("implicitGen")
    }
  }
}
jeffmay/scalacheck-ops
core-1-13-test/src/test/scala/org/scalacheck/ops/ArbitraryAsGenSpec.scala
Scala
apache-2.0
532
package org.jetbrains.plugins.scala.lang.psi.impl.base.types

import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScMatchTypeCase, ScTypeElement}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementImpl

/**
 * PSI implementation of a single case clause of a match type
 * (presumably `case Pattern => Result` — confirm against ScMatchTypeCase).
 */
class ScMatchTypeCaseImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScMatchTypeCase {

  /** The pattern (left-hand side) type element of the case clause. */
  override def patternTypeElement: Option[ScTypeElement] = findChild[ScTypeElement]

  // NOTE(review): identical lookup to patternTypeElement — if findChild
  // returns the first matching child, both accessors yield the pattern
  // element; confirm whether the result side should select a later child.
  override def resultTypeElement: Option[ScTypeElement] = findChild[ScTypeElement]
}
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScMatchTypeCaseImpl.scala
Scala
apache-2.0
526
package com.github.mdr.mash.evaluator import com.github.mdr.mash.classes.{ BoundMethod, UserDefinedClass, UserDefinedMethod } import com.github.mdr.mash.compiler.DesugarHoles import com.github.mdr.mash.functions._ import com.github.mdr.mash.ns.os.PathClass import com.github.mdr.mash.os.linux.LinuxEnvironmentInteractions import com.github.mdr.mash.parser.AbstractSyntax._ import com.github.mdr.mash.parser.{ DocComment, QuotationType } import com.github.mdr.mash.runtime._ import scala.PartialFunction.condOpt import scala.util.control.NonFatal object EvaluationContext { val NotUsed = EvaluationContext(ScopeStack(List())) } case class EvaluationContext(scopeStack: ScopeStack, namespaceOpt: Option[Namespace] = None) object Evaluator extends EvaluatorHelper { private val environmentInteractions = LinuxEnvironmentInteractions def evaluate(expr: Expr)(implicit context: EvaluationContext): MashValue = { try { ExecutionContext.checkInterrupted() val simpleResult = simpleEvaluate(expr) ExecutionContext.checkInterrupted() val finalResult = expr match { case ident: Identifier ⇒ if (context.scopeStack.lookup(ident.name).exists(_.isSafe)) simpleResult else invokeNullaryFunctions(simpleResult, sourceLocation(expr)) case _: MemberExpr ⇒ invokeNullaryFunctions(simpleResult, sourceLocation(expr)) case _ ⇒ simpleResult } finalResult } catch { case e: EvaluatorException ⇒ throw e case EvaluationInterruptedException ⇒ throw EvaluationInterruptedException case e: InterruptedException ⇒ throw EvaluationInterruptedException case NonFatal(t) ⇒ throw EvaluatorException("Unexpected error in evaluation: " + t.toString, stack = sourceLocation(expr).toList.map(loc ⇒ StackTraceItem(Some(loc))), cause = t) } } /** * If the given value is a function or bound method that allows nullary invocation, invoke it immediately and * return the result; otherwise, return the input value unchanged. 
*/ def invokeNullaryFunctions(value: MashValue, locationOpt: Option[SourceLocation]): MashValue = value match { case f: MashFunction if f.allowsNullary ⇒ InvocationEvaluator.addInvocationToStackOnException(locationOpt, Some(f)) { f.callNullary() } case bm@BoundMethod(target, method, _) if method.allowsNullary ⇒ InvocationEvaluator.addInvocationToStackOnException(locationOpt, Some(bm)) { method.callNullary(target) } case _ ⇒ value } /** * Evaluate the given expression. If the result is a function/bound method that allows a nullary call, it is not called. */ def simpleEvaluate(expr: Expr)(implicit context: EvaluationContext): MashValue = expr match { case Hole(_, _) | PipeExpr(_, _, _) | HeadlessMemberExpr(_, _, _) ⇒ // Should have been removed from the AST by now throw EvaluatorException("Unexpected AST node: " + expr, sourceLocation(expr)) case thisExpr: ThisExpr ⇒ evaluateThisExpr(thisExpr) case interpolatedString: InterpolatedString ⇒ evaluateInterpolatedString(interpolatedString) case ParenExpr(body, _) ⇒ evaluate(body) case blockExpr: BlockExpr ⇒ evaluateBlockExpr(blockExpr) case Literal(v, _) ⇒ v case memberExpr: MemberExpr ⇒ MemberEvaluator.evaluateMemberExpr(memberExpr, invokeNullaryWhenVectorising = true).result case lookupExpr: LookupExpr ⇒ LookupEvaluator.evaluateLookupExpr(lookupExpr) case invocationExpr: InvocationExpr ⇒ InvocationEvaluator.evaluateInvocationExpr(invocationExpr) case LambdaExpr(params, body, _) ⇒ makeAnonymousFunction(params, body) case binOp: BinOpExpr ⇒ BinaryOperatorEvaluator.evaluateBinOpExpr(binOp) case chainedOpExpr: ChainedOpExpr ⇒ BinaryOperatorEvaluator.evaluateChainedOp(chainedOpExpr) case assExpr: AssignmentExpr ⇒ AssignmentEvaluator.evaluateAssignment(assExpr) case assExpr: PatternAssignmentExpr ⇒ AssignmentEvaluator.evaluatePatternAssignment(assExpr) case ifExpr: IfExpr ⇒ evaluateIfExpr(ifExpr) case ListExpr(elements, _) ⇒ MashList(elements.map(evaluate(_))) case mishExpr: MishExpr ⇒ MishEvaluator.evaluateMishExpr(mishExpr) 
case expr: MishInterpolation ⇒ MishEvaluator.evaluateMishInterpolation(expr) case MishFunction(command, _) ⇒ SystemCommandFunction(command) case decl: FunctionDeclaration ⇒ evaluateFunctionDecl(decl) case decl: ClassDeclaration ⇒ evaluateClassDecl(decl) case helpExpr: HelpExpr ⇒ HelpEvaluator.evaluateHelpExpr(helpExpr) case StatementSeq(statements, _) ⇒ evaluateStatements(statements) case lit: StringLiteral ⇒ evaluateStringLiteral(lit) case MinusExpr(subExpr, _) ⇒ evaluateMinusExpr(subExpr) case identifier: Identifier ⇒ evaluateIdentifier(identifier) case objectExpr: ObjectExpr ⇒ evaluateObjectExpr(objectExpr) case importStatement: ImportStatement ⇒ evaluateImportStatement(importStatement) } private def evaluateImportStatement(importStatement: ImportStatement)(implicit context: EvaluationContext): MashValue = { val target = Evaluator.evaluate(importStatement.expr) importStatement.importNameOpt match { case Some(name) ⇒ MemberEvaluator.maybeLookupByString(target, name) match { case Some(value) ⇒ context.scopeStack.set(name, value) value case None ⇒ MemberEvaluator.throwCannotFindMemberException(target, name, sourceLocation(importStatement)) } case None ⇒ for { name ← MemberEvaluator.getMemberNames(target) value ← MemberEvaluator.maybeLookupByString(target, name, includeShyMembers = false) } context.scopeStack.set(name, value) MashUnit } } def evaluateObjectExpr(objectExpr: ObjectExpr)(implicit context: EvaluationContext): MashObject = { def getFieldName(fieldNameExpr: Expr): MashValue = fieldNameExpr match { case Identifier(name, _) if !name.startsWith(DesugarHoles.VariableNamePrefix) ⇒ MashString(name) case _ ⇒ evaluate(fieldNameExpr) } val fields = objectExpr.fields.map { case FullObjectEntry(field, value, _) ⇒ getFieldName(field) -> evaluate(value) case entry@ShorthandObjectEntry(field, sourceInfoOpt) ⇒ val evaluatedIdentifier = evaluateIdentifier(field, sourceInfoOpt.flatMap(_.locationOpt)) val finalResult = invokeNullaryFunctions(evaluatedIdentifier, 
sourceLocation(entry)) MashString(field) -> finalResult } MashObject.of(fields) } def evaluateBlockExpr(blockExpr: BlockExpr)(implicit context: EvaluationContext): MashValue = { val newContext = context.copy(scopeStack = context.scopeStack.withLeakyScope()) Evaluator.evaluate(blockExpr.expr)(newContext) } def evaluateThisExpr(thisExpr: ThisExpr)(implicit context: EvaluationContext): MashValue = context.scopeStack.thisOpt.getOrElse { throw EvaluatorException(s"No binding for 'this'", sourceLocation(thisExpr)) } def evaluateIdentifier(identifier: Identifier)(implicit context: EvaluationContext): MashValue = evaluateIdentifier(identifier.name, sourceLocation(identifier)) private def evaluateIdentifier(name: String, locationOpt: Option[SourceLocation])(implicit context: EvaluationContext): MashValue = context.scopeStack.lookup(name).getOrElse { val names = context.scopeStack.bindings.keys.toSeq throw EvaluatorException(s"No binding for '$name'${Suggestor.suggestionSuffix(names, name)}", locationOpt) }.value private def evaluateMinusExpr(subExpr: Expr)(implicit context: EvaluationContext): MashValue = evaluate(subExpr) match { case n: MashNumber ⇒ n.negate case x ⇒ throw EvaluatorException("Could not negate a value of type " + x.typeName, sourceLocation(subExpr)) } def evaluateStringLiteral(lit: StringLiteral): MashValue = { val StringLiteral(s, quotationType, tildePrefix, _) = lit val tagOpt = condOpt(quotationType) { case QuotationType.Double ⇒ PathClass } val detilded = if (tildePrefix) environmentInteractions.home + s else s MashString(detilded, tagOpt) } private def evaluateStatements(statements: Seq[Expr])(implicit context: EvaluationContext): MashValue = { var result: MashValue = MashUnit for (statement ← statements) result = evaluate(statement) result } private def evaluateFunctionDecl(decl: FunctionDeclaration)(implicit context: EvaluationContext): UserDefinedFunction = { val function = userDefinedFunction(decl) context.scopeStack.set(function.name, function) 
function } private def userDefinedFunction(decl: FunctionDeclaration)(implicit context: EvaluationContext): UserDefinedFunction = { val FunctionDeclaration(docCommentOpt, attributes, functionName, paramList, body, _) = decl evaluateAttributes(attributes) val params = parameterModel(paramList, Some(context), docCommentOpt) UserDefinedFunction(docCommentOpt, functionName, params, body, context, decl) } private def evaluateClassDecl(decl: ClassDeclaration)(implicit context: EvaluationContext): UserDefinedClass = { val ClassDeclaration(docCommentOpt, _, className, paramList, bodyOpt, _) = decl val params = parameterModel(paramList, Some(context), docCommentOpt) def makeMethod(decl: FunctionDeclaration)(implicit context: EvaluationContext): UserDefinedMethod = { val FunctionDeclaration(docCommentOpt, attributes, functionName, paramList, body, _) = decl val evaluatedAttributes = evaluateAttributes(attributes) val aliases = getAliases(evaluatedAttributes).distinct val isPrivate = evaluatedAttributes.exists(_.name == Attributes.Private) val methodParams = parameterModel(paramList, Some(context), docCommentOpt) UserDefinedMethod(docCommentOpt, functionName, methodParams, paramList, body, context, isPrivate, aliases, decl) } val methods = bodyOpt.map(_.methods).getOrElse(Seq()).map(makeMethod) val klass = UserDefinedClass(docCommentOpt, className, context.namespaceOpt, params, methods) context.scopeStack.set(className, klass) klass } private def getAliases(evaluatedAttributes: Seq[EvaluatedAttribute])(implicit context: EvaluationContext): Seq[String] = { for { attribute ← evaluatedAttributes.filter(_.name == Attributes.Alias) arguments = attribute.argumentsOpt getOrElse Seq() boundParams = AliasParameterModel.params.bindTo(Arguments(arguments), context) alias = boundParams.validateString(AliasParameterModel.Params.Name).s } yield alias } protected case class EvaluatedAttribute(name: String, argumentsOpt: Option[Seq[EvaluatedArgument[SuspendedMashValue]]]) protected def 
evaluateAttributes(attributes: Seq[Attribute])(implicit context: EvaluationContext): Seq[EvaluatedAttribute] = attributes.map { attribute ⇒ val argumentsOpt = attribute.argumentsOpt.map(arguments ⇒ arguments.map(InvocationEvaluator.evaluateArgument)) EvaluatedAttribute(attribute.name, argumentsOpt) } private def getShortFlag(evaluatedAttributes: Seq[EvaluatedAttribute])(implicit context: EvaluationContext): Option[Char] = { val ShortFlag = Parameter(Some("shortName")) val params = ParameterModel(ShortFlag) for { attribute ← evaluatedAttributes.find(_.name == Attributes.ShortFlag) arguments = attribute.argumentsOpt getOrElse Seq() boundParams = params.bindTo(Arguments(arguments), context) name = boundParams.validateString(ShortFlag).s } yield if (name.length == 1) name.head else boundParams.throwInvalidArgument(ShortFlag, s"Short flag must be a single character, but was '$name'") } def makeParameter(param: FunctionParam, evaluationContextOpt: Option[EvaluationContext] = None, docCommentOpt: Option[DocComment] = None): Parameter = { val FunctionParam(attributes, nameOpt, isVariadic, defaultExprOpt, patternOpt, _) = param val shortFlagOpt = evaluationContextOpt.flatMap { implicit evaluationContextOpt ⇒ getShortFlag(evaluateAttributes(attributes)) } val isLazy = attributes.exists(_.name == Attributes.Lazy) val isFlag = attributes.exists(_.name == Attributes.Flag) val variadicAtLeastOne = attributes.exists(_.name == Attributes.AtLeastOne) val variadicFlatten = attributes.exists(_.name == Attributes.Flatten) val isNamedArgsParam = attributes.exists(_.name == Attributes.NamedArgs) val isSafe = attributes.exists(_.name == Attributes.Safe) val defaultValueGeneratorOpt: Option[ValueGenerator] = defaultExprOpt.map(defaultExpr ⇒ (context: EvaluationContext) ⇒ evaluate(defaultExpr)(context)) val docSummaryOpt = for { name ← nameOpt docComment ← docCommentOpt paramComment ← docComment.getParamComment(name) } yield paramComment.summary Parameter(nameOpt, docSummaryOpt, 
defaultValueGeneratorOpt = defaultValueGeneratorOpt, shortFlagOpt = shortFlagOpt, isVariadic = isVariadic, isFlag = isFlag, isLazy = isLazy, isNamedArgsParam = isNamedArgsParam, variadicAtLeastOne = variadicAtLeastOne, variadicFlatten = variadicFlatten, isSafe = isSafe, patternOpt = patternOpt.map(makeParamPattern)) } private def makeParamEntry(entry: ObjectPatternEntry): ParamPattern.ObjectEntry = ParamPattern.ObjectEntry(entry.field, entry.valuePatternOpt map makeParamPattern) def makeParamPattern(pattern: Pattern): ParamPattern = pattern match { case ObjectPattern(entries, _) ⇒ ParamPattern.Object(entries.map(makeParamEntry)) case HolePattern(_) ⇒ ParamPattern.Hole case IdentPattern(identifier, _) ⇒ ParamPattern.Ident(identifier) case ListPattern(patterns, _) ⇒ ParamPattern.List(patterns.map(makeParamPattern)) } def parameterModel(paramList: ParamList, evaluationContextOpt: Option[EvaluationContext] = None, docCommentOpt: Option[DocComment] = None): ParameterModel = { val evaluationContext = evaluationContextOpt.getOrElse(EvaluationContext(ScopeStack(Nil))) val parameters: Seq[Parameter] = paramList.params.map(makeParameter(_, Some(evaluationContext), docCommentOpt)) for (context ← evaluationContextOpt) verifyParameters(paramList)(context) ParameterModel(parameters) } private def verifyParameters(paramList: ParamList)(implicit context: EvaluationContext) { val params = paramList.params if (params.count(_.isVariadic) > 1) throw EvaluatorException("Multiple variadic parameters are not allowed") for ((name, params) ← params.groupBy(_.nameOpt).collect { case (Some(name), ps) if ps.length > 1 ⇒ name -> ps }.headOption) throw EvaluatorException(s"Duplicate parameter $name", params.lastOption.flatMap(sourceLocation)) } private def evaluateInterpolatedString(interpolatedString: InterpolatedString)(implicit context: EvaluationContext): MashString = { val InterpolatedString(start, parts, end, _) = interpolatedString val chunks = MashString(start, PathClass) +: parts.map { 
case StringPart(s) ⇒ MashString(s, PathClass) case ExprPart(expr) ⇒ evaluate(expr) match { case ms: MashString ⇒ ms case x ⇒ MashString(ToStringifier.stringify(x)) } } :+ MashString(end, PathClass) chunks.reduce(_ + _) } private def makeAnonymousFunction(paramList: ParamList, body: Expr)(implicit context: EvaluationContext) = AnonymousFunction(parameterModel(paramList, Some(context)), body, context) private def evaluateIfExpr(ifExpr: IfExpr)(implicit context: EvaluationContext) = { val IfExpr(cond, body, elseOpt, _) = ifExpr val result = evaluate(cond) if (result.isTruthy) evaluate(body) else elseOpt.map(evaluate).getOrElse(MashUnit) } }
mdr/mash
src/main/scala/com/github/mdr/mash/evaluator/Evaluator.scala
Scala
mit
17,511
import _root_.io.gatling.core.scenario.Simulation import ch.qos.logback.classic.{Level, LoggerContext} import io.gatling.core.Predef._ import io.gatling.http.Predef._ import org.slf4j.LoggerFactory import scala.concurrent.duration._ /** * Performance test for the FileUploadExercise entity. */ class FileUploadExerciseGatlingTest extends Simulation { val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext] // Log all HTTP requests //context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE")) // Log failed HTTP requests //context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG")) val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://localhost:8080""" val httpConf = http .baseUrl(baseURL) .inferHtmlResources() .acceptHeader("*/*") .acceptEncodingHeader("gzip, deflate") .acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3") .connectionHeader("keep-alive") .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0") .silentResources // Silence all resources like css or css so they don't clutter the results val headers_http = Map( "Accept" -> """application/json""" ) val headers_http_authentication = Map( "Content-Type" -> """application/json""", "Accept" -> """application/json""" ) val headers_http_authenticated = Map( "Accept" -> """application/json""", "Authorization" -> "${access_token}" ) val scn = scenario("Test the FileUploadExercise entity") .exec(http("First unauthenticated request") .get("/api/account") .headers(headers_http) .check(status.is(401)) ).exitHereIfFailed .pause(10) .exec(http("Authentication") .post("/api/authenticate") .headers(headers_http_authentication) .body(StringBody("""{"username":"admin", "password":"admin"}""")).asJson .check(header("Authorization").saveAs("access_token"))).exitHereIfFailed .pause(2) .exec(http("Authenticated request") .get("/api/account") .headers(headers_http_authenticated) .check(status.is(200))) 
.pause(10) .repeat(2) { exec(http("Get all fileUploadExercises") .get("/api/file-upload-exercises") .headers(headers_http_authenticated) .check(status.is(200))) .pause(10 seconds, 20 seconds) .exec(http("Create new fileUploadExercise") .post("/api/file-upload-exercises") .headers(headers_http_authenticated) .body(StringBody("""{ "id":null , "filePattern":"SAMPLE_TEXT" }""")).asJson .check(status.is(201)) .check(headerRegex("Location", "(.*)").saveAs("new_fileUploadExercise_url"))).exitHereIfFailed .pause(10) .repeat(5) { exec(http("Get created fileUploadExercise") .get("${new_fileUploadExercise_url}") .headers(headers_http_authenticated)) .pause(10) } .exec(http("Delete created fileUploadExercise") .delete("${new_fileUploadExercise_url}") .headers(headers_http_authenticated)) .pause(10) } val users = scenario("Users").exec(scn) setUp( users.inject(rampUsers(Integer.getInteger("users", 100)) during(Integer.getInteger("ramp", 1) minutes)) ).protocols(httpConf) }
ls1intum/ArTEMiS
src/test/gatling/user-files/simulations/FileUploadExerciseGatlingTest.scala
Scala
mit
3,641
package com.arcusys.valamis.web.servlet.public.parameters import com.arcusys.valamis.lesson.model.LessonType import com.arcusys.valamis.member.model.MemberTypes import com.arcusys.valamis.web.servlet.public.LessonServlet import com.arcusys.valamis.web.servlet.public.model.request.MemberRequest import org.json4s.JsonAST.JString import org.json4s.CustomSerializer /** * Created by pkornilov on 1/30/17. */ trait LessonParameters extends BaseParameters { self: LessonServlet => def lessonType = params.getAs[String]("lessonType") map { case "tincan" => LessonType.Tincan case "scorm" => LessonType.Scorm case tpe => haltWithBadRequest("Wrong lesson type: " + tpe) } def memberType = params.as[String]("memberType") match { case "role" => MemberTypes.Role case "user" => MemberTypes.User case "userGroup" => MemberTypes.UserGroup case "organization" => MemberTypes.Organization case v => haltWithBadRequest(s"Wrong memberType value: " + v) } def ratingScore = params.as[Double]("score") }
arcusys/Valamis
valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/public/parameters/LessonParameters.scala
Scala
gpl-3.0
1,039
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.carbondata.view.testutil object Tpcds_1_4_QueryBatch { // should be random generated based on scale // RC=ulist(random(1, rowcount("store_sales")/5,uniform),5); val rc = Array(1000000, 1000000, 1000000, 1000000, 1000000) // Queries the TPCDS 1.4 queries using the qualifcations values in the templates. 
val tpcds1_4Queries = Seq( ("q1", """ | WITH customer_total_return AS | (SELECT sr_customer_sk AS ctr_customer_sk, sr_store_sk AS ctr_store_sk, | sum(sr_return_amt) AS ctr_total_return | FROM store_returns, date_dim | WHERE sr_returned_date_sk = d_date_sk AND d_year = 2000 | GROUP BY sr_customer_sk, sr_store_sk) | SELECT c_customer_id | FROM customer_total_return ctr1, store, customer | WHERE ctr1.ctr_total_return > | (SELECT avg(ctr_total_return)*1.2 | FROM customer_total_return ctr2 | WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk) | AND s_store_sk = ctr1.ctr_store_sk | AND s_state = 'TN' | AND ctr1.ctr_customer_sk = c_customer_sk | ORDER BY c_customer_id LIMIT 100 """.stripMargin), ("q2", """ | WITH wscs as | (SELECT sold_date_sk, sales_price | FROM (SELECT ws_sold_date_sk sold_date_sk, ws_ext_sales_price sales_price | FROM web_sales) x | UNION ALL | (SELECT cs_sold_date_sk sold_date_sk, cs_ext_sales_price sales_price | FROM catalog_sales)), | wswscs AS | (SELECT d_week_seq, | sum(case when (d_day_name='Sunday') then sales_price else null end) sun_sales, | sum(case when (d_day_name='Monday') then sales_price else null end) mon_sales, | sum(case when (d_day_name='Tuesday') then sales_price else null end) tue_sales, | sum(case when (d_day_name='Wednesday') then sales_price else null end) wed_sales, | sum(case when (d_day_name='Thursday') then sales_price else null end) thu_sales, | sum(case when (d_day_name='Friday') then sales_price else null end) fri_sales, | sum(case when (d_day_name='Saturday') then sales_price else null end) sat_sales | FROM wscs, date_dim | WHERE d_date_sk = sold_date_sk | GROUP BY d_week_seq) | SELECT d_week_seq1 | ,round(sun_sales1/sun_sales2,2) | ,round(mon_sales1/mon_sales2,2) | ,round(tue_sales1/tue_sales2,2) | ,round(wed_sales1/wed_sales2,2) | ,round(thu_sales1/thu_sales2,2) | ,round(fri_sales1/fri_sales2,2) | ,round(sat_sales1/sat_sales2,2) | FROM | (SELECT wswscs.d_week_seq d_week_seq1 | ,sun_sales sun_sales1 | ,mon_sales mon_sales1 | 
,tue_sales tue_sales1 | ,wed_sales wed_sales1 | ,thu_sales thu_sales1 | ,fri_sales fri_sales1 | ,sat_sales sat_sales1 | FROM wswscs,date_dim | WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001) y, | (SELECT wswscs.d_week_seq d_week_seq2 | ,sun_sales sun_sales2 | ,mon_sales mon_sales2 | ,tue_sales tue_sales2 | ,wed_sales wed_sales2 | ,thu_sales thu_sales2 | ,fri_sales fri_sales2 | ,sat_sales sat_sales2 | FROM wswscs, date_dim | WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001 + 1) z | WHERE d_week_seq1=d_week_seq2-53 | ORDER BY d_week_seq1 """.stripMargin), ("q3", """ | SELECT dt.d_year, item.i_brand_id brand_id, item.i_brand brand,SUM(ss_ext_sales_price) | sum_agg | FROM date_dim dt, store_sales, item | WHERE dt.d_date_sk = store_sales.ss_sold_date_sk | AND store_sales.ss_item_sk = item.i_item_sk | AND item.i_manufact_id = 128 | AND dt.d_moy=11 | GROUP BY dt.d_year, item.i_brand, item.i_brand_id | ORDER BY dt.d_year, sum_agg desc, brand_id | LIMIT 100 """.stripMargin), ("q4", """ |WITH year_total AS ( | SELECT c_customer_id customer_id, | c_first_name customer_first_name, | c_last_name customer_last_name, | c_preferred_cust_flag customer_preferred_cust_flag, | c_birth_country customer_birth_country, | c_login customer_login, | c_email_address customer_email_address, | d_year dyear, | sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt) | +ss_ext_sales_price)/2) year_total, | 's' sale_type | FROM customer, store_sales, date_dim | WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk | GROUP BY c_customer_id, | c_first_name, | c_last_name, | c_preferred_cust_flag, | c_birth_country, | c_login, | c_email_address, | d_year | UNION ALL | SELECT c_customer_id customer_id, | c_first_name customer_first_name, | c_last_name customer_last_name, | c_preferred_cust_flag customer_preferred_cust_flag, | c_birth_country customer_birth_country, | c_login customer_login, | c_email_address customer_email_address, | d_year 
dyear, | sum((((cs_ext_list_price-cs_ext_wholesale_cost-cs_ext_discount_amt) | +cs_ext_sales_price)/2) ) year_total, | 'c' sale_type | FROM customer, catalog_sales, date_dim | WHERE c_customer_sk = cs_bill_customer_sk AND cs_sold_date_sk = d_date_sk | GROUP BY c_customer_id, | c_first_name, | c_last_name, | c_preferred_cust_flag, | c_birth_country, | c_login, | c_email_address, | d_year | UNION ALL | SELECT c_customer_id customer_id | ,c_first_name customer_first_name | ,c_last_name customer_last_name | ,c_preferred_cust_flag customer_preferred_cust_flag | ,c_birth_country customer_birth_country | ,c_login customer_login | ,c_email_address customer_email_address | ,d_year dyear | ,sum((((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt) | +ws_ext_sales_price)/2) ) year_total | ,'w' sale_type | FROM customer, web_sales, date_dim | WHERE c_customer_sk = ws_bill_customer_sk AND ws_sold_date_sk = d_date_sk | GROUP BY c_customer_id, | c_first_name, | c_last_name, | c_preferred_cust_flag, | c_birth_country, | c_login, | c_email_address, | d_year) | SELECT | t_s_secyear.customer_id, | t_s_secyear.customer_first_name, | t_s_secyear.customer_last_name, | t_s_secyear.customer_preferred_cust_flag, | t_s_secyear.customer_birth_country, | t_s_secyear.customer_login, | t_s_secyear.customer_email_address | FROM year_total t_s_firstyear, year_total t_s_secyear, year_total t_c_firstyear, | year_total t_c_secyear, year_total t_w_firstyear, year_total t_w_secyear | WHERE t_s_secyear.customer_id = t_s_firstyear.customer_id | and t_s_firstyear.customer_id = t_c_secyear.customer_id | and t_s_firstyear.customer_id = t_c_firstyear.customer_id | and t_s_firstyear.customer_id = t_w_firstyear.customer_id | and t_s_firstyear.customer_id = t_w_secyear.customer_id | and t_s_firstyear.sale_type = 's' | and t_c_firstyear.sale_type = 'c' | and t_w_firstyear.sale_type = 'w' | and t_s_secyear.sale_type = 's' | and t_c_secyear.sale_type = 'c' | and t_w_secyear.sale_type = 'w' | and 
t_s_firstyear.dyear = 2001 | and t_s_secyear.dyear = 2001+1 | and t_c_firstyear.dyear = 2001 | and t_c_secyear.dyear = 2001+1 | and t_w_firstyear.dyear = 2001 | and t_w_secyear.dyear = 2001+1 | and t_s_firstyear.year_total > 0 | and t_c_firstyear.year_total > 0 | and t_w_firstyear.year_total > 0 | and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / | t_c_firstyear.year_total else null end | > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / | t_s_firstyear.year_total else null end | and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / | t_c_firstyear.year_total else null end | > case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / | t_w_firstyear.year_total else null end | ORDER BY | t_s_secyear.customer_id, | t_s_secyear.customer_first_name, | t_s_secyear.customer_last_name, | t_s_secyear.customer_preferred_cust_flag, | t_s_secyear.customer_birth_country, | t_s_secyear.customer_login, | t_s_secyear.customer_email_address | LIMIT 100 """.stripMargin), // Modifications: "+ days" -> date_add // Modifications: "||" -> concat ("q5", """ | WITH ssr AS | (SELECT s_store_id, | sum(sales_price) as sales, | sum(profit) as profit, | sum(return_amt) as returns, | sum(net_loss) as profit_loss | FROM | (SELECT ss_store_sk as store_sk, | ss_sold_date_sk as date_sk, | ss_ext_sales_price as sales_price, | ss_net_profit as profit, | cast(0 as decimal(7,2)) as return_amt, | cast(0 as decimal(7,2)) as net_loss | FROM store_sales | UNION ALL | SELECT sr_store_sk as store_sk, | sr_returned_date_sk as date_sk, | cast(0 as decimal(7,2)) as sales_price, | cast(0 as decimal(7,2)) as profit, | sr_return_amt as return_amt, | sr_net_loss as net_loss | FROM store_returns) | salesreturns, date_dim, store | WHERE date_sk = d_date_sk | and d_date between cast('2000-08-23' as date) | and ((cast('2000-08-23' as date) + interval 14 days)) | and store_sk = s_store_sk | GROUP BY s_store_id), | csr AS | (SELECT 
cp_catalog_page_id, | sum(sales_price) as sales, | sum(profit) as profit, | sum(return_amt) as returns, | sum(net_loss) as profit_loss | FROM | (SELECT cs_catalog_page_sk as page_sk, | cs_sold_date_sk as date_sk, | cs_ext_sales_price as sales_price, | cs_net_profit as profit, | cast(0 as decimal(7,2)) as return_amt, | cast(0 as decimal(7,2)) as net_loss | FROM catalog_sales | UNION ALL | SELECT cr_catalog_page_sk as page_sk, | cr_returned_date_sk as date_sk, | cast(0 as decimal(7,2)) as sales_price, | cast(0 as decimal(7,2)) as profit, | cr_return_amount as return_amt, | cr_net_loss as net_loss | from catalog_returns | ) salesreturns, date_dim, catalog_page | WHERE date_sk = d_date_sk | and d_date between cast('2000-08-23' as date) | and ((cast('2000-08-23' as date) + interval 14 days)) | and page_sk = cp_catalog_page_sk | GROUP BY cp_catalog_page_id) | , | wsr AS | (SELECT web_site_id, | sum(sales_price) as sales, | sum(profit) as profit, | sum(return_amt) as returns, | sum(net_loss) as profit_loss | from | (select ws_web_site_sk as wsr_web_site_sk, | ws_sold_date_sk as date_sk, | ws_ext_sales_price as sales_price, | ws_net_profit as profit, | cast(0 as decimal(7,2)) as return_amt, | cast(0 as decimal(7,2)) as net_loss | from web_sales | union all | select ws_web_site_sk as wsr_web_site_sk, | wr_returned_date_sk as date_sk, | cast(0 as decimal(7,2)) as sales_price, | cast(0 as decimal(7,2)) as profit, | wr_return_amt as return_amt, | wr_net_loss as net_loss | FROM web_returns LEFT OUTER JOIN web_sales on | ( wr_item_sk = ws_item_sk | and wr_order_number = ws_order_number) | ) salesreturns, date_dim, web_site | WHERE date_sk = d_date_sk | and d_date between cast('2000-08-23' as date) | and ((cast('2000-08-23' as date) + interval 14 days)) | and wsr_web_site_sk = web_site_sk | GROUP BY web_site_id) | SELECT channel, | id, | sum(sales) as sales, | sum(returns) as returns, | sum(profit) as profit | from | (select 'store channel' as channel, | concat('store', 
s_store_id) as id, | sales, | returns, | (profit - profit_loss) as profit | FROM ssr | UNION ALL | select 'catalog channel' as channel, | concat('catalog_page', cp_catalog_page_id) as id, | sales, | returns, | (profit - profit_loss) as profit | FROM csr | UNION ALL | SELECT 'web channel' as channel, | concat('web_site', web_site_id) as id, | sales, | returns, | (profit - profit_loss) as profit | FROM wsr | ) x | GROUP BY ROLLUP (channel, id) | ORDER BY channel, id | LIMIT 100 """.stripMargin), ("q6", """ | SELECT a.ca_state state, count(*) cnt | FROM | customer_address a, customer c, store_sales s, date_dim d, item i | WHERE a.ca_address_sk = c.c_current_addr_sk | AND c.c_customer_sk = s.ss_customer_sk | AND s.ss_sold_date_sk = d.d_date_sk | AND s.ss_item_sk = i.i_item_sk | AND d.d_month_seq = | (SELECT distinct (d_month_seq) FROM date_dim | WHERE d_year = 2000 AND d_moy = 1) | AND i.i_current_price > 1.2 * | (SELECT avg(j.i_current_price) FROM item j | WHERE j.i_category = i.i_category) | GROUP BY a.ca_state | HAVING count(*) >= 10 | ORDER BY cnt LIMIT 100 """.stripMargin), ("q7", """ | SELECT i_item_id, | avg(ss_quantity) agg1, | avg(ss_list_price) agg2, | avg(ss_coupon_amt) agg3, | avg(ss_sales_price) agg4 | FROM store_sales, customer_demographics, date_dim, item, promotion | WHERE ss_sold_date_sk = d_date_sk AND | ss_item_sk = i_item_sk AND | ss_cdemo_sk = cd_demo_sk AND | ss_promo_sk = p_promo_sk AND | cd_gender = 'M' AND | cd_marital_status = 'S' AND | cd_education_status = 'College' AND | (p_channel_email = 'N' or p_channel_event = 'N') AND | d_year = 2000 | GROUP BY i_item_id | ORDER BY i_item_id LIMIT 100 """.stripMargin), ("q8", """ | select s_store_name, sum(ss_net_profit) | from store_sales, date_dim, store, | (SELECT ca_zip | from ( | (SELECT substr(ca_zip,1,5) ca_zip FROM customer_address | WHERE substr(ca_zip,1,5) IN ( | '24128','76232','65084','87816','83926','77556','20548', | '26231','43848','15126','91137','61265','98294','25782', | 
'17920','18426','98235','40081','84093','28577','55565', | '17183','54601','67897','22752','86284','18376','38607', | '45200','21756','29741','96765','23932','89360','29839', | '25989','28898','91068','72550','10390','18845','47770', | '82636','41367','76638','86198','81312','37126','39192', | '88424','72175','81426','53672','10445','42666','66864', | '66708','41248','48583','82276','18842','78890','49448', | '14089','38122','34425','79077','19849','43285','39861', | '66162','77610','13695','99543','83444','83041','12305', | '57665','68341','25003','57834','62878','49130','81096', | '18840','27700','23470','50412','21195','16021','76107', | '71954','68309','18119','98359','64544','10336','86379', | '27068','39736','98569','28915','24206','56529','57647', | '54917','42961','91110','63981','14922','36420','23006', | '67467','32754','30903','20260','31671','51798','72325', | '85816','68621','13955','36446','41766','68806','16725', | '15146','22744','35850','88086','51649','18270','52867', | '39972','96976','63792','11376','94898','13595','10516', | '90225','58943','39371','94945','28587','96576','57855', | '28488','26105','83933','25858','34322','44438','73171', | '30122','34102','22685','71256','78451','54364','13354', | '45375','40558','56458','28286','45266','47305','69399', | '83921','26233','11101','15371','69913','35942','15882', | '25631','24610','44165','99076','33786','70738','26653', | '14328','72305','62496','22152','10144','64147','48425', | '14663','21076','18799','30450','63089','81019','68893', | '24996','51200','51211','45692','92712','70466','79994', | '22437','25280','38935','71791','73134','56571','14060', | '19505','72425','56575','74351','68786','51650','20004', | '18383','76614','11634','18906','15765','41368','73241', | '76698','78567','97189','28545','76231','75691','22246', | '51061','90578','56691','68014','51103','94167','57047', | '14867','73520','15734','63435','25733','35474','24676', | 
'94627','53535','17879','15559','53268','59166','11928', | '59402','33282','45721','43933','68101','33515','36634', | '71286','19736','58058','55253','67473','41918','19515', | '36495','19430','22351','77191','91393','49156','50298', | '87501','18652','53179','18767','63193','23968','65164', | '68880','21286','72823','58470','67301','13394','31016', | '70372','67030','40604','24317','45748','39127','26065', | '77721','31029','31880','60576','24671','45549','13376', | '50016','33123','19769','22927','97789','46081','72151', | '15723','46136','51949','68100','96888','64528','14171', | '79777','28709','11489','25103','32213','78668','22245', | '15798','27156','37930','62971','21337','51622','67853', | '10567','38415','15455','58263','42029','60279','37125', | '56240','88190','50308','26859','64457','89091','82136', | '62377','36233','63837','58078','17043','30010','60099', | '28810','98025','29178','87343','73273','30469','64034', | '39516','86057','21309','90257','67875','40162','11356', | '73650','61810','72013','30431','22461','19512','13375', | '55307','30625','83849','68908','26689','96451','38193', | '46820','88885','84935','69035','83144','47537','56616', | '94983','48033','69952','25486','61547','27385','61860', | '58048','56910','16807','17871','35258','31387','35458', | '35576')) | INTERSECT | (select ca_zip | FROM | (SELECT substr(ca_zip,1,5) ca_zip,count(*) cnt | FROM customer_address, customer | WHERE ca_address_sk = c_current_addr_sk and | c_preferred_cust_flag='Y' | group by ca_zip | having count(*) > 10) A1) | ) A2 | ) V1 | where ss_store_sk = s_store_sk | and ss_sold_date_sk = d_date_sk | and d_qoy = 2 and d_year = 1998 | and (substr(s_zip,1,2) = substr(V1.ca_zip,1,2)) | group by s_store_name | order by s_store_name LIMIT 100 """.stripMargin), ("q9", s""" |select case when (select count(*) from store_sales | where ss_quantity between 1 and 20) > ${ rc(0) } | then (select avg(ss_ext_discount_amt) from store_sales | where ss_quantity between 1 and 20) | 
else (select avg(ss_net_paid) from store_sales | where ss_quantity between 1 and 20) end bucket1 , | case when (select count(*) from store_sales | where ss_quantity between 21 and 40) > ${ rc(1) } | then (select avg(ss_ext_discount_amt) from store_sales | where ss_quantity between 21 and 40) | else (select avg(ss_net_paid) from store_sales | where ss_quantity between 21 and 40) end bucket2, | case when (select count(*) from store_sales | where ss_quantity between 41 and 60) > ${ rc(2) } | then (select avg(ss_ext_discount_amt) from store_sales | where ss_quantity between 41 and 60) | else (select avg(ss_net_paid) from store_sales | where ss_quantity between 41 and 60) end bucket3, | case when (select count(*) from store_sales | where ss_quantity between 61 and 80) > ${ rc(3) } | then (select avg(ss_ext_discount_amt) from store_sales | where ss_quantity between 61 and 80) | else (select avg(ss_net_paid) from store_sales | where ss_quantity between 61 and 80) end bucket4, | case when (select count(*) from store_sales | where ss_quantity between 81 and 100) > ${ rc(4) } | then (select avg(ss_ext_discount_amt) from store_sales | where ss_quantity between 81 and 100) | else (select avg(ss_net_paid) from store_sales | where ss_quantity between 81 and 100) end bucket5 |from reason |where r_reason_sk = 1 """.stripMargin), ("q10", """ | select | cd_gender, cd_marital_status, cd_education_status, count(*) cnt1, | cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3, | cd_dep_count, count(*) cnt4, cd_dep_employed_count, count(*) cnt5, | cd_dep_college_count, count(*) cnt6 | from | customer c, customer_address ca, customer_demographics | where | c.c_current_addr_sk = ca.ca_address_sk and | ca_county in ('Rush County','Toole County','Jefferson County', | 'Dona Ana County','La Porte County') and | cd_demo_sk = c.c_current_cdemo_sk AND | exists (select * from store_sales, date_dim | where c.c_customer_sk = ss_customer_sk AND | ss_sold_date_sk = d_date_sk AND | 
d_year = 2002 AND | d_moy between 1 AND 1+3) AND | (exists (select * from web_sales, date_dim | where c.c_customer_sk = ws_bill_customer_sk AND | ws_sold_date_sk = d_date_sk AND | d_year = 2002 AND | d_moy between 1 AND 1+3) or | exists (select * from catalog_sales, date_dim | where c.c_customer_sk = cs_ship_customer_sk AND | cs_sold_date_sk = d_date_sk AND | d_year = 2002 AND | d_moy between 1 AND 1+3)) | group by cd_gender, | cd_marital_status, | cd_education_status, | cd_purchase_estimate, | cd_credit_rating, | cd_dep_count, | cd_dep_employed_count, | cd_dep_college_count | order by cd_gender, | cd_marital_status, | cd_education_status, | cd_purchase_estimate, | cd_credit_rating, | cd_dep_count, | cd_dep_employed_count, | cd_dep_college_count |LIMIT 100 """.stripMargin), ("q11", """ | with year_total as ( | select c_customer_id customer_id | ,c_first_name customer_first_name | ,c_last_name customer_last_name | ,c_preferred_cust_flag customer_preferred_cust_flag | ,c_birth_country customer_birth_country | ,c_login customer_login | ,c_email_address customer_email_address | ,d_year dyear | ,sum(ss_ext_list_price-ss_ext_discount_amt) year_total | ,'s' sale_type | from customer, store_sales, date_dim | where c_customer_sk = ss_customer_sk | and ss_sold_date_sk = d_date_sk | group by c_customer_id | ,c_first_name | ,c_last_name | ,d_year | ,c_preferred_cust_flag | ,c_birth_country | ,c_login | ,c_email_address | ,d_year | union all | select c_customer_id customer_id | ,c_first_name customer_first_name | ,c_last_name customer_last_name | ,c_preferred_cust_flag customer_preferred_cust_flag | ,c_birth_country customer_birth_country | ,c_login customer_login | ,c_email_address customer_email_address | ,d_year dyear | ,sum(ws_ext_list_price-ws_ext_discount_amt) year_total | ,'w' sale_type | from customer, web_sales, date_dim | where c_customer_sk = ws_bill_customer_sk | and ws_sold_date_sk = d_date_sk | group by | c_customer_id, c_first_name, c_last_name, 
c_preferred_cust_flag, c_birth_country, | c_login, c_email_address, d_year) | select | t_s_secyear.customer_preferred_cust_flag | from year_total t_s_firstyear | ,year_total t_s_secyear | ,year_total t_w_firstyear | ,year_total t_w_secyear | where t_s_secyear.customer_id = t_s_firstyear.customer_id | and t_s_firstyear.customer_id = t_w_secyear.customer_id | and t_s_firstyear.customer_id = t_w_firstyear.customer_id | and t_s_firstyear.sale_type = 's' | and t_w_firstyear.sale_type = 'w' | and t_s_secyear.sale_type = 's' | and t_w_secyear.sale_type = 'w' | and t_s_firstyear.dyear = 2001 | and t_s_secyear.dyear = 2001+1 | and t_w_firstyear.dyear = 2001 | and t_w_secyear.dyear = 2001+1 | and t_s_firstyear.year_total > 0 | and t_w_firstyear.year_total > 0 | and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / | t_w_firstyear.year_total else null end | > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / | t_s_firstyear.year_total else null end | order by t_s_secyear.customer_preferred_cust_flag | LIMIT 100 """.stripMargin), // Modifications: "+ days" -> date_add ("q12", """ | select | i_item_desc, i_category, i_class, i_current_price, | sum(ws_ext_sales_price) as itemrevenue, | sum(ws_ext_sales_price)*100/sum(sum(ws_ext_sales_price)) over | (partition by i_class) as revenueratio | from | web_sales, item, date_dim | where | ws_item_sk = i_item_sk | and i_category in ('Sports', 'Books', 'Home') | and ws_sold_date_sk = d_date_sk | and d_date between cast('1999-02-22' as date) | and (cast('1999-02-22' as date) + interval 30 days) | group by | i_item_id, i_item_desc, i_category, i_class, i_current_price | order by | i_category, i_class, i_item_id, i_item_desc, revenueratio | LIMIT 100 """.stripMargin), ("q13", """ | select avg(ss_quantity) | ,avg(ss_ext_sales_price) | ,avg(ss_ext_wholesale_cost) | ,sum(ss_ext_wholesale_cost) | from store_sales | ,store | ,customer_demographics | ,household_demographics | ,customer_address | ,date_dim | 
where s_store_sk = ss_store_sk | and ss_sold_date_sk = d_date_sk and d_year = 2001 | and((ss_hdemo_sk=hd_demo_sk | and cd_demo_sk = ss_cdemo_sk | and cd_marital_status = 'M' | and cd_education_status = 'Advanced Degree' | and ss_sales_price between 100.00 and 150.00 | and hd_dep_count = 3 | )or | (ss_hdemo_sk=hd_demo_sk | and cd_demo_sk = ss_cdemo_sk | and cd_marital_status = 'S' | and cd_education_status = 'College' | and ss_sales_price between 50.00 and 100.00 | and hd_dep_count = 1 | ) or | (ss_hdemo_sk=hd_demo_sk | and cd_demo_sk = ss_cdemo_sk | and cd_marital_status = 'W' | and cd_education_status = '2 yr Degree' | and ss_sales_price between 150.00 and 200.00 | and hd_dep_count = 1 | )) | and((ss_addr_sk = ca_address_sk | and ca_country = 'United States' | and ca_state in ('TX', 'OH', 'TX') | and ss_net_profit between 100 and 200 | ) or | (ss_addr_sk = ca_address_sk | and ca_country = 'United States' | and ca_state in ('OR', 'NM', 'KY') | and ss_net_profit between 150 and 300 | ) or | (ss_addr_sk = ca_address_sk | and ca_country = 'United States' | and ca_state in ('VA', 'TX', 'MS') | and ss_net_profit between 50 and 250 | )) """.stripMargin), ("q14a", """ |with cross_items as | (select i_item_sk ss_item_sk | from item, | (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id | category_id | from store_sales, item iss, date_dim d1 | where ss_item_sk = iss.i_item_sk and ss_sold_date_sk = d1.d_date_sk | and d1.d_year between 1999 AND 1999 + 2 | intersect | select ics.i_brand_id, ics.i_class_id, ics.i_category_id | from catalog_sales, item ics, date_dim d2 | where cs_item_sk = ics.i_item_sk | and cs_sold_date_sk = d2.d_date_sk | and d2.d_year between 1999 AND 1999 + 2 | intersect | select iws.i_brand_id, iws.i_class_id, iws.i_category_id | from web_sales, item iws, date_dim d3 | where ws_item_sk = iws.i_item_sk | and ws_sold_date_sk = d3.d_date_sk | and d3.d_year between 1999 AND 1999 + 2) x | where i_brand_id = brand_id | and i_class_id = 
class_id | and i_category_id = category_id |), | avg_sales as | (select avg(quantity*list_price) average_sales | from ( | select ss_quantity quantity, ss_list_price list_price | from store_sales, date_dim | where ss_sold_date_sk = d_date_sk | and d_year between 1999 and 2001 | union all | select cs_quantity quantity, cs_list_price list_price | from catalog_sales, date_dim | where cs_sold_date_sk = d_date_sk | and d_year between 1999 and 1999 + 2 | union all | select ws_quantity quantity, ws_list_price list_price | from web_sales, date_dim | where ws_sold_date_sk = d_date_sk | and d_year between 1999 and 1999 + 2) x) | select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales) | from( | select 'store' channel, i_brand_id,i_class_id | ,i_category_id,sum(ss_quantity*ss_list_price) sales | , count(*) number_sales | from store_sales, item, date_dim | where ss_item_sk in (select ss_item_sk from cross_items) | and ss_item_sk = i_item_sk | and ss_sold_date_sk = d_date_sk | and d_year = 1999+2 | and d_moy = 11 | group by i_brand_id,i_class_id,i_category_id | having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales) | union all | select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum | (cs_quantity*cs_list_price) sales, count(*) number_sales | from catalog_sales, item, date_dim | where cs_item_sk in (select ss_item_sk from cross_items) | and cs_item_sk = i_item_sk | and cs_sold_date_sk = d_date_sk | and d_year = 1999+2 | and d_moy = 11 | group by i_brand_id,i_class_id,i_category_id | having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales) | union all | select 'web' channel, i_brand_id,i_class_id,i_category_id, sum | (ws_quantity*ws_list_price) sales , count(*) number_sales | from web_sales, item, date_dim | where ws_item_sk in (select ss_item_sk from cross_items) | and ws_item_sk = i_item_sk | and ws_sold_date_sk = d_date_sk | and d_year = 1999+2 | and d_moy = 11 | group by 
i_brand_id,i_class_id,i_category_id | having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales) | ) y | group by rollup (channel, i_brand_id,i_class_id,i_category_id) | order by channel,i_brand_id,i_class_id,i_category_id | limit 100 """.stripMargin), ("q14b", """ | with cross_items as | (select i_item_sk ss_item_sk | from item, | (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id | category_id | from store_sales, item iss, date_dim d1 | where ss_item_sk = iss.i_item_sk | and ss_sold_date_sk = d1.d_date_sk | and d1.d_year between 1999 AND 1999 + 2 | intersect | select ics.i_brand_id, ics.i_class_id, ics.i_category_id | from catalog_sales, item ics, date_dim d2 | where cs_item_sk = ics.i_item_sk | and cs_sold_date_sk = d2.d_date_sk | and d2.d_year between 1999 AND 1999 + 2 | intersect | select iws.i_brand_id, iws.i_class_id, iws.i_category_id | from web_sales, item iws, date_dim d3 | where ws_item_sk = iws.i_item_sk | and ws_sold_date_sk = d3.d_date_sk | and d3.d_year between 1999 AND 1999 + 2) x | where i_brand_id = brand_id | and i_class_id = class_id | and i_category_id = category_id | ), | avg_sales as | (select avg(quantity*list_price) average_sales | from (select ss_quantity quantity, ss_list_price list_price | from store_sales, date_dim | where ss_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2 | union all | select cs_quantity quantity, cs_list_price list_price | from catalog_sales, date_dim | where cs_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2 | union all | select ws_quantity quantity, ws_list_price list_price | from web_sales, date_dim | where ws_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2) x) | select * from | (select 'store' channel, i_brand_id,i_class_id,i_category_id | ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales | from store_sales, item, date_dim | where ss_item_sk in (select ss_item_sk from cross_items) | and ss_item_sk = i_item_sk | and 
ss_sold_date_sk = d_date_sk | and d_week_seq = (select d_week_seq from date_dim | where d_year = 1999 + 1 and d_moy = 12 and d_dom = 11) | group by i_brand_id,i_class_id,i_category_id | having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year, | (select 'store' channel, i_brand_id,i_class_id | ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales | from store_sales, item, date_dim | where ss_item_sk in (select ss_item_sk from cross_items) | and ss_item_sk = i_item_sk | and ss_sold_date_sk = d_date_sk | and d_week_seq = (select d_week_seq from date_dim | where d_year = 1999 and d_moy = 12 and d_dom = 11) | group by i_brand_id,i_class_id,i_category_id | having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year | where this_year.i_brand_id= last_year.i_brand_id | and this_year.i_class_id = last_year.i_class_id | and this_year.i_category_id = last_year.i_category_id | order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year | .i_category_id | limit 100 """.stripMargin), ("q15", """ | select ca_zip, sum(cs_sales_price) | from catalog_sales, customer, customer_address, date_dim | where cs_bill_customer_sk = c_customer_sk | and c_current_addr_sk = ca_address_sk | and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', | '85392', '85460', '80348', '81792') | or ca_state in ('CA','WA','GA') | or cs_sales_price > 500) | and cs_sold_date_sk = d_date_sk | and d_qoy = 2 and d_year = 2001 | group by ca_zip | order by ca_zip | limit 100 """.stripMargin), // Modifications: " -> ` ("q16", """ | select | count(distinct cs_order_number) as `order count`, | sum(cs_ext_ship_cost) as `total shipping cost`, | sum(cs_net_profit) as `total net profit` | from | catalog_sales cs1, date_dim, customer_address, call_center | where | d_date between '2002-02-01' and (cast('2002-02-01' as date) + interval 60 days) | and cs1.cs_ship_date_sk = d_date_sk | and 
cs1.cs_ship_addr_sk = ca_address_sk | and ca_state = 'GA' | and cs1.cs_call_center_sk = cc_call_center_sk | and cc_county in ('Williamson County','Williamson County','Williamson County', | 'Williamson County', 'Williamson County') | and exists (select * | from catalog_sales cs2 | where cs1.cs_order_number = cs2.cs_order_number | and cs1.cs_warehouse_sk <> cs2.cs_warehouse_sk) | and not exists(select * | from catalog_returns cr1 | where cs1.cs_order_number = cr1.cr_order_number) | order by count(distinct cs_order_number) | limit 100 """.stripMargin), ("q17", """ | select i_item_id | ,i_item_desc | ,s_state | ,count(ss_quantity) as store_sales_quantitycount | ,avg(ss_quantity) as store_sales_quantityave | ,stddev_samp(ss_quantity) as store_sales_quantitystdev | ,stddev_samp(ss_quantity)/avg(ss_quantity) as store_sales_quantitycov | ,count(sr_return_quantity) as_store_returns_quantitycount | ,avg(sr_return_quantity) as_store_returns_quantityave | ,stddev_samp(sr_return_quantity) as_store_returns_quantitystdev | ,stddev_samp(sr_return_quantity)/avg(sr_return_quantity) as | store_returns_quantitycov | ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as | catalog_sales_quantityave | ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitystdev | ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitycov | from store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3, | store, item | where d1.d_quarter_name = '2001Q1' | and d1.d_date_sk = ss_sold_date_sk | and i_item_sk = ss_item_sk | and s_store_sk = ss_store_sk | and ss_customer_sk = sr_customer_sk | and ss_item_sk = sr_item_sk | and ss_ticket_number = sr_ticket_number | and sr_returned_date_sk = d2.d_date_sk | and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3') | and sr_customer_sk = cs_bill_customer_sk | and sr_item_sk = cs_item_sk | and cs_sold_date_sk = d3.d_date_sk | and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3') | group by i_item_id, 
i_item_desc, s_state | order by i_item_id, i_item_desc, s_state | limit 100 """.stripMargin), // Modifications: "numeric" -> "decimal" ("q18", """ | select i_item_id, | ca_country, | ca_state, | ca_county, | avg( cast(cs_quantity as decimal(12,2))) agg1, | avg( cast(cs_list_price as decimal(12,2))) agg2, | avg( cast(cs_coupon_amt as decimal(12,2))) agg3, | avg( cast(cs_sales_price as decimal(12,2))) agg4, | avg( cast(cs_net_profit as decimal(12,2))) agg5, | avg( cast(c_birth_year as decimal(12,2))) agg6, | avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7 | from catalog_sales, customer_demographics cd1, | customer_demographics cd2, customer, customer_address, date_dim, item | where cs_sold_date_sk = d_date_sk and | cs_item_sk = i_item_sk and | cs_bill_cdemo_sk = cd1.cd_demo_sk and | cs_bill_customer_sk = c_customer_sk and | cd1.cd_gender = 'F' and | cd1.cd_education_status = 'Unknown' and | c_current_cdemo_sk = cd2.cd_demo_sk and | c_current_addr_sk = ca_address_sk and | c_birth_month in (1,6,8,9,12,2) and | d_year = 1998 and | ca_state in ('MS','IN','ND','OK','NM','VA','MS') | group by rollup (i_item_id, ca_country, ca_state, ca_county) | order by ca_country, ca_state, ca_county, i_item_id | LIMIT 100 """.stripMargin), ("q19", """ | select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact, | sum(ss_ext_sales_price) ext_price | from date_dim, store_sales, item,customer,customer_address,store | where d_date_sk = ss_sold_date_sk | and ss_item_sk = i_item_sk | and i_manager_id = 8 | and d_moy = 11 | and d_year = 1998 | and ss_customer_sk = c_customer_sk | and c_current_addr_sk = ca_address_sk | and substr(ca_zip,1,5) <> substr(s_zip,1,5) | and ss_store_sk = s_store_sk | group by i_brand, i_brand_id, i_manufact_id, i_manufact | order by ext_price desc, brand, brand_id, i_manufact_id, i_manufact | limit 100 """.stripMargin), ("q20", """ |select i_item_desc | ,i_category | ,i_class | ,i_current_price | ,sum(cs_ext_sales_price) as itemrevenue | 
,sum(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over | (partition by i_class) as revenueratio | from catalog_sales, item, date_dim | where cs_item_sk = i_item_sk | and i_category in ('Sports', 'Books', 'Home') | and cs_sold_date_sk = d_date_sk | and d_date between cast('1999-02-22' as date) | and (cast('1999-02-22' as date) + interval 30 days) | group by i_item_id, i_item_desc, i_category, i_class, i_current_price | order by i_category, i_class, i_item_id, i_item_desc, revenueratio | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add ("q21", """ | select * from( | select w_warehouse_name, i_item_id, | sum(case when (cast(d_date as date) < cast ('2000-03-11' as date)) | then inv_quantity_on_hand | else 0 end) as inv_before, | sum(case when (cast(d_date as date) >= cast ('2000-03-11' as date)) | then inv_quantity_on_hand | else 0 end) as inv_after | from inventory, warehouse, item, date_dim | where i_current_price between 0.99 and 1.49 | and i_item_sk = inv_item_sk | and inv_warehouse_sk = w_warehouse_sk | and inv_date_sk = d_date_sk | and d_date between (cast('2000-03-11' as date) - interval 30 days) | and (cast('2000-03-11' as date) + interval 30 days) | group by w_warehouse_name, i_item_id) x | where (case when inv_before > 0 | then inv_after / inv_before | else null | end) between 2.0/3.0 and 3.0/2.0 | order by w_warehouse_name, i_item_id | limit 100 """.stripMargin), ("q22", """ | select i_product_name, i_brand, i_class, i_category, avg(inv_quantity_on_hand) qoh | from inventory, date_dim, item, warehouse | where inv_date_sk=d_date_sk | and inv_item_sk=i_item_sk | and inv_warehouse_sk = w_warehouse_sk | and d_month_seq between 1200 and 1200 + 11 | group by rollup(i_product_name, i_brand, i_class, i_category) | order by qoh, i_product_name, i_brand, i_class, i_category | limit 100 """.stripMargin), ("q23a", """ | with frequent_ss_items as | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt | from 
store_sales, date_dim, item | where ss_sold_date_sk = d_date_sk | and ss_item_sk = i_item_sk | and d_year in (2000, 2000+1, 2000+2,2000+3) | group by substr(i_item_desc,1,30),i_item_sk,d_date | having count(*) >4), | max_store_sales as | (select max(csales) tpcds_cmax | from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales | from store_sales, customer, date_dim | where ss_customer_sk = c_customer_sk | and ss_sold_date_sk = d_date_sk | and d_year in (2000, 2000+1, 2000+2,2000+3) | group by c_customer_sk) x), | best_ss_customer as | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales | from store_sales, customer | where ss_customer_sk = c_customer_sk | group by c_customer_sk | having sum(ss_quantity*ss_sales_price) > (50/100.0) * | (select * from max_store_sales)) | select sum(sales) | from ((select cs_quantity*cs_list_price sales | from catalog_sales, date_dim | where d_year = 2000 | and d_moy = 2 | and cs_sold_date_sk = d_date_sk | and cs_item_sk in (select item_sk from frequent_ss_items) | and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer)) | union all | (select ws_quantity*ws_list_price sales | from web_sales, date_dim | where d_year = 2000 | and d_moy = 2 | and ws_sold_date_sk = d_date_sk | and ws_item_sk in (select item_sk from frequent_ss_items) | and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y | limit 100 """.stripMargin), ("q23b", """ | | with frequent_ss_items as | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt | from store_sales, date_dim, item | where ss_sold_date_sk = d_date_sk | and ss_item_sk = i_item_sk | and d_year in (2000, 2000+1, 2000+2,2000+3) | group by substr(i_item_desc,1,30),i_item_sk,d_date | having count(*) > 4), | max_store_sales as | (select max(csales) tpcds_cmax | from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales | from store_sales, customer, date_dim | where ss_customer_sk = c_customer_sk | and ss_sold_date_sk 
= d_date_sk | and d_year in (2000, 2000+1, 2000+2,2000+3) | group by c_customer_sk) x), | best_ss_customer as | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales | from store_sales | ,customer | where ss_customer_sk = c_customer_sk | group by c_customer_sk | having sum(ss_quantity*ss_sales_price) > (50/100.0) * | (select * from max_store_sales)) | select c_last_name,c_first_name,sales | from ((select c_last_name,c_first_name,sum(cs_quantity*cs_list_price) sales | from catalog_sales, customer, date_dim | where d_year = 2000 | and d_moy = 2 | and cs_sold_date_sk = d_date_sk | and cs_item_sk in (select item_sk from frequent_ss_items) | and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer) | and cs_bill_customer_sk = c_customer_sk | group by c_last_name,c_first_name) | union all | (select c_last_name,c_first_name,sum(ws_quantity*ws_list_price) sales | from web_sales, customer, date_dim | where d_year = 2000 | and d_moy = 2 | and ws_sold_date_sk = d_date_sk | and ws_item_sk in (select item_sk from frequent_ss_items) | and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer) | and ws_bill_customer_sk = c_customer_sk | group by c_last_name,c_first_name)) y | order by c_last_name,c_first_name,sales | limit 100 """.stripMargin), ("q24a", """ | with ssales as | (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color, | i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid | from store_sales, store_returns, store, item, customer, customer_address | where ss_ticket_number = sr_ticket_number | and ss_item_sk = sr_item_sk | and ss_customer_sk = c_customer_sk | and ss_item_sk = i_item_sk | and ss_store_sk = s_store_sk | and c_birth_country = upper(ca_country) | and s_zip = ca_zip | and s_market_id = 8 | group by c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color, | i_current_price, i_manager_id, i_units, i_size) | select c_last_name, c_first_name, s_store_name, sum(netpaid) paid | 
from ssales | where i_color = 'pale' | group by c_last_name, c_first_name, s_store_name | having sum(netpaid) > (select 0.05*avg(netpaid) from ssales) """.stripMargin), ("q24b", """ | with ssales as | (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color, | i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid | from store_sales, store_returns, store, item, customer, customer_address | where ss_ticket_number = sr_ticket_number | and ss_item_sk = sr_item_sk | and ss_customer_sk = c_customer_sk | and ss_item_sk = i_item_sk | and ss_store_sk = s_store_sk | and c_birth_country = upper(ca_country) | and s_zip = ca_zip | and s_market_id = 8 | group by c_last_name, c_first_name, s_store_name, ca_state, s_state, | i_color, i_current_price, i_manager_id, i_units, i_size) | select c_last_name, c_first_name, s_store_name, sum(netpaid) paid | from ssales | where i_color = 'chiffon' | group by c_last_name, c_first_name, s_store_name | having sum(netpaid) > (select 0.05*avg(netpaid) from ssales) """.stripMargin), ("q25", """ | select i_item_id, i_item_desc, s_store_id, s_store_name, | sum(ss_net_profit) as store_sales_profit, | sum(sr_net_loss) as store_returns_loss, | sum(cs_net_profit) as catalog_sales_profit | from | store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3, | store, item | where | d1.d_moy = 4 | and d1.d_year = 2001 | and d1.d_date_sk = ss_sold_date_sk | and i_item_sk = ss_item_sk | and s_store_sk = ss_store_sk | and ss_customer_sk = sr_customer_sk | and ss_item_sk = sr_item_sk | and ss_ticket_number = sr_ticket_number | and sr_returned_date_sk = d2.d_date_sk | and d2.d_moy between 4 and 10 | and d2.d_year = 2001 | and sr_customer_sk = cs_bill_customer_sk | and sr_item_sk = cs_item_sk | and cs_sold_date_sk = d3.d_date_sk | and d3.d_moy between 4 and 10 | and d3.d_year = 2001 | group by | i_item_id, i_item_desc, s_store_id, s_store_name | order by | i_item_id, i_item_desc, s_store_id, 
s_store_name | limit 100 """.stripMargin), ("q26", """ | select i_item_id, | avg(cs_quantity) agg1, | avg(cs_list_price) agg2, | avg(cs_coupon_amt) agg3, | avg(cs_sales_price) agg4 | from catalog_sales, customer_demographics, date_dim, item, promotion | where cs_sold_date_sk = d_date_sk and | cs_item_sk = i_item_sk and | cs_bill_cdemo_sk = cd_demo_sk and | cs_promo_sk = p_promo_sk and | cd_gender = 'M' and | cd_marital_status = 'S' and | cd_education_status = 'College' and | (p_channel_email = 'N' or p_channel_event = 'N') and | d_year = 2000 | group by i_item_id | order by i_item_id | limit 100 """.stripMargin), ("q27", """ | select i_item_id, | s_state, grouping(s_state) g_state, | avg(ss_quantity) agg1, | avg(ss_list_price) agg2, | avg(ss_coupon_amt) agg3, | avg(ss_sales_price) agg4 | from store_sales, customer_demographics, date_dim, store, item | where ss_sold_date_sk = d_date_sk and | ss_item_sk = i_item_sk and | ss_store_sk = s_store_sk and | ss_cdemo_sk = cd_demo_sk and | cd_gender = 'M' and | cd_marital_status = 'S' and | cd_education_status = 'College' and | d_year = 2002 and | s_state in ('TN','TN', 'TN', 'TN', 'TN', 'TN') | group by rollup (i_item_id, s_state) | order by i_item_id, s_state | limit 100 """.stripMargin), ("q28", """ | select * | from (select avg(ss_list_price) B1_LP | ,count(ss_list_price) B1_CNT | ,count(distinct ss_list_price) B1_CNTD | from store_sales | where ss_quantity between 0 and 5 | and (ss_list_price between 8 and 8+10 | or ss_coupon_amt between 459 and 459+1000 | or ss_wholesale_cost between 57 and 57+20)) B1, | (select avg(ss_list_price) B2_LP | ,count(ss_list_price) B2_CNT | ,count(distinct ss_list_price) B2_CNTD | from store_sales | where ss_quantity between 6 and 10 | and (ss_list_price between 90 and 90+10 | or ss_coupon_amt between 2323 and 2323+1000 | or ss_wholesale_cost between 31 and 31+20)) B2, | (select avg(ss_list_price) B3_LP | ,count(ss_list_price) B3_CNT | ,count(distinct ss_list_price) B3_CNTD | from 
store_sales | where ss_quantity between 11 and 15 | and (ss_list_price between 142 and 142+10 | or ss_coupon_amt between 12214 and 12214+1000 | or ss_wholesale_cost between 79 and 79+20)) B3, | (select avg(ss_list_price) B4_LP | ,count(ss_list_price) B4_CNT | ,count(distinct ss_list_price) B4_CNTD | from store_sales | where ss_quantity between 16 and 20 | and (ss_list_price between 135 and 135+10 | or ss_coupon_amt between 6071 and 6071+1000 | or ss_wholesale_cost between 38 and 38+20)) B4, | (select avg(ss_list_price) B5_LP | ,count(ss_list_price) B5_CNT | ,count(distinct ss_list_price) B5_CNTD | from store_sales | where ss_quantity between 21 and 25 | and (ss_list_price between 122 and 122+10 | or ss_coupon_amt between 836 and 836+1000 | or ss_wholesale_cost between 17 and 17+20)) B5, | (select avg(ss_list_price) B6_LP | ,count(ss_list_price) B6_CNT | ,count(distinct ss_list_price) B6_CNTD | from store_sales | where ss_quantity between 26 and 30 | and (ss_list_price between 154 and 154+10 | or ss_coupon_amt between 7326 and 7326+1000 | or ss_wholesale_cost between 7 and 7+20)) B6 | limit 100 """.stripMargin), ("q29", """ | select | i_item_id | ,i_item_desc | ,s_store_id | ,s_store_name | ,sum(ss_quantity) as store_sales_quantity | ,sum(sr_return_quantity) as store_returns_quantity | ,sum(cs_quantity) as catalog_sales_quantity | from | store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, | date_dim d3, store, item | where | d1.d_moy = 9 | and d1.d_year = 1999 | and d1.d_date_sk = ss_sold_date_sk | and i_item_sk = ss_item_sk | and s_store_sk = ss_store_sk | and ss_customer_sk = sr_customer_sk | and ss_item_sk = sr_item_sk | and ss_ticket_number = sr_ticket_number | and sr_returned_date_sk = d2.d_date_sk | and d2.d_moy between 9 and 9 + 3 | and d2.d_year = 1999 | and sr_customer_sk = cs_bill_customer_sk | and sr_item_sk = cs_item_sk | and cs_sold_date_sk = d3.d_date_sk | and d3.d_year in (1999,1999+1,1999+2) | group by | i_item_id, i_item_desc, 
s_store_id, s_store_name | order by | i_item_id, i_item_desc, s_store_id, s_store_name | limit 100 """.stripMargin), ("q30", """ | with customer_total_return as | (select wr_returning_customer_sk as ctr_customer_sk | ,ca_state as ctr_state, | sum(wr_return_amt) as ctr_total_return | from web_returns, date_dim, customer_address | where wr_returned_date_sk = d_date_sk | and d_year = 2002 | and wr_returning_addr_sk = ca_address_sk | group by wr_returning_customer_sk,ca_state) | select c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag | ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address | ,c_last_review_date,ctr_total_return | from customer_total_return ctr1, customer_address, customer | where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2 | from customer_total_return ctr2 | where ctr1.ctr_state = ctr2.ctr_state) | and ca_address_sk = c_current_addr_sk | and ca_state = 'GA' | and ctr1.ctr_customer_sk = c_customer_sk | order by c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag | ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login, | c_email_address | ,c_last_review_date,ctr_total_return | limit 100 """.stripMargin), ("q31", """ | with ss as | (select ca_county,d_qoy, d_year,sum(ss_ext_sales_price) as store_sales | from store_sales,date_dim,customer_address | where ss_sold_date_sk = d_date_sk | and ss_addr_sk=ca_address_sk | group by ca_county,d_qoy, d_year), | ws as | (select ca_county,d_qoy, d_year,sum(ws_ext_sales_price) as web_sales | from web_sales,date_dim,customer_address | where ws_sold_date_sk = d_date_sk | and ws_bill_addr_sk=ca_address_sk | group by ca_county,d_qoy, d_year) | select | ss1.ca_county | ,ss1.d_year | ,ws2.web_sales/ws1.web_sales web_q1_q2_increase | ,ss2.store_sales/ss1.store_sales store_q1_q2_increase | ,ws3.web_sales/ws2.web_sales web_q2_q3_increase | ,ss3.store_sales/ss2.store_sales store_q2_q3_increase | from | ss ss1, ss ss2, ss ss3, ws ws1, 
ws ws2, ws ws3 | where | ss1.d_qoy = 1 | and ss1.d_year = 2000 | and ss1.ca_county = ss2.ca_county | and ss2.d_qoy = 2 | and ss2.d_year = 2000 | and ss2.ca_county = ss3.ca_county | and ss3.d_qoy = 3 | and ss3.d_year = 2000 | and ss1.ca_county = ws1.ca_county | and ws1.d_qoy = 1 | and ws1.d_year = 2000 | and ws1.ca_county = ws2.ca_county | and ws2.d_qoy = 2 | and ws2.d_year = 2000 | and ws1.ca_county = ws3.ca_county | and ws3.d_qoy = 3 | and ws3.d_year = 2000 | and case when ws1.web_sales > 0 then ws2.web_sales/ws1.web_sales else null end | > case when ss1.store_sales > 0 then ss2.store_sales/ss1.store_sales else null end | and case when ws2.web_sales > 0 then ws3.web_sales/ws2.web_sales else null end | > case when ss2.store_sales > 0 then ss3.store_sales/ss2.store_sales else null end | order by ss1.ca_county """.stripMargin), // Modifications: " -> ` ("q32", """ | select sum(cs_ext_discount_amt) as `excess discount amount` | from | catalog_sales, item, date_dim | where | i_manufact_id = 977 | and i_item_sk = cs_item_sk | and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days) | and d_date_sk = cs_sold_date_sk | and cs_ext_discount_amt > ( | select 1.3 * avg(cs_ext_discount_amt) | from catalog_sales, date_dim | where cs_item_sk = i_item_sk | and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval | 90 days) | and d_date_sk = cs_sold_date_sk) |limit 100 """.stripMargin), ("q33", """ | with ss as ( | select | i_manufact_id,sum(ss_ext_sales_price) total_sales | from | store_sales, date_dim, customer_address, item | where | i_manufact_id in (select i_manufact_id | from item | where i_category in ('Electronics')) | and ss_item_sk = i_item_sk | and ss_sold_date_sk = d_date_sk | and d_year = 1998 | and d_moy = 5 | and ss_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_manufact_id), cs as | (select i_manufact_id, sum(cs_ext_sales_price) total_sales | from catalog_sales, date_dim, customer_address, item | where | 
i_manufact_id in ( | select i_manufact_id from item | where | i_category in ('Electronics')) | and cs_item_sk = i_item_sk | and cs_sold_date_sk = d_date_sk | and d_year = 1998 | and d_moy = 5 | and cs_bill_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_manufact_id), | ws as ( | select i_manufact_id,sum(ws_ext_sales_price) total_sales | from | web_sales, date_dim, customer_address, item | where | i_manufact_id in (select i_manufact_id from item | where i_category in ('Electronics')) | and ws_item_sk = i_item_sk | and ws_sold_date_sk = d_date_sk | and d_year = 1998 | and d_moy = 5 | and ws_bill_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_manufact_id) | select i_manufact_id ,sum(total_sales) total_sales | from (select * from ss | union all | select * from cs | union all | select * from ws) tmp1 | group by i_manufact_id | order by total_sales |limit 100 """.stripMargin), ("q34", """ | select c_last_name, c_first_name, c_salutation, c_preferred_cust_flag, ss_ticket_number, | cnt | FROM | (select ss_ticket_number, ss_customer_sk, count(*) cnt | from store_sales,date_dim,store,household_demographics | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_store_sk = store.s_store_sk | and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk | and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28) | and (household_demographics.hd_buy_potential = '>10000' or | household_demographics.hd_buy_potential = 'unknown') | and household_demographics.hd_vehicle_count > 0 | and (case when household_demographics.hd_vehicle_count > 0 | then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count | else null | end) > 1.2 | and date_dim.d_year in (1999, 1999+1, 1999+2) | and store.s_county in ('Williamson County','Williamson County','Williamson County', | 'Williamson County', | 'Williamson County','Williamson County','Williamson County', | 'Williamson County') | group by 
ss_ticket_number,ss_customer_sk) dn,customer | where ss_customer_sk = c_customer_sk | and cnt between 15 and 20 | order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc """.stripMargin), ("q35", """ | select | ca_state, | cd_gender, | cd_marital_status, | count(*) cnt1, | min(cd_dep_count), | max(cd_dep_count), | avg(cd_dep_count), | cd_dep_employed_count, | count(*) cnt2, | min(cd_dep_employed_count), | max(cd_dep_employed_count), | avg(cd_dep_employed_count), | cd_dep_college_count, | count(*) cnt3, | min(cd_dep_college_count), | max(cd_dep_college_count), | avg(cd_dep_college_count) | from | customer c,customer_address ca,customer_demographics | where | c.c_current_addr_sk = ca.ca_address_sk and | cd_demo_sk = c.c_current_cdemo_sk and | exists (select * from store_sales, date_dim | where c.c_customer_sk = ss_customer_sk and | ss_sold_date_sk = d_date_sk and | d_year = 2002 and | d_qoy < 4) and | (exists (select * from web_sales, date_dim | where c.c_customer_sk = ws_bill_customer_sk and | ws_sold_date_sk = d_date_sk and | d_year = 2002 and | d_qoy < 4) or | exists (select * from catalog_sales, date_dim | where c.c_customer_sk = cs_ship_customer_sk and | cs_sold_date_sk = d_date_sk and | d_year = 2002 and | d_qoy < 4)) | group by ca_state, cd_gender, cd_marital_status, cd_dep_count, | cd_dep_employed_count, cd_dep_college_count | order by ca_state, cd_gender, cd_marital_status, cd_dep_count, | cd_dep_employed_count, cd_dep_college_count | limit 100 """.stripMargin), ("q36", """ | select | sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin | ,i_category | ,i_class | ,grouping(i_category)+grouping(i_class) as lochierarchy | ,rank() over ( | partition by grouping(i_category)+grouping(i_class), | case when grouping(i_class) = 0 then i_category end | order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent | from | store_sales, date_dim d1, item, store | where | d1.d_year = 2001 | and d1.d_date_sk = ss_sold_date_sk | 
and i_item_sk = ss_item_sk | and s_store_sk = ss_store_sk | and s_state in ('TN','TN','TN','TN','TN','TN','TN','TN') | group by rollup(i_category,i_class) | order by | lochierarchy desc | ,case when lochierarchy = 0 then i_category end | ,rank_within_parent | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add ("q37", """ | select i_item_id, i_item_desc, i_current_price | from item, inventory, date_dim, catalog_sales | where i_current_price between 68 and 68 + 30 | and inv_item_sk = i_item_sk | and d_date_sk=inv_date_sk | and d_date between cast('2000-02-01' as date) and (cast('2000-02-01' as date) + | interval 60 days) | and i_manufact_id in (677,940,694,808) | and inv_quantity_on_hand between 100 and 500 | and cs_item_sk = i_item_sk | group by i_item_id,i_item_desc,i_current_price | order by i_item_id | limit 100 """.stripMargin), ("q38", """ | select count(*) from ( | select distinct c_last_name, c_first_name, d_date | from store_sales, date_dim, customer | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_customer_sk = customer.c_customer_sk | and d_month_seq between 1200 and 1200 + 11 | intersect | select distinct c_last_name, c_first_name, d_date | from catalog_sales, date_dim, customer | where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk | and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk | and d_month_seq between 1200 and 1200 + 11 | intersect | select distinct c_last_name, c_first_name, d_date | from web_sales, date_dim, customer | where web_sales.ws_sold_date_sk = date_dim.d_date_sk | and web_sales.ws_bill_customer_sk = customer.c_customer_sk | and d_month_seq between 1200 and 1200 + 11 | ) hot_cust | limit 100 """.stripMargin), ("q39a", """ | with inv as | (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy | ,stdev,mean, case mean when 0 then null else stdev/mean end cov | from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy | ,stddev_samp(inv_quantity_on_hand) 
stdev,avg(inv_quantity_on_hand) mean | from inventory, item, warehouse, date_dim | where inv_item_sk = i_item_sk | and inv_warehouse_sk = w_warehouse_sk | and inv_date_sk = d_date_sk | and d_year = 2001 | group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo | where case mean when 0 then 0 else stdev/mean end > 1) | select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov | ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov | from inv inv1,inv inv2 | where inv1.i_item_sk = inv2.i_item_sk | and inv1.w_warehouse_sk = inv2.w_warehouse_sk | and inv1.d_moy=1 | and inv2.d_moy=1+1 | order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov | ,inv2.d_moy,inv2.mean, inv2.cov """.stripMargin), ("q39b", """ | with inv as | (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy | ,stdev,mean, case mean when 0 then null else stdev/mean end cov | from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy | ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean | from inventory, item, warehouse, date_dim | where inv_item_sk = i_item_sk | and inv_warehouse_sk = w_warehouse_sk | and inv_date_sk = d_date_sk | and d_year = 2001 | group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo | where case mean when 0 then 0 else stdev/mean end > 1) | select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov | ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov | from inv inv1,inv inv2 | where inv1.i_item_sk = inv2.i_item_sk | and inv1.w_warehouse_sk = inv2.w_warehouse_sk | and inv1.d_moy=1 | and inv2.d_moy=1+1 | and inv1.cov > 1.5 | order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov | ,inv2.d_moy,inv2.mean, inv2.cov """.stripMargin), // Modifications: "+ days" -> date_add ("q40", """ | select | w_state | ,i_item_id | ,sum(case when (cast(d_date as date) < cast('2000-03-11' as date)) | then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as 
sales_before | ,sum(case when (cast(d_date as date) >= cast('2000-03-11' as date)) | then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_after | from | catalog_sales left outer join catalog_returns on | (cs_order_number = cr_order_number | and cs_item_sk = cr_item_sk) | ,warehouse, item, date_dim | where | i_current_price between 0.99 and 1.49 | and i_item_sk = cs_item_sk | and cs_warehouse_sk = w_warehouse_sk | and cs_sold_date_sk = d_date_sk | and d_date between (cast('2000-03-11' as date) - interval 30 days) | and (cast('2000-03-11' as date) + interval 30 days) | group by w_state,i_item_id | order by w_state,i_item_id | limit 100 """.stripMargin), ("q41", """ | select distinct(i_product_name) | from item i1 | where i_manufact_id between 738 and 738+40 | and (select count(*) as item_cnt | from item | where (i_manufact = i1.i_manufact and | ((i_category = 'Women' and | (i_color = 'powder' or i_color = 'khaki') and | (i_units = 'Ounce' or i_units = 'Oz') and | (i_size = 'medium' or i_size = 'extra large') | ) or | (i_category = 'Women' and | (i_color = 'brown' or i_color = 'honeydew') and | (i_units = 'Bunch' or i_units = 'Ton') and | (i_size = 'N/A' or i_size = 'small') | ) or | (i_category = 'Men' and | (i_color = 'floral' or i_color = 'deep') and | (i_units = 'N/A' or i_units = 'Dozen') and | (i_size = 'petite' or i_size = 'large') | ) or | (i_category = 'Men' and | (i_color = 'light' or i_color = 'cornflower') and | (i_units = 'Box' or i_units = 'Pound') and | (i_size = 'medium' or i_size = 'extra large') | ))) or | (i_manufact = i1.i_manufact and | ((i_category = 'Women' and | (i_color = 'midnight' or i_color = 'snow') and | (i_units = 'Pallet' or i_units = 'Gross') and | (i_size = 'medium' or i_size = 'extra large') | ) or | (i_category = 'Women' and | (i_color = 'cyan' or i_color = 'papaya') and | (i_units = 'Cup' or i_units = 'Dram') and | (i_size = 'N/A' or i_size = 'small') | ) or | (i_category = 'Men' and | (i_color = 'orange' or 
i_color = 'frosted') and | (i_units = 'Each' or i_units = 'Tbl') and | (i_size = 'petite' or i_size = 'large') | ) or | (i_category = 'Men' and | (i_color = 'forest' or i_color = 'ghost') and | (i_units = 'Lb' or i_units = 'Bundle') and | (i_size = 'medium' or i_size = 'extra large') | )))) > 0 | order by i_product_name | limit 100 """.stripMargin), ("q42", """ | select dt.d_year, item.i_category_id, item.i_category, sum(ss_ext_sales_price) | from date_dim dt, store_sales, item | where dt.d_date_sk = store_sales.ss_sold_date_sk | and store_sales.ss_item_sk = item.i_item_sk | and item.i_manager_id = 1 | and dt.d_moy=11 | and dt.d_year=2000 | group by dt.d_year | ,item.i_category_id | ,item.i_category | order by sum(ss_ext_sales_price) desc,dt.d_year | ,item.i_category_id | ,item.i_category | limit 100 """.stripMargin), ("q43", """ | select s_store_name, s_store_id, | sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales, | sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales, | sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales, | sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) | wed_sales, | sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales, | sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales, | sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales | from date_dim, store_sales, store | where d_date_sk = ss_sold_date_sk and | s_store_sk = ss_store_sk and | s_gmt_offset = -5 and | d_year = 2000 | group by s_store_name, s_store_id | order by s_store_name, s_store_id,sun_sales,mon_sales,tue_sales,wed_sales, | thu_sales,fri_sales,sat_sales | limit 100 """.stripMargin), ("q44", """ | select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing | from(select * | from (select item_sk,rank() over (order by rank_col asc) rnk | from (select 
ss_item_sk item_sk,avg(ss_net_profit) rank_col | from store_sales ss1 | where ss_store_sk = 4 | group by ss_item_sk | having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col | from store_sales | where ss_store_sk = 4 | and ss_addr_sk is null | group by ss_store_sk))V1)V11 | where rnk < 11) asceding, | (select * | from (select item_sk,rank() over (order by rank_col desc) rnk | from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col | from store_sales ss1 | where ss_store_sk = 4 | group by ss_item_sk | having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col | from store_sales | where ss_store_sk = 4 | and ss_addr_sk is null | group by ss_store_sk))V2)V21 | where rnk < 11) descending, | item i1, item i2 | where asceding.rnk = descending.rnk | and i1.i_item_sk=asceding.item_sk | and i2.i_item_sk=descending.item_sk | order by asceding.rnk | limit 100 """.stripMargin), ("q45", """ | select ca_zip, ca_city, sum(ws_sales_price) | from web_sales, customer, customer_address, date_dim, item | where ws_bill_customer_sk = c_customer_sk | and c_current_addr_sk = ca_address_sk | and ws_item_sk = i_item_sk | and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', | '85460', '80348', '81792') | or | i_item_id in (select i_item_id | from item | where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29) | ) | ) | and ws_sold_date_sk = d_date_sk | and d_qoy = 2 and d_year = 2001 | group by ca_zip, ca_city | order by ca_zip, ca_city | limit 100 """.stripMargin), ("q46", """ | select c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number, amt,profit | from | (select ss_ticket_number | ,ss_customer_sk | ,ca_city bought_city | ,sum(ss_coupon_amt) amt | ,sum(ss_net_profit) profit | from store_sales, date_dim, store, household_demographics, customer_address | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_store_sk = store.s_store_sk | and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk | and 
store_sales.ss_addr_sk = customer_address.ca_address_sk | and (household_demographics.hd_dep_count = 4 or | household_demographics.hd_vehicle_count= 3) | and date_dim.d_dow in (6,0) | and date_dim.d_year in (1999,1999+1,1999+2) | and store.s_city in ('Fairview','Midway','Fairview','Fairview','Fairview') | group by ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city) dn,customer, | customer_address current_addr | where ss_customer_sk = c_customer_sk | and customer.c_current_addr_sk = current_addr.ca_address_sk | and current_addr.ca_city <> bought_city | order by c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number | limit 100 """.stripMargin), ("q47", """ | with v1 as( | select i_category, i_brand, | s_store_name, s_company_name, | d_year, d_moy, | sum(ss_sales_price) sum_sales, | avg(sum(ss_sales_price)) over | (partition by i_category, i_brand, | s_store_name, s_company_name, d_year) | avg_monthly_sales, | rank() over | (partition by i_category, i_brand, | s_store_name, s_company_name | order by d_year, d_moy) rn | from item, store_sales, date_dim, store | where ss_item_sk = i_item_sk and | ss_sold_date_sk = d_date_sk and | ss_store_sk = s_store_sk and | ( | d_year = 1999 or | ( d_year = 1999-1 and d_moy =12) or | ( d_year = 1999+1 and d_moy =1) | ) | group by i_category, i_brand, | s_store_name, s_company_name, | d_year, d_moy), | v2 as( | select v1.i_category, v1.i_brand, v1.s_store_name, v1.s_company_name, v1.d_year, | v1.d_moy, v1.avg_monthly_sales ,v1.sum_sales, v1_lag.sum_sales psum, | v1_lead.sum_sales nsum | from v1, v1 v1_lag, v1 v1_lead | where v1.i_category = v1_lag.i_category and | v1.i_category = v1_lead.i_category and | v1.i_brand = v1_lag.i_brand and | v1.i_brand = v1_lead.i_brand and | v1.s_store_name = v1_lag.s_store_name and | v1.s_store_name = v1_lead.s_store_name and | v1.s_company_name = v1_lag.s_company_name and | v1.s_company_name = v1_lead.s_company_name and | v1.rn = v1_lag.rn + 1 and | v1.rn = v1_lead.rn - 1) | select * from 
v2 | where d_year = 1999 and | avg_monthly_sales > 0 and | case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / | avg_monthly_sales else null end > 0.1 | order by sum_sales - avg_monthly_sales, 3 | limit 100 """.stripMargin), ("q48", """ | select sum (ss_quantity) | from store_sales, store, customer_demographics, customer_address, date_dim | where s_store_sk = ss_store_sk | and ss_sold_date_sk = d_date_sk and d_year = 2001 | and | ( | ( | cd_demo_sk = ss_cdemo_sk | and | cd_marital_status = 'M' | and | cd_education_status = '4 yr Degree' | and | ss_sales_price between 100.00 and 150.00 | ) | or | ( | cd_demo_sk = ss_cdemo_sk | and | cd_marital_status = 'D' | and | cd_education_status = '2 yr Degree' | and | ss_sales_price between 50.00 and 100.00 | ) | or | ( | cd_demo_sk = ss_cdemo_sk | and | cd_marital_status = 'S' | and | cd_education_status = 'College' | and | ss_sales_price between 150.00 and 200.00 | ) | ) | and | ( | ( | ss_addr_sk = ca_address_sk | and | ca_country = 'United States' | and | ca_state in ('CO', 'OH', 'TX') | and ss_net_profit between 0 and 2000 | ) | or | (ss_addr_sk = ca_address_sk | and | ca_country = 'United States' | and | ca_state in ('OR', 'MN', 'KY') | and ss_net_profit between 150 and 3000 | ) | or | (ss_addr_sk = ca_address_sk | and | ca_country = 'United States' | and | ca_state in ('VA', 'CA', 'MS') | and ss_net_profit between 50 and 25000 | ) | ) """.stripMargin), // Modifications: "dec" -> "decimal" ("q49", """ | select 'web' as channel, web.item, web.return_ratio, web.return_rank, web.currency_rank | from ( | select | item, return_ratio, currency_ratio, | rank() over (order by return_ratio) as return_rank, | rank() over (order by currency_ratio) as currency_rank | from | ( select ws.ws_item_sk as item | ,(cast(sum(coalesce(wr.wr_return_quantity,0)) as decimal(15,4))/ | cast(sum(coalesce(ws.ws_quantity,0)) as decimal(15,4) )) as return_ratio | ,(cast(sum(coalesce(wr.wr_return_amt,0)) as decimal(15,4))/ | 
cast(sum(coalesce(ws.ws_net_paid,0)) as decimal(15,4) )) as currency_ratio | from | web_sales ws left outer join web_returns wr | on (ws.ws_order_number = wr.wr_order_number and | ws.ws_item_sk = wr.wr_item_sk) | ,date_dim | where | wr.wr_return_amt > 10000 | and ws.ws_net_profit > 1 | and ws.ws_net_paid > 0 | and ws.ws_quantity > 0 | and ws_sold_date_sk = d_date_sk | and d_year = 2001 | and d_moy = 12 | group by ws.ws_item_sk | ) in_web | ) web | where (web.return_rank <= 10 or web.currency_rank <= 10) | union | select | 'catalog' as channel, catalog.item, catalog.return_ratio, | catalog.return_rank, catalog.currency_rank | from ( | select | item, return_ratio, currency_ratio, | rank() over (order by return_ratio) as return_rank, | rank() over (order by currency_ratio) as currency_rank | from | ( select | cs.cs_item_sk as item | ,(cast(sum(coalesce(cr.cr_return_quantity,0)) as decimal(15,4))/ | cast(sum(coalesce(cs.cs_quantity,0)) as decimal(15,4) )) as return_ratio | ,(cast(sum(coalesce(cr.cr_return_amount,0)) as decimal(15,4))/ | cast(sum(coalesce(cs.cs_net_paid,0)) as decimal(15,4) )) as currency_ratio | from | catalog_sales cs left outer join catalog_returns cr | on (cs.cs_order_number = cr.cr_order_number and | cs.cs_item_sk = cr.cr_item_sk) | ,date_dim | where | cr.cr_return_amount > 10000 | and cs.cs_net_profit > 1 | and cs.cs_net_paid > 0 | and cs.cs_quantity > 0 | and cs_sold_date_sk = d_date_sk | and d_year = 2001 | and d_moy = 12 | group by cs.cs_item_sk | ) in_cat | ) catalog | where (catalog.return_rank <= 10 or catalog.currency_rank <=10) | union | select | 'store' as channel, store.item, store.return_ratio, | store.return_rank, store.currency_rank | from ( | select | item, return_ratio, currency_ratio, | rank() over (order by return_ratio) as return_rank, | rank() over (order by currency_ratio) as currency_rank | from | ( select sts.ss_item_sk as item | ,(cast(sum(coalesce(sr.sr_return_quantity,0)) as decimal(15,4))/ | 
cast(sum(coalesce(sts.ss_quantity,0)) as decimal(15,4) )) as return_ratio | ,(cast(sum(coalesce(sr.sr_return_amt,0)) as decimal(15,4))/ | cast(sum(coalesce(sts.ss_net_paid,0)) as decimal(15,4) )) as currency_ratio | from | store_sales sts left outer join store_returns sr | on (sts.ss_ticket_number = sr.sr_ticket_number and sts.ss_item_sk = | sr.sr_item_sk) | ,date_dim | where | sr.sr_return_amt > 10000 | and sts.ss_net_profit > 1 | and sts.ss_net_paid > 0 | and sts.ss_quantity > 0 | and ss_sold_date_sk = d_date_sk | and d_year = 2001 | and d_moy = 12 | group by sts.ss_item_sk | ) in_store | ) store | where (store.return_rank <= 10 or store.currency_rank <= 10) | order by 1,4,5 | limit 100 """.stripMargin), // Modifications: " -> ` ("q50", """ | select | s_store_name, s_company_id, s_street_number, s_street_name, s_street_type, | s_suite_number, s_city, s_county, s_state, s_zip | ,sum(case when (sr_returned_date_sk - ss_sold_date_sk <= 30 ) then 1 else 0 end) as | `30 days` | ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 30) and | (sr_returned_date_sk - ss_sold_date_sk <= 60) then 1 else 0 end ) as | `31-60 days` | ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 60) and | (sr_returned_date_sk - ss_sold_date_sk <= 90) then 1 else 0 end) as | `61-90 days` | ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 90) and | (sr_returned_date_sk - ss_sold_date_sk <= 120) then 1 else 0 end) as | `91-120 days` | ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 120) then 1 else 0 end) as | `>120 days` | from | store_sales, store_returns, store, date_dim d1, date_dim d2 | where | d2.d_year = 2001 | and d2.d_moy = 8 | and ss_ticket_number = sr_ticket_number | and ss_item_sk = sr_item_sk | and ss_sold_date_sk = d1.d_date_sk | and sr_returned_date_sk = d2.d_date_sk | and ss_customer_sk = sr_customer_sk | and ss_store_sk = s_store_sk | group by | s_store_name, s_company_id, s_street_number, s_street_name, s_street_type, | s_suite_number, s_city, 
s_county, s_state, s_zip | order by | s_store_name, s_company_id, s_street_number, s_street_name, s_street_type, | s_suite_number, s_city, s_county, s_state, s_zip | limit 100 """.stripMargin), ("q51", """ | WITH web_v1 as ( | select | ws_item_sk item_sk, d_date, | sum(sum(ws_sales_price)) | over (partition by ws_item_sk order by d_date rows between unbounded preceding | and current row) cume_sales | from web_sales, date_dim | where ws_sold_date_sk=d_date_sk | and d_month_seq between 1200 and 1200+11 | and ws_item_sk is not NULL | group by ws_item_sk, d_date), | store_v1 as ( | select | ss_item_sk item_sk, d_date, | sum(sum(ss_sales_price)) | over (partition by ss_item_sk order by d_date rows between unbounded preceding | and current row) cume_sales | from store_sales, date_dim | where ss_sold_date_sk=d_date_sk | and d_month_seq between 1200 and 1200+11 | and ss_item_sk is not NULL | group by ss_item_sk, d_date) | select * | from (select item_sk, d_date, web_sales, store_sales | ,max(web_sales) | over (partition by item_sk order by d_date rows between unbounded preceding | and current row) web_cumulative | ,max(store_sales) | over (partition by item_sk order by d_date rows between unbounded preceding | and current row) store_cumulative | from (select case when web.item_sk is not null then web.item_sk else store.item_sk | end item_sk | ,case when web.d_date is not null then web.d_date else store.d_date | end d_date | ,web.cume_sales web_sales | ,store.cume_sales store_sales | from web_v1 web full outer join store_v1 store on (web.item_sk = store.item_sk | and web.d_date = store.d_date) | )x )y | where web_cumulative > store_cumulative | order by item_sk, d_date | limit 100 """.stripMargin), ("q52", """ | select dt.d_year | ,item.i_brand_id brand_id | ,item.i_brand brand | ,sum(ss_ext_sales_price) ext_price | from date_dim dt, store_sales, item | where dt.d_date_sk = store_sales.ss_sold_date_sk | and store_sales.ss_item_sk = item.i_item_sk | and item.i_manager_id = 1 
| and dt.d_moy=11 | and dt.d_year=2000 | group by dt.d_year, item.i_brand, item.i_brand_id | order by dt.d_year, ext_price desc, brand_id |limit 100 """.stripMargin), ("q53", """ | select * from | (select i_manufact_id, | sum(ss_sales_price) sum_sales, | avg(sum(ss_sales_price)) over (partition by i_manufact_id) avg_quarterly_sales | from item, store_sales, date_dim, store | where ss_item_sk = i_item_sk and | ss_sold_date_sk = d_date_sk and | ss_store_sk = s_store_sk and | d_month_seq in (1200,1200+1,1200+2,1200+3,1200+4,1200+5,1200+6, | 1200+7,1200+8,1200+9,1200+10,1200+11) and | ((i_category in ('Books','Children','Electronics') and | i_class in ('personal','portable','reference','self-help') and | i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7', | 'exportiunivamalg #9','scholaramalgamalg #9')) | or | (i_category in ('Women','Music','Men') and | i_class in ('accessories','classical','fragrances','pants') and | i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1', | 'importoamalg #1'))) | group by i_manufact_id, d_qoy ) tmp1 | where case when avg_quarterly_sales > 0 | then abs (sum_sales - avg_quarterly_sales)/ avg_quarterly_sales | else null end > 0.1 | order by avg_quarterly_sales, | sum_sales, | i_manufact_id | limit 100 """.stripMargin), ("q54", """ | with my_customers as ( | select distinct c_customer_sk | , c_current_addr_sk | from | ( select cs_sold_date_sk sold_date_sk, | cs_bill_customer_sk customer_sk, | cs_item_sk item_sk | from catalog_sales | union all | select ws_sold_date_sk sold_date_sk, | ws_bill_customer_sk customer_sk, | ws_item_sk item_sk | from web_sales | ) cs_or_ws_sales, | item, | date_dim, | customer | where sold_date_sk = d_date_sk | and item_sk = i_item_sk | and i_category = 'Women' | and i_class = 'maternity' | and c_customer_sk = cs_or_ws_sales.customer_sk | and d_moy = 12 | and d_year = 1998 | ) | , my_revenue as ( | select c_customer_sk, | sum(ss_ext_sales_price) as revenue | from my_customers, | 
store_sales, | customer_address, | store, | date_dim | where c_current_addr_sk = ca_address_sk | and ca_county = s_county | and ca_state = s_state | and ss_sold_date_sk = d_date_sk | and c_customer_sk = ss_customer_sk | and d_month_seq between (select distinct d_month_seq+1 | from date_dim where d_year = 1998 and d_moy = 12) | and (select distinct d_month_seq+3 | from date_dim where d_year = 1998 and d_moy = 12) | group by c_customer_sk | ) | , segments as | (select cast((revenue/50) as int) as segment from my_revenue) | select segment, count(*) as num_customers, segment*50 as segment_base | from segments | group by segment | order by segment, num_customers | limit 100 """.stripMargin), ("q55", """ |select i_brand_id brand_id, i_brand brand, | sum(ss_ext_sales_price) ext_price | from date_dim, store_sales, item | where d_date_sk = ss_sold_date_sk | and ss_item_sk = i_item_sk | and i_manager_id=28 | and d_moy=11 | and d_year=1999 | group by i_brand, i_brand_id | order by ext_price desc, brand_id | limit 100 """.stripMargin), ("q56", """ | with ss as ( | select i_item_id,sum(ss_ext_sales_price) total_sales | from | store_sales, date_dim, customer_address, item | where | i_item_id in (select i_item_id from item where i_color in ('slate','blanched', | 'burnished')) | and ss_item_sk = i_item_sk | and ss_sold_date_sk = d_date_sk | and d_year = 2001 | and d_moy = 2 | and ss_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_item_id), | cs as ( | select i_item_id,sum(cs_ext_sales_price) total_sales | from | catalog_sales, date_dim, customer_address, item | where | i_item_id in (select i_item_id from item where i_color in ('slate','blanched', | 'burnished')) | and cs_item_sk = i_item_sk | and cs_sold_date_sk = d_date_sk | and d_year = 2001 | and d_moy = 2 | and cs_bill_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_item_id), | ws as ( | select i_item_id,sum(ws_ext_sales_price) total_sales | from | web_sales, date_dim, customer_address, item | where 
| i_item_id in (select i_item_id from item where i_color in ('slate','blanched', | 'burnished')) | and ws_item_sk = i_item_sk | and ws_sold_date_sk = d_date_sk | and d_year = 2001 | and d_moy = 2 | and ws_bill_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_item_id) | select i_item_id ,sum(total_sales) total_sales | from (select * from ss | union all | select * from cs | union all | select * from ws) tmp1 | group by i_item_id | order by total_sales | limit 100 """.stripMargin), ("q57", """ | with v1 as( | select i_category, i_brand, | cc_name, | d_year, d_moy, | sum(cs_sales_price) sum_sales, | avg(sum(cs_sales_price)) over | (partition by i_category, i_brand, cc_name, d_year) | avg_monthly_sales, | rank() over | (partition by i_category, i_brand, cc_name | order by d_year, d_moy) rn | from item, catalog_sales, date_dim, call_center | where cs_item_sk = i_item_sk and | cs_sold_date_sk = d_date_sk and | cc_call_center_sk= cs_call_center_sk and | ( | d_year = 1999 or | ( d_year = 1999-1 and d_moy =12) or | ( d_year = 1999+1 and d_moy =1) | ) | group by i_category, i_brand, | cc_name , d_year, d_moy), | v2 as( | select v1.i_category, v1.i_brand, v1.cc_name, v1.d_year, v1.d_moy | ,v1.avg_monthly_sales | ,v1.sum_sales, v1_lag.sum_sales psum, v1_lead.sum_sales nsum | from v1, v1 v1_lag, v1 v1_lead | where v1.i_category = v1_lag.i_category and | v1.i_category = v1_lead.i_category and | v1.i_brand = v1_lag.i_brand and | v1.i_brand = v1_lead.i_brand and | v1. cc_name = v1_lag. cc_name and | v1. cc_name = v1_lead. 
cc_name and | v1.rn = v1_lag.rn + 1 and | v1.rn = v1_lead.rn - 1) | select * from v2 | where d_year = 1999 and | avg_monthly_sales > 0 and | case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / | avg_monthly_sales else null end > 0.1 | order by sum_sales - avg_monthly_sales, 3 | limit 100 """.stripMargin), ("q58", """ | with ss_items as | (select i_item_id item_id, sum(ss_ext_sales_price) ss_item_rev | from store_sales, item, date_dim | where ss_item_sk = i_item_sk | and d_date in (select d_date | from date_dim | where d_week_seq = (select d_week_seq | from date_dim | where d_date = '2000-01-03')) | and ss_sold_date_sk = d_date_sk | group by i_item_id), | cs_items as | (select i_item_id item_id | ,sum(cs_ext_sales_price) cs_item_rev | from catalog_sales, item, date_dim | where cs_item_sk = i_item_sk | and d_date in (select d_date | from date_dim | where d_week_seq = (select d_week_seq | from date_dim | where d_date = '2000-01-03')) | and cs_sold_date_sk = d_date_sk | group by i_item_id), | ws_items as | (select i_item_id item_id, sum(ws_ext_sales_price) ws_item_rev | from web_sales, item, date_dim | where ws_item_sk = i_item_sk | and d_date in (select d_date | from date_dim | where d_week_seq =(select d_week_seq | from date_dim | where d_date = '2000-01-03')) | and ws_sold_date_sk = d_date_sk | group by i_item_id) | select ss_items.item_id | ,ss_item_rev | ,ss_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ss_dev | ,cs_item_rev | ,cs_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 cs_dev | ,ws_item_rev | ,ws_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ws_dev | ,(ss_item_rev+cs_item_rev+ws_item_rev)/3 average | from ss_items,cs_items,ws_items | where ss_items.item_id=cs_items.item_id | and ss_items.item_id=ws_items.item_id | and ss_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev | and ss_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev | and cs_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev | 
and cs_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev | and ws_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev | and ws_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev | order by item_id, ss_item_rev | limit 100 """.stripMargin), ("q59", """ | with wss as | (select d_week_seq, | ss_store_sk, | sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales, | sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales, | sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales, | sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) | wed_sales, | sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales, | sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales, | sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales | from store_sales,date_dim | where d_date_sk = ss_sold_date_sk | group by d_week_seq,ss_store_sk | ) | select s_store_name1,s_store_id1,d_week_seq1 | ,sun_sales1/sun_sales2,mon_sales1/mon_sales2 | ,tue_sales1/tue_sales2,wed_sales1/wed_sales2,thu_sales1/thu_sales2 | ,fri_sales1/fri_sales2,sat_sales1/sat_sales2 | from | (select s_store_name s_store_name1,wss.d_week_seq d_week_seq1 | ,s_store_id s_store_id1,sun_sales sun_sales1 | ,mon_sales mon_sales1,tue_sales tue_sales1 | ,wed_sales wed_sales1,thu_sales thu_sales1 | ,fri_sales fri_sales1,sat_sales sat_sales1 | from wss,store,date_dim d | where d.d_week_seq = wss.d_week_seq and | ss_store_sk = s_store_sk and | d_month_seq between 1212 and 1212 + 11) y, | (select s_store_name s_store_name2,wss.d_week_seq d_week_seq2 | ,s_store_id s_store_id2,sun_sales sun_sales2 | ,mon_sales mon_sales2,tue_sales tue_sales2 | ,wed_sales wed_sales2,thu_sales thu_sales2 | ,fri_sales fri_sales2,sat_sales sat_sales2 | from wss,store,date_dim d | where d.d_week_seq = wss.d_week_seq and | ss_store_sk = s_store_sk and | d_month_seq between 
1212+ 12 and 1212 + 23) x | where s_store_id1=s_store_id2 | and d_week_seq1=d_week_seq2-52 | order by s_store_name1,s_store_id1,d_week_seq1 | limit 100 """.stripMargin), ("q60", """ | with ss as ( | select i_item_id,sum(ss_ext_sales_price) total_sales | from store_sales, date_dim, customer_address, item | where | i_item_id in (select i_item_id from item where i_category in ('Music')) | and ss_item_sk = i_item_sk | and ss_sold_date_sk = d_date_sk | and d_year = 1998 | and d_moy = 9 | and ss_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_item_id), | cs as ( | select i_item_id,sum(cs_ext_sales_price) total_sales | from catalog_sales, date_dim, customer_address, item | where | i_item_id in (select i_item_id from item where i_category in ('Music')) | and cs_item_sk = i_item_sk | and cs_sold_date_sk = d_date_sk | and d_year = 1998 | and d_moy = 9 | and cs_bill_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_item_id), | ws as ( | select i_item_id,sum(ws_ext_sales_price) total_sales | from web_sales, date_dim, customer_address, item | where | i_item_id in (select i_item_id from item where i_category in ('Music')) | and ws_item_sk = i_item_sk | and ws_sold_date_sk = d_date_sk | and d_year = 1998 | and d_moy = 9 | and ws_bill_addr_sk = ca_address_sk | and ca_gmt_offset = -5 | group by i_item_id) | select i_item_id, sum(total_sales) total_sales | from (select * from ss | union all | select * from cs | union all | select * from ws) tmp1 | group by i_item_id | order by i_item_id, total_sales | limit 100 """.stripMargin), ("q61", s""" | select promotions,total,cast(promotions as decimal(15,4))/cast(total as decimal(15,4)) | *100 | from | (select sum(ss_ext_sales_price) promotions | from store_sales, store, promotion, date_dim, customer, customer_address, item | where ss_sold_date_sk = d_date_sk | and ss_store_sk = s_store_sk | and ss_promo_sk = p_promo_sk | and ss_customer_sk= c_customer_sk | and ca_address_sk = c_current_addr_sk | and ss_item_sk = 
i_item_sk | and ca_gmt_offset = -5 | and i_category = 'Jewelry' | and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y') | and s_gmt_offset = -5 | and d_year = 1998 | and d_moy = 11) promotional_sales, | (select sum(ss_ext_sales_price) total | from store_sales, store, date_dim, customer, customer_address, item | where ss_sold_date_sk = d_date_sk | and ss_store_sk = s_store_sk | and ss_customer_sk= c_customer_sk | and ca_address_sk = c_current_addr_sk | and ss_item_sk = i_item_sk | and ca_gmt_offset = -5 | and i_category = 'Jewelry' | and s_gmt_offset = -5 | and d_year = 1998 | and d_moy = 11) all_sales | order by promotions, total | limit 100 """.stripMargin), // Modifications: " -> ` ("q62", """ | select | substr(w_warehouse_name,1,20) | ,sm_type | ,web_name | ,sum(case when (ws_ship_date_sk - ws_sold_date_sk <= 30 ) then 1 else 0 end) as `30 | days` | ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 30) and | (ws_ship_date_sk - ws_sold_date_sk <= 60) then 1 else 0 end ) as | `31-60 days` | ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 60) and | (ws_ship_date_sk - ws_sold_date_sk <= 90) then 1 else 0 end) as `61-90 | days` | ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 90) and | (ws_ship_date_sk - ws_sold_date_sk <= 120) then 1 else 0 end) as | `91-120 days` | ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 120) then 1 else 0 end) as `>120 | days` | from | web_sales, warehouse, ship_mode, web_site, date_dim | where | d_month_seq between 1200 and 1200 + 11 | and ws_ship_date_sk = d_date_sk | and ws_warehouse_sk = w_warehouse_sk | and ws_ship_mode_sk = sm_ship_mode_sk | and ws_web_site_sk = web_site_sk | group by | substr(w_warehouse_name,1,20), sm_type, web_name | order by | substr(w_warehouse_name,1,20), sm_type, web_name | limit 100 """.stripMargin), ("q63", """ | select * | from (select i_manager_id | ,sum(ss_sales_price) sum_sales | ,avg(sum(ss_sales_price)) over (partition by i_manager_id) avg_monthly_sales | from item | 
,store_sales | ,date_dim | ,store | where ss_item_sk = i_item_sk | and ss_sold_date_sk = d_date_sk | and ss_store_sk = s_store_sk | and d_month_seq in (1200,1200+1,1200+2,1200+3,1200+4,1200+5,1200+6,1200+7, | 1200+8,1200+9,1200+10,1200+11) | and (( i_category in ('Books','Children','Electronics') | and i_class in ('personal','portable','refernece','self-help') | and i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7', | 'exportiunivamalg #9','scholaramalgamalg #9')) | or( i_category in ('Women','Music','Men') | and i_class in ('accessories','classical','fragrances','pants') | and i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1', | 'importoamalg #1'))) | group by i_manager_id, d_moy) tmp1 | where case when avg_monthly_sales > 0 then abs (sum_sales - avg_monthly_sales) / | avg_monthly_sales else null end > 0.1 | order by i_manager_id | ,avg_monthly_sales | ,sum_sales | limit 100 """.stripMargin), ("q64", """ | with cs_ui as | (select cs_item_sk | ,sum(cs_ext_list_price) as sale,sum | (cr_refunded_cash+cr_reversed_charge+cr_store_credit) as refund | from catalog_sales | ,catalog_returns | where cs_item_sk = cr_item_sk | and cs_order_number = cr_order_number | group by cs_item_sk | having sum(cs_ext_list_price)>2*sum | (cr_refunded_cash+cr_reversed_charge+cr_store_credit)), | cross_sales as | (select i_product_name product_name, i_item_sk item_sk, s_store_name store_name, s_zip | store_zip, | ad1.ca_street_number b_street_number, ad1.ca_street_name b_streen_name, | ad1.ca_city b_city, | ad1.ca_zip b_zip, ad2.ca_street_number c_street_number, ad2.ca_street_name | c_street_name, | ad2.ca_city c_city, ad2.ca_zip c_zip, d1.d_year as syear, d2.d_year as fsyear, | d3.d_year s2year, | count(*) cnt, sum(ss_wholesale_cost) s1, sum(ss_list_price) s2, sum | (ss_coupon_amt) s3 | FROM store_sales, store_returns, cs_ui, date_dim d1, date_dim d2, date_dim d3, | store, customer, customer_demographics cd1, customer_demographics cd2, | promotion, 
household_demographics hd1, household_demographics hd2, | customer_address ad1, customer_address ad2, income_band ib1, income_band ib2, item | WHERE ss_store_sk = s_store_sk AND | ss_sold_date_sk = d1.d_date_sk AND | ss_customer_sk = c_customer_sk AND | ss_cdemo_sk= cd1.cd_demo_sk AND | ss_hdemo_sk = hd1.hd_demo_sk AND | ss_addr_sk = ad1.ca_address_sk and | ss_item_sk = i_item_sk and | ss_item_sk = sr_item_sk and | ss_ticket_number = sr_ticket_number and | ss_item_sk = cs_ui.cs_item_sk and | c_current_cdemo_sk = cd2.cd_demo_sk AND | c_current_hdemo_sk = hd2.hd_demo_sk AND | c_current_addr_sk = ad2.ca_address_sk and | c_first_sales_date_sk = d2.d_date_sk and | c_first_shipto_date_sk = d3.d_date_sk and | ss_promo_sk = p_promo_sk and | hd1.hd_income_band_sk = ib1.ib_income_band_sk and | hd2.hd_income_band_sk = ib2.ib_income_band_sk and | cd1.cd_marital_status <> cd2.cd_marital_status and | i_color in ('purple','burlywood','indian','spring','floral','medium') and | i_current_price between 64 and 64 + 10 and | i_current_price between 64 + 1 and 64 + 15 | group by i_product_name, i_item_sk, s_store_name, s_zip, ad1.ca_street_number, | ad1.ca_street_name, ad1.ca_city, ad1.ca_zip, ad2.ca_street_number, | ad2.ca_street_name, ad2.ca_city, ad2.ca_zip, d1.d_year, d2.d_year, d3.d_year | ) | select cs1.product_name, cs1.store_name, cs1.store_zip, cs1.b_street_number, | cs1.b_streen_name, cs1.b_city, cs1.b_zip, cs1.c_street_number, cs1.c_street_name, | cs1.c_city, cs1.c_zip, cs1.syear, cs1.cnt, cs1.s1, cs1.s2, cs1.s3, cs2.s1, | cs2.s2, cs2.s3, cs2.syear, cs2.cnt | from cross_sales cs1,cross_sales cs2 | where cs1.item_sk=cs2.item_sk and | cs1.syear = 1999 and | cs2.syear = 1999 + 1 and | cs2.cnt <= cs1.cnt and | cs1.store_name = cs2.store_name and | cs1.store_zip = cs2.store_zip | order by cs1.product_name, cs1.store_name, cs2.cnt """.stripMargin), ("q65", """ | select | s_store_name, i_item_desc, sc.revenue, i_current_price, i_wholesale_cost, i_brand | from store, item, | (select 
ss_store_sk, avg(revenue) as ave | from | (select ss_store_sk, ss_item_sk, | sum(ss_sales_price) as revenue | from store_sales, date_dim | where ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11 | group by ss_store_sk, ss_item_sk) sa | group by ss_store_sk) sb, | (select ss_store_sk, ss_item_sk, sum(ss_sales_price) as revenue | from store_sales, date_dim | where ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11 | group by ss_store_sk, ss_item_sk) sc | where sb.ss_store_sk = sc.ss_store_sk and | sc.revenue <= 0.1 * sb.ave and | s_store_sk = sc.ss_store_sk and | i_item_sk = sc.ss_item_sk | order by s_store_name, i_item_desc | limit 100 """.stripMargin), // Modifications: "||" -> concat ("q66", """ | select w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, | ship_carriers, year | ,sum(jan_sales) as jan_sales | ,sum(feb_sales) as feb_sales | ,sum(mar_sales) as mar_sales | ,sum(apr_sales) as apr_sales | ,sum(may_sales) as may_sales | ,sum(jun_sales) as jun_sales | ,sum(jul_sales) as jul_sales | ,sum(aug_sales) as aug_sales | ,sum(sep_sales) as sep_sales | ,sum(oct_sales) as oct_sales | ,sum(nov_sales) as nov_sales | ,sum(dec_sales) as dec_sales | ,sum(jan_sales/w_warehouse_sq_ft) as jan_sales_per_sq_foot | ,sum(feb_sales/w_warehouse_sq_ft) as feb_sales_per_sq_foot | ,sum(mar_sales/w_warehouse_sq_ft) as mar_sales_per_sq_foot | ,sum(apr_sales/w_warehouse_sq_ft) as apr_sales_per_sq_foot | ,sum(may_sales/w_warehouse_sq_ft) as may_sales_per_sq_foot | ,sum(jun_sales/w_warehouse_sq_ft) as jun_sales_per_sq_foot | ,sum(jul_sales/w_warehouse_sq_ft) as jul_sales_per_sq_foot | ,sum(aug_sales/w_warehouse_sq_ft) as aug_sales_per_sq_foot | ,sum(sep_sales/w_warehouse_sq_ft) as sep_sales_per_sq_foot | ,sum(oct_sales/w_warehouse_sq_ft) as oct_sales_per_sq_foot | ,sum(nov_sales/w_warehouse_sq_ft) as nov_sales_per_sq_foot | ,sum(dec_sales/w_warehouse_sq_ft) as dec_sales_per_sq_foot | ,sum(jan_net) as jan_net | ,sum(feb_net) as 
feb_net | ,sum(mar_net) as mar_net | ,sum(apr_net) as apr_net | ,sum(may_net) as may_net | ,sum(jun_net) as jun_net | ,sum(jul_net) as jul_net | ,sum(aug_net) as aug_net | ,sum(sep_net) as sep_net | ,sum(oct_net) as oct_net | ,sum(nov_net) as nov_net | ,sum(dec_net) as dec_net | from ( | (select | w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country | ,concat('DHL', ',', 'BARIAN') as ship_carriers | ,d_year as year | ,sum(case when d_moy = 1 then ws_ext_sales_price * ws_quantity else 0 end) as | jan_sales | ,sum(case when d_moy = 2 then ws_ext_sales_price * ws_quantity else 0 end) as | feb_sales | ,sum(case when d_moy = 3 then ws_ext_sales_price * ws_quantity else 0 end) as | mar_sales | ,sum(case when d_moy = 4 then ws_ext_sales_price * ws_quantity else 0 end) as | apr_sales | ,sum(case when d_moy = 5 then ws_ext_sales_price * ws_quantity else 0 end) as | may_sales | ,sum(case when d_moy = 6 then ws_ext_sales_price * ws_quantity else 0 end) as | jun_sales | ,sum(case when d_moy = 7 then ws_ext_sales_price * ws_quantity else 0 end) as | jul_sales | ,sum(case when d_moy = 8 then ws_ext_sales_price * ws_quantity else 0 end) as | aug_sales | ,sum(case when d_moy = 9 then ws_ext_sales_price * ws_quantity else 0 end) as | sep_sales | ,sum(case when d_moy = 10 then ws_ext_sales_price * ws_quantity else 0 end) as | oct_sales | ,sum(case when d_moy = 11 then ws_ext_sales_price * ws_quantity else 0 end) as | nov_sales | ,sum(case when d_moy = 12 then ws_ext_sales_price * ws_quantity else 0 end) as | dec_sales | ,sum(case when d_moy = 1 then ws_net_paid * ws_quantity else 0 end) as jan_net | ,sum(case when d_moy = 2 then ws_net_paid * ws_quantity else 0 end) as feb_net | ,sum(case when d_moy = 3 then ws_net_paid * ws_quantity else 0 end) as mar_net | ,sum(case when d_moy = 4 then ws_net_paid * ws_quantity else 0 end) as apr_net | ,sum(case when d_moy = 5 then ws_net_paid * ws_quantity else 0 end) as may_net | ,sum(case when d_moy = 6 then ws_net_paid * 
ws_quantity else 0 end) as jun_net | ,sum(case when d_moy = 7 then ws_net_paid * ws_quantity else 0 end) as jul_net | ,sum(case when d_moy = 8 then ws_net_paid * ws_quantity else 0 end) as aug_net | ,sum(case when d_moy = 9 then ws_net_paid * ws_quantity else 0 end) as sep_net | ,sum(case when d_moy = 10 then ws_net_paid * ws_quantity else 0 end) as oct_net | ,sum(case when d_moy = 11 then ws_net_paid * ws_quantity else 0 end) as nov_net | ,sum(case when d_moy = 12 then ws_net_paid * ws_quantity else 0 end) as dec_net | from | web_sales, warehouse, date_dim, time_dim, ship_mode | where | ws_warehouse_sk = w_warehouse_sk | and ws_sold_date_sk = d_date_sk | and ws_sold_time_sk = t_time_sk | and ws_ship_mode_sk = sm_ship_mode_sk | and d_year = 2001 | and t_time between 30838 and 30838+28800 | and sm_carrier in ('DHL','BARIAN') | group by | w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, d_year) | union all | (select w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country | ,concat('DHL', ',', 'BARIAN') as ship_carriers | ,d_year as year | ,sum(case when d_moy = 1 then cs_sales_price * cs_quantity else 0 end) as jan_sales | ,sum(case when d_moy = 2 then cs_sales_price * cs_quantity else 0 end) as feb_sales | ,sum(case when d_moy = 3 then cs_sales_price * cs_quantity else 0 end) as mar_sales | ,sum(case when d_moy = 4 then cs_sales_price * cs_quantity else 0 end) as apr_sales | ,sum(case when d_moy = 5 then cs_sales_price * cs_quantity else 0 end) as may_sales | ,sum(case when d_moy = 6 then cs_sales_price * cs_quantity else 0 end) as jun_sales | ,sum(case when d_moy = 7 then cs_sales_price * cs_quantity else 0 end) as jul_sales | ,sum(case when d_moy = 8 then cs_sales_price * cs_quantity else 0 end) as aug_sales | ,sum(case when d_moy = 9 then cs_sales_price * cs_quantity else 0 end) as sep_sales | ,sum(case when d_moy = 10 then cs_sales_price * cs_quantity else 0 end) as oct_sales | ,sum(case when d_moy = 11 then 
cs_sales_price * cs_quantity else 0 end) as nov_sales | ,sum(case when d_moy = 12 then cs_sales_price * cs_quantity else 0 end) as dec_sales | ,sum(case when d_moy = 1 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | jan_net | ,sum(case when d_moy = 2 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | feb_net | ,sum(case when d_moy = 3 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | mar_net | ,sum(case when d_moy = 4 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | apr_net | ,sum(case when d_moy = 5 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | may_net | ,sum(case when d_moy = 6 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | jun_net | ,sum(case when d_moy = 7 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | jul_net | ,sum(case when d_moy = 8 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | aug_net | ,sum(case when d_moy = 9 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | sep_net | ,sum(case when d_moy = 10 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | oct_net | ,sum(case when d_moy = 11 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | nov_net | ,sum(case when d_moy = 12 then cs_net_paid_inc_tax * cs_quantity else 0 end) as | dec_net | from | catalog_sales, warehouse, date_dim, time_dim, ship_mode | where | cs_warehouse_sk = w_warehouse_sk | and cs_sold_date_sk = d_date_sk | and cs_sold_time_sk = t_time_sk | and cs_ship_mode_sk = sm_ship_mode_sk | and d_year = 2001 | and t_time between 30838 AND 30838+28800 | and sm_carrier in ('DHL','BARIAN') | group by | w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, d_year | ) | ) x | group by | w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, | ship_carriers, year | order by w_warehouse_name | limit 100 """.stripMargin), ("q67", """ | select * from | (select i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy, | s_store_id, | sumsales, rank() over (partition by i_category order 
by sumsales desc) rk | from | (select i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy, | s_store_id, sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales | from store_sales, date_dim, store, item | where ss_sold_date_sk=d_date_sk | and ss_item_sk=i_item_sk | and ss_store_sk = s_store_sk | and d_month_seq between 1200 and 1200+11 | group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy, | d_moy,s_store_id))dw1) dw2 | where rk <= 100 | order by | i_category, i_class, i_brand, i_product_name, d_year, | d_qoy, d_moy, s_store_id, sumsales, rk | limit 100 """.stripMargin), ("q68", """ | select | c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number, extended_price, | extended_tax, list_price | from (select | ss_ticket_number, ss_customer_sk, ca_city bought_city, | sum(ss_ext_sales_price) extended_price, | sum(ss_ext_list_price) list_price, | sum(ss_ext_tax) extended_tax | from store_sales, date_dim, store, household_demographics, customer_address | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_store_sk = store.s_store_sk | and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk | and store_sales.ss_addr_sk = customer_address.ca_address_sk | and date_dim.d_dom between 1 and 2 | and (household_demographics.hd_dep_count = 4 or | household_demographics.hd_vehicle_count = 3) | and date_dim.d_year in (1999,1999+1,1999+2) | and store.s_city in ('Midway','Fairview') | group by ss_ticket_number, ss_customer_sk, ss_addr_sk,ca_city) dn, | customer, | customer_address current_addr | where ss_customer_sk = c_customer_sk | and customer.c_current_addr_sk = current_addr.ca_address_sk | and current_addr.ca_city <> bought_city | order by c_last_name, ss_ticket_number | limit 100 """.stripMargin), ("q69", """ | select | cd_gender, cd_marital_status, cd_education_status, count(*) cnt1, | cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3 | from | customer c,customer_address 
ca,customer_demographics | where | c.c_current_addr_sk = ca.ca_address_sk and | ca_state in ('KY', 'GA', 'NM') and | cd_demo_sk = c.c_current_cdemo_sk and | exists (select * from store_sales, date_dim | where c.c_customer_sk = ss_customer_sk and | ss_sold_date_sk = d_date_sk and | d_year = 2001 and | d_moy between 4 and 4+2) and | (not exists (select * from web_sales, date_dim | where c.c_customer_sk = ws_bill_customer_sk and | ws_sold_date_sk = d_date_sk and | d_year = 2001 and | d_moy between 4 and 4+2) and | not exists (select * from catalog_sales, date_dim | where c.c_customer_sk = cs_ship_customer_sk and | cs_sold_date_sk = d_date_sk and | d_year = 2001 and | d_moy between 4 and 4+2)) | group by cd_gender, cd_marital_status, cd_education_status, | cd_purchase_estimate, cd_credit_rating | order by cd_gender, cd_marital_status, cd_education_status, | cd_purchase_estimate, cd_credit_rating | limit 100 """.stripMargin), ("q70", """ | select | sum(ss_net_profit) as total_sum, s_state, s_county | ,grouping(s_state)+grouping(s_county) as lochierarchy | ,rank() over ( | partition by grouping(s_state)+grouping(s_county), | case when grouping(s_county) = 0 then s_state end | order by sum(ss_net_profit) desc) as rank_within_parent | from | store_sales, date_dim d1, store | where | d1.d_month_seq between 1200 and 1200+11 | and d1.d_date_sk = ss_sold_date_sk | and s_store_sk = ss_store_sk | and s_state in | (select s_state from | (select s_state as s_state, | rank() over ( partition by s_state order by sum(ss_net_profit) | desc) as ranking | from store_sales, store, date_dim | where d_month_seq between 1200 and 1200+11 | and d_date_sk = ss_sold_date_sk | and s_store_sk = ss_store_sk | group by s_state) tmp1 | where ranking <= 5) | group by rollup(s_state,s_county) | order by | lochierarchy desc | ,case when lochierarchy = 0 then s_state end | ,rank_within_parent | limit 100 """.stripMargin), ("q71", """ | select i_brand_id brand_id, i_brand brand,t_hour,t_minute, | 
sum(ext_price) ext_price | from item, | (select | ws_ext_sales_price as ext_price, | ws_sold_date_sk as sold_date_sk, | ws_item_sk as sold_item_sk, | ws_sold_time_sk as time_sk | from web_sales, date_dim | where d_date_sk = ws_sold_date_sk | and d_moy=11 | and d_year=1999 | union all | select | cs_ext_sales_price as ext_price, | cs_sold_date_sk as sold_date_sk, | cs_item_sk as sold_item_sk, | cs_sold_time_sk as time_sk | from catalog_sales, date_dim | where d_date_sk = cs_sold_date_sk | and d_moy=11 | and d_year=1999 | union all | select | ss_ext_sales_price as ext_price, | ss_sold_date_sk as sold_date_sk, | ss_item_sk as sold_item_sk, | ss_sold_time_sk as time_sk | from store_sales,date_dim | where d_date_sk = ss_sold_date_sk | and d_moy=11 | and d_year=1999 | ) as tmp, time_dim | where | sold_item_sk = i_item_sk | and i_manager_id=1 | and time_sk = t_time_sk | and (t_meal_time = 'breakfast' or t_meal_time = 'dinner') | group by i_brand, i_brand_id,t_hour,t_minute | order by ext_price desc, brand_id """.stripMargin), // Modifications: "+ days" -> date_add ("q72", """ | select i_item_desc | ,w_warehouse_name | ,d1.d_week_seq | ,count(case when p_promo_sk is null then 1 else 0 end) no_promo | ,count(case when p_promo_sk is not null then 1 else 0 end) promo | ,count(*) total_cnt | from catalog_sales | join inventory on (cs_item_sk = inv_item_sk) | join warehouse on (w_warehouse_sk=inv_warehouse_sk) | join item on (i_item_sk = cs_item_sk) | join customer_demographics on (cs_bill_cdemo_sk = cd_demo_sk) | join household_demographics on (cs_bill_hdemo_sk = hd_demo_sk) | join date_dim d1 on (cs_sold_date_sk = d1.d_date_sk) | join date_dim d2 on (inv_date_sk = d2.d_date_sk) | join date_dim d3 on (cs_ship_date_sk = d3.d_date_sk) | left outer join promotion on (cs_promo_sk=p_promo_sk) | left outer join catalog_returns on (cr_item_sk = cs_item_sk and cr_order_number = | cs_order_number) | where d1.d_week_seq = d2.d_week_seq | and inv_quantity_on_hand < cs_quantity | and 
d3.d_date > (cast(d1.d_date AS DATE) + interval 5 days) | and hd_buy_potential = '>10000' | and d1.d_year = 1999 | and hd_buy_potential = '>10000' | and cd_marital_status = 'D' | and d1.d_year = 1999 | group by i_item_desc,w_warehouse_name,d1.d_week_seq | order by total_cnt desc, i_item_desc, w_warehouse_name, d_week_seq | limit 100 """.stripMargin), ("q73", """ | select | c_last_name, c_first_name, c_salutation, c_preferred_cust_flag, | ss_ticket_number, cnt from | (select ss_ticket_number, ss_customer_sk, count(*) cnt | from store_sales,date_dim,store,household_demographics | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_store_sk = store.s_store_sk | and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk | and date_dim.d_dom between 1 and 2 | and (household_demographics.hd_buy_potential = '>10000' or | household_demographics.hd_buy_potential = 'unknown') | and household_demographics.hd_vehicle_count > 0 | and case when household_demographics.hd_vehicle_count > 0 then | household_demographics.hd_dep_count/ household_demographics | .hd_vehicle_count else null end > 1 | and date_dim.d_year in (1999,1999+1,1999+2) | and store.s_county in ('Williamson County','Franklin Parish','Bronx County','Orange | County') | group by ss_ticket_number,ss_customer_sk) dj,customer | where ss_customer_sk = c_customer_sk | and cnt between 1 and 5 | order by cnt desc """.stripMargin), ("q74", """ | with year_total as ( | select | c_customer_id customer_id, c_first_name customer_first_name, | c_last_name customer_last_name, d_year as year, | sum(ss_net_paid) year_total, 's' sale_type | from | customer, store_sales, date_dim | where c_customer_sk = ss_customer_sk | and ss_sold_date_sk = d_date_sk | and d_year in (2001,2001+1) | group by | c_customer_id, c_first_name, c_last_name, d_year | union all | select | c_customer_id customer_id, c_first_name customer_first_name, | c_last_name customer_last_name, d_year as year, | sum(ws_net_paid) year_total, 
'w' sale_type | from | customer, web_sales, date_dim | where c_customer_sk = ws_bill_customer_sk | and ws_sold_date_sk = d_date_sk | and d_year in (2001,2001+1) | group by | c_customer_id, c_first_name, c_last_name, d_year) | select | t_s_secyear.customer_id, t_s_secyear.customer_first_name, t_s_secyear | .customer_last_name | from | year_total t_s_firstyear, year_total t_s_secyear, | year_total t_w_firstyear, year_total t_w_secyear | where t_s_secyear.customer_id = t_s_firstyear.customer_id | and t_s_firstyear.customer_id = t_w_secyear.customer_id | and t_s_firstyear.customer_id = t_w_firstyear.customer_id | and t_s_firstyear.sale_type = 's' | and t_w_firstyear.sale_type = 'w' | and t_s_secyear.sale_type = 's' | and t_w_secyear.sale_type = 'w' | and t_s_firstyear.year = 2001 | and t_s_secyear.year = 2001+1 | and t_w_firstyear.year = 2001 | and t_w_secyear.year = 2001+1 | and t_s_firstyear.year_total > 0 | and t_w_firstyear.year_total > 0 | and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / | t_w_firstyear.year_total else null end | > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / | t_s_firstyear.year_total else null end | order by 1, 1, 1 | limit 100 """.stripMargin), ("q75", """ | WITH all_sales AS ( | SELECT | d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id, | SUM(sales_cnt) AS sales_cnt, SUM(sales_amt) AS sales_amt | FROM ( | SELECT | d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id, | cs_quantity - COALESCE(cr_return_quantity,0) AS sales_cnt, | cs_ext_sales_price - COALESCE(cr_return_amount,0.0) AS sales_amt | FROM catalog_sales | JOIN item ON i_item_sk=cs_item_sk | JOIN date_dim ON d_date_sk=cs_sold_date_sk | LEFT JOIN catalog_returns ON (cs_order_number=cr_order_number | AND cs_item_sk=cr_item_sk) | WHERE i_category='Books' | UNION | SELECT | d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id, | ss_quantity - COALESCE(sr_return_quantity,0) AS sales_cnt, | ss_ext_sales_price - 
COALESCE(sr_return_amt,0.0) AS sales_amt | FROM store_sales | JOIN item ON i_item_sk=ss_item_sk | JOIN date_dim ON d_date_sk=ss_sold_date_sk | LEFT JOIN store_returns ON (ss_ticket_number=sr_ticket_number | AND ss_item_sk=sr_item_sk) | WHERE i_category='Books' | UNION | SELECT | d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id, | ws_quantity - COALESCE(wr_return_quantity,0) AS sales_cnt, | ws_ext_sales_price - COALESCE(wr_return_amt,0.0) AS sales_amt | FROM web_sales | JOIN item ON i_item_sk=ws_item_sk | JOIN date_dim ON d_date_sk=ws_sold_date_sk | LEFT JOIN web_returns ON (ws_order_number=wr_order_number | AND ws_item_sk=wr_item_sk) | WHERE i_category='Books') sales_detail | GROUP BY d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id) | SELECT | prev_yr.d_year AS prev_year, curr_yr.d_year AS year, curr_yr.i_brand_id, | curr_yr.i_class_id, curr_yr.i_category_id, curr_yr.i_manufact_id, | prev_yr.sales_cnt AS prev_yr_cnt, curr_yr.sales_cnt AS curr_yr_cnt, | curr_yr.sales_cnt-prev_yr.sales_cnt AS sales_cnt_diff, | curr_yr.sales_amt-prev_yr.sales_amt AS sales_amt_diff | FROM all_sales curr_yr, all_sales prev_yr | WHERE curr_yr.i_brand_id=prev_yr.i_brand_id | AND curr_yr.i_class_id=prev_yr.i_class_id | AND curr_yr.i_category_id=prev_yr.i_category_id | AND curr_yr.i_manufact_id=prev_yr.i_manufact_id | AND curr_yr.d_year=2002 | AND prev_yr.d_year=2002-1 | AND CAST(curr_yr.sales_cnt AS DECIMAL(17,2))/CAST(prev_yr.sales_cnt AS DECIMAL(17,2)) | <0.9 | ORDER BY sales_cnt_diff | LIMIT 100 """.stripMargin), ("q76", """ | SELECT | channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt, | SUM(ext_sales_price) sales_amt | FROM( | SELECT | 'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category, | ss_ext_sales_price ext_sales_price | FROM store_sales, item, date_dim | WHERE ss_store_sk IS NULL | AND ss_sold_date_sk=d_date_sk | AND ss_item_sk=i_item_sk | UNION ALL | SELECT | 'web' as channel, ws_ship_customer_sk col_name, d_year, d_qoy, 
i_category, | ws_ext_sales_price ext_sales_price | FROM web_sales, item, date_dim | WHERE ws_ship_customer_sk IS NULL | AND ws_sold_date_sk=d_date_sk | AND ws_item_sk=i_item_sk | UNION ALL | SELECT | 'catalog' as channel, cs_ship_addr_sk col_name, d_year, d_qoy, i_category, | cs_ext_sales_price ext_sales_price | FROM catalog_sales, item, date_dim | WHERE cs_ship_addr_sk IS NULL | AND cs_sold_date_sk=d_date_sk | AND cs_item_sk=i_item_sk) foo | GROUP BY channel, col_name, d_year, d_qoy, i_category | ORDER BY channel, col_name, d_year, d_qoy, i_category | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add ("q77", """ | with ss as | (select s_store_sk, sum(ss_ext_sales_price) as sales, sum(ss_net_profit) as profit | from store_sales, date_dim, store | where ss_sold_date_sk = d_date_sk | and d_date between cast('2000-08-03' as date) and | (cast('2000-08-03' as date) + interval 30 days) | and ss_store_sk = s_store_sk | group by s_store_sk), | sr as | (select s_store_sk, sum(sr_return_amt) as returns, sum(sr_net_loss) as profit_loss | from store_returns, date_dim, store | where sr_returned_date_sk = d_date_sk | and d_date between cast('2000-08-03' as date) and | (cast('2000-08-03' as date) + interval 30 days) | and sr_store_sk = s_store_sk | group by s_store_sk), | cs as | (select cs_call_center_sk, sum(cs_ext_sales_price) as sales, sum(cs_net_profit) as profit | from catalog_sales, date_dim | where cs_sold_date_sk = d_date_sk | and d_date between cast('2000-08-03' as date) and | (cast('2000-08-03' as date) + interval 30 days) | group by cs_call_center_sk), | cr as | (select sum(cr_return_amount) as returns, sum(cr_net_loss) as profit_loss | from catalog_returns, date_dim | where cr_returned_date_sk = d_date_sk | and d_date between cast('2000-08-03' as date) and | (cast('2000-08-03' as date) + interval 30 days)), | ws as | (select wp_web_page_sk, sum(ws_ext_sales_price) as sales, sum(ws_net_profit) as profit | from web_sales, date_dim, web_page | where 
ws_sold_date_sk = d_date_sk | and d_date between cast('2000-08-03' as date) and | (cast('2000-08-03' as date) + interval 30 days) | and ws_web_page_sk = wp_web_page_sk | group by wp_web_page_sk), | wr as | (select wp_web_page_sk, sum(wr_return_amt) as returns, sum(wr_net_loss) as profit_loss | from web_returns, date_dim, web_page | where wr_returned_date_sk = d_date_sk | and d_date between cast('2000-08-03' as date) and | (cast('2000-08-03' as date) + interval 30 days) | and wr_web_page_sk = wp_web_page_sk | group by wp_web_page_sk) | select channel, id, sum(sales) as sales, sum(returns) as returns, sum(profit) as profit | from | (select | 'store channel' as channel, ss.s_store_sk as id, sales, | coalesce(returns, 0) as returns, (profit - coalesce(profit_loss,0)) as profit | from ss left join sr | on ss.s_store_sk = sr.s_store_sk | union all | select | 'catalog channel' as channel, cs_call_center_sk as id, sales, | returns, (profit - profit_loss) as profit | from cs, cr | union all | select | 'web channel' as channel, ws.wp_web_page_sk as id, sales, | coalesce(returns, 0) returns, (profit - coalesce(profit_loss,0)) as profit | from ws left join wr | on ws.wp_web_page_sk = wr.wp_web_page_sk | ) x | group by rollup(channel, id) | order by channel, id | limit 100 """.stripMargin), ("q78", """ | with ws as | (select d_year AS ws_sold_year, ws_item_sk, | ws_bill_customer_sk ws_customer_sk, | sum(ws_quantity) ws_qty, | sum(ws_wholesale_cost) ws_wc, | sum(ws_sales_price) ws_sp | from web_sales | left join web_returns on wr_order_number=ws_order_number and ws_item_sk=wr_item_sk | join date_dim on ws_sold_date_sk = d_date_sk | where wr_order_number is null | group by d_year, ws_item_sk, ws_bill_customer_sk | ), | cs as | (select d_year AS cs_sold_year, cs_item_sk, | cs_bill_customer_sk cs_customer_sk, | sum(cs_quantity) cs_qty, | sum(cs_wholesale_cost) cs_wc, | sum(cs_sales_price) cs_sp | from catalog_sales | left join catalog_returns on cr_order_number=cs_order_number and 
cs_item_sk=cr_item_sk | join date_dim on cs_sold_date_sk = d_date_sk | where cr_order_number is null | group by d_year, cs_item_sk, cs_bill_customer_sk | ), | ss as | (select d_year AS ss_sold_year, ss_item_sk, | ss_customer_sk, | sum(ss_quantity) ss_qty, | sum(ss_wholesale_cost) ss_wc, | sum(ss_sales_price) ss_sp | from store_sales | left join store_returns on sr_ticket_number=ss_ticket_number and ss_item_sk=sr_item_sk | join date_dim on ss_sold_date_sk = d_date_sk | where sr_ticket_number is null | group by d_year, ss_item_sk, ss_customer_sk | ) | select | round(ss_qty/(coalesce(ws_qty+cs_qty,1)),2) ratio, | ss_qty store_qty, ss_wc store_wholesale_cost, ss_sp store_sales_price, | coalesce(ws_qty,0)+coalesce(cs_qty,0) other_chan_qty, | coalesce(ws_wc,0)+coalesce(cs_wc,0) other_chan_wholesale_cost, | coalesce(ws_sp,0)+coalesce(cs_sp,0) other_chan_sales_price | from ss | left join ws on (ws_sold_year=ss_sold_year and ws_item_sk=ss_item_sk and | ws_customer_sk=ss_customer_sk) | left join cs on (cs_sold_year=ss_sold_year and ss_item_sk=cs_item_sk and | cs_customer_sk=ss_customer_sk) | where coalesce(ws_qty,0)>0 and coalesce(cs_qty, 0)>0 and ss_sold_year=2000 | order by | ratio, | ss_qty desc, ss_wc desc, ss_sp desc, | other_chan_qty, | other_chan_wholesale_cost, | other_chan_sales_price, | round(ss_qty/(coalesce(ws_qty+cs_qty,1)),2) | limit 100 """.stripMargin), ("q79", """ | select | c_last_name,c_first_name,substr(s_city,1,30),ss_ticket_number,amt,profit | from | (select ss_ticket_number | ,ss_customer_sk | ,store.s_city | ,sum(ss_coupon_amt) amt | ,sum(ss_net_profit) profit | from store_sales,date_dim,store,household_demographics | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_store_sk = store.s_store_sk | and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk | and (household_demographics.hd_dep_count = 6 or | household_demographics.hd_vehicle_count > 2) | and date_dim.d_dow = 1 | and date_dim.d_year in (1999,1999+1,1999+2) | 
and store.s_number_employees between 200 and 295 | group by ss_ticket_number,ss_customer_sk,ss_addr_sk,store.s_city) ms,customer | where ss_customer_sk = c_customer_sk | order by c_last_name,c_first_name,substr(s_city,1,30), profit | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add // Modifications: "||" -> "concat" ("q80", """ | with ssr as | (select s_store_id as store_id, | sum(ss_ext_sales_price) as sales, | sum(coalesce(sr_return_amt, 0)) as returns, | sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit | from store_sales left outer join store_returns on | (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number), | date_dim, store, item, promotion | where ss_sold_date_sk = d_date_sk | and d_date between cast('2000-08-23' as date) | and (cast('2000-08-23' as date) + interval 30 days) | and ss_store_sk = s_store_sk | and ss_item_sk = i_item_sk | and i_current_price > 50 | and ss_promo_sk = p_promo_sk | and p_channel_tv = 'N' | group by s_store_id), | csr as | (select cp_catalog_page_id as catalog_page_id, | sum(cs_ext_sales_price) as sales, | sum(coalesce(cr_return_amount, 0)) as returns, | sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit | from catalog_sales left outer join catalog_returns on | (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number), | date_dim, catalog_page, item, promotion | where cs_sold_date_sk = d_date_sk | and d_date between cast('2000-08-23' as date) | and (cast('2000-08-23' as date) + interval 30 days) | and cs_catalog_page_sk = cp_catalog_page_sk | and cs_item_sk = i_item_sk | and i_current_price > 50 | and cs_promo_sk = p_promo_sk | and p_channel_tv = 'N' | group by cp_catalog_page_id), | wsr as | (select web_site_id, | sum(ws_ext_sales_price) as sales, | sum(coalesce(wr_return_amt, 0)) as returns, | sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit | from web_sales left outer join web_returns on | (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number), | date_dim, web_site, 
item, promotion | where ws_sold_date_sk = d_date_sk | and d_date between cast('2000-08-23' as date) | and (cast('2000-08-23' as date) + interval 30 days) | and ws_web_site_sk = web_site_sk | and ws_item_sk = i_item_sk | and i_current_price > 50 | and ws_promo_sk = p_promo_sk | and p_channel_tv = 'N' | group by web_site_id) | select channel, id, sum(sales) as sales, sum(returns) as returns, sum(profit) as profit | from (select | 'store channel' as channel, concat('store', store_id) as id, sales, returns, profit | from ssr | union all | select | 'catalog channel' as channel, concat('catalog_page', catalog_page_id) as id, | sales, returns, profit | from csr | union all | select | 'web channel' as channel, concat('web_site', web_site_id) as id, sales, returns, | profit | from wsr) x | group by rollup (channel, id) | order by channel, id | limit 100 """.stripMargin), ("q81", """ | with customer_total_return as | (select | cr_returning_customer_sk as ctr_customer_sk, ca_state as ctr_state, | sum(cr_return_amt_inc_tax) as ctr_total_return | from catalog_returns, date_dim, customer_address | where cr_returned_date_sk = d_date_sk | and d_year = 2000 | and cr_returning_addr_sk = ca_address_sk | group by cr_returning_customer_sk, ca_state ) | select | c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name, | ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country, | ca_gmt_offset,ca_location_type,ctr_total_return | from customer_total_return ctr1, customer_address, customer | where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2 | from customer_total_return ctr2 | where ctr1.ctr_state = ctr2.ctr_state) | and ca_address_sk = c_current_addr_sk | and ca_state = 'GA' | and ctr1.ctr_customer_sk = c_customer_sk | order by c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number, | ca_street_name | ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip, | ca_country,ca_gmt_offset | 
,ca_location_type,ctr_total_return | limit 100 """.stripMargin), ("q82", """ | select i_item_id, i_item_desc, i_current_price | from item, inventory, date_dim, store_sales | where i_current_price between 62 and 62+30 | and inv_item_sk = i_item_sk | and d_date_sk=inv_date_sk | and d_date between cast('2000-05-25' as date) and (cast('2000-05-25' as date) + | interval 60 days) | and i_manufact_id in (129, 270, 821, 423) | and inv_quantity_on_hand between 100 and 500 | and ss_item_sk = i_item_sk | group by i_item_id,i_item_desc,i_current_price | order by i_item_id | limit 100 """.stripMargin), ("q83", """ | with sr_items as | (select i_item_id item_id, sum(sr_return_quantity) sr_item_qty | from store_returns, item, date_dim | where sr_item_sk = i_item_sk | and d_date in (select d_date from date_dim where d_week_seq in | (select d_week_seq from date_dim where d_date in ('2000-06-30', | '2000-09-27','2000-11-17'))) | and sr_returned_date_sk = d_date_sk | group by i_item_id), | cr_items as | (select i_item_id item_id, sum(cr_return_quantity) cr_item_qty | from catalog_returns, item, date_dim | where cr_item_sk = i_item_sk | and d_date in (select d_date from date_dim where d_week_seq in | (select d_week_seq from date_dim where d_date in ('2000-06-30', | '2000-09-27','2000-11-17'))) | and cr_returned_date_sk = d_date_sk | group by i_item_id), | wr_items as | (select i_item_id item_id, sum(wr_return_quantity) wr_item_qty | from web_returns, item, date_dim | where wr_item_sk = i_item_sk and d_date in | (select d_date from date_dim where d_week_seq in | (select d_week_seq from date_dim where d_date in ('2000-06-30', | '2000-09-27','2000-11-17'))) | and wr_returned_date_sk = d_date_sk | group by i_item_id) | select sr_items.item_id | ,sr_item_qty | ,sr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 sr_dev | ,cr_item_qty | ,cr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 cr_dev | ,wr_item_qty | ,wr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 
wr_dev | ,(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 average | from sr_items, cr_items, wr_items | where sr_items.item_id=cr_items.item_id | and sr_items.item_id=wr_items.item_id | order by sr_items.item_id, sr_item_qty | limit 100 """.stripMargin), // Modifications: "||" -> concat ("q84", """ | select c_customer_id as customer_id | ,concat(c_last_name, ', ', c_first_name) as customername | from customer | ,customer_address | ,customer_demographics | ,household_demographics | ,income_band | ,store_returns | where ca_city = 'Edgewood' | and c_current_addr_sk = ca_address_sk | and ib_lower_bound >= 38128 | and ib_upper_bound <= 38128 + 50000 | and ib_income_band_sk = hd_income_band_sk | and cd_demo_sk = c_current_cdemo_sk | and hd_demo_sk = c_current_hdemo_sk | and sr_cdemo_sk = cd_demo_sk | order by c_customer_id | limit 100 """.stripMargin), ("q85", """ | select | substr(r_reason_desc,1,20), avg(ws_quantity), avg(wr_refunded_cash), avg(wr_fee) | from web_sales, web_returns, web_page, customer_demographics cd1, | customer_demographics cd2, customer_address, date_dim, reason | where ws_web_page_sk = wp_web_page_sk | and ws_item_sk = wr_item_sk | and ws_order_number = wr_order_number | and ws_sold_date_sk = d_date_sk and d_year = 2000 | and cd1.cd_demo_sk = wr_refunded_cdemo_sk | and cd2.cd_demo_sk = wr_returning_cdemo_sk | and ca_address_sk = wr_refunded_addr_sk | and r_reason_sk = wr_reason_sk | and | ( | ( | cd1.cd_marital_status = 'M' | and | cd1.cd_marital_status = cd2.cd_marital_status | and | cd1.cd_education_status = 'Advanced Degree' | and | cd1.cd_education_status = cd2.cd_education_status | and | ws_sales_price between 100.00 and 150.00 | ) | or | ( | cd1.cd_marital_status = 'S' | and | cd1.cd_marital_status = cd2.cd_marital_status | and | cd1.cd_education_status = 'College' | and | cd1.cd_education_status = cd2.cd_education_status | and | ws_sales_price between 50.00 and 100.00 | ) | or | ( | cd1.cd_marital_status = 'W' | and | cd1.cd_marital_status = 
cd2.cd_marital_status | and | cd1.cd_education_status = '2 yr Degree' | and | cd1.cd_education_status = cd2.cd_education_status | and | ws_sales_price between 150.00 and 200.00 | ) | ) | and | ( | ( | ca_country = 'United States' | and | ca_state in ('IN', 'OH', 'NJ') | and ws_net_profit between 100 and 200 | ) | or | ( | ca_country = 'United States' | and | ca_state in ('WI', 'CT', 'KY') | and ws_net_profit between 150 and 300 | ) | or | ( | ca_country = 'United States' | and | ca_state in ('LA', 'IA', 'AR') | and ws_net_profit between 50 and 250 | ) | ) | group by r_reason_desc | order by substr(r_reason_desc,1,20) | ,avg(ws_quantity) | ,avg(wr_refunded_cash) | ,avg(wr_fee) | limit 100 """.stripMargin), ("q86", """ | select sum(ws_net_paid) as total_sum, i_category, i_class, | grouping(i_category)+grouping(i_class) as lochierarchy, | rank() over ( | partition by grouping(i_category)+grouping(i_class), | case when grouping(i_class) = 0 then i_category end | order by sum(ws_net_paid) desc) as rank_within_parent | from | web_sales, date_dim d1, item | where | d1.d_month_seq between 1200 and 1200+11 | and d1.d_date_sk = ws_sold_date_sk | and i_item_sk = ws_item_sk | group by rollup(i_category,i_class) | order by | lochierarchy desc, | case when lochierarchy = 0 then i_category end, | rank_within_parent | limit 100 """.stripMargin), ("q87", """ | select count(*) | from ((select distinct c_last_name, c_first_name, d_date | from store_sales, date_dim, customer | where store_sales.ss_sold_date_sk = date_dim.d_date_sk | and store_sales.ss_customer_sk = customer.c_customer_sk | and d_month_seq between 1200 and 1200+11) | except | (select distinct c_last_name, c_first_name, d_date | from catalog_sales, date_dim, customer | where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk | and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk | and d_month_seq between 1200 and 1200+11) | except | (select distinct c_last_name, c_first_name, d_date | from web_sales, date_dim, 
customer | where web_sales.ws_sold_date_sk = date_dim.d_date_sk | and web_sales.ws_bill_customer_sk = customer.c_customer_sk | and d_month_seq between 1200 and 1200+11) |) cool_cust """.stripMargin), ("q88", """ | select * | from | (select count(*) h8_30_to_9 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 8 | and time_dim.t_minute >= 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s1, | (select count(*) h9_to_9_30 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 9 | and time_dim.t_minute < 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s2, | (select count(*) h9_30_to_10 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 9 | and time_dim.t_minute >= 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and 
household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s3, | (select count(*) h10_to_10_30 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 10 | and time_dim.t_minute < 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s4, | (select count(*) h10_30_to_11 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 10 | and time_dim.t_minute >= 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s5, | (select count(*) h11_to_11_30 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 11 | and time_dim.t_minute < 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s6, | (select count(*) h11_30_to_12 | from store_sales, 
household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 11 | and time_dim.t_minute >= 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s7, | (select count(*) h12_to_12_30 | from store_sales, household_demographics , time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 12 | and time_dim.t_minute < 30 | and ((household_demographics.hd_dep_count = 4 and household_demographics | .hd_vehicle_count<=4+2) or | (household_demographics.hd_dep_count = 2 and household_demographics | .hd_vehicle_count<=2+2) or | (household_demographics.hd_dep_count = 0 and household_demographics | .hd_vehicle_count<=0+2)) | and store.s_store_name = 'ese') s8 """.stripMargin), ("q89", """ | select * | from( | select i_category, i_class, i_brand, | s_store_name, s_company_name, | d_moy, | sum(ss_sales_price) sum_sales, | avg(sum(ss_sales_price)) over | (partition by i_category, i_brand, s_store_name, s_company_name) | avg_monthly_sales | from item, store_sales, date_dim, store | where ss_item_sk = i_item_sk and | ss_sold_date_sk = d_date_sk and | ss_store_sk = s_store_sk and | d_year in (1999) and | ((i_category in ('Books','Electronics','Sports') and | i_class in ('computers','stereo','football')) | or (i_category in ('Men','Jewelry','Women') and | i_class in ('shirts','birdal','dresses'))) | group by i_category, i_class, i_brand, | s_store_name, s_company_name, d_moy) tmp1 | where case when (avg_monthly_sales <> 0) then (abs(sum_sales - avg_monthly_sales) / | 
avg_monthly_sales) else null end > 0.1 | order by sum_sales - avg_monthly_sales, s_store_name | limit 100 """.stripMargin), ("q90", """ | select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio | from ( select count(*) amc | from web_sales, household_demographics , time_dim, web_page | where ws_sold_time_sk = time_dim.t_time_sk | and ws_ship_hdemo_sk = household_demographics.hd_demo_sk | and ws_web_page_sk = web_page.wp_web_page_sk | and time_dim.t_hour between 8 and 8+1 | and household_demographics.hd_dep_count = 6 | and web_page.wp_char_count between 5000 and 5200) at, | ( select count(*) pmc | from web_sales, household_demographics , time_dim, web_page | where ws_sold_time_sk = time_dim.t_time_sk | and ws_ship_hdemo_sk = household_demographics.hd_demo_sk | and ws_web_page_sk = web_page.wp_web_page_sk | and time_dim.t_hour between 19 and 19+1 | and household_demographics.hd_dep_count = 6 | and web_page.wp_char_count between 5000 and 5200) pt | order by am_pm_ratio | limit 100 """.stripMargin), ("q91", """ | select | cc_call_center_id Call_Center, cc_name Call_Center_Name, cc_manager Manager, | sum(cr_net_loss) Returns_Loss | from | call_center, catalog_returns, date_dim, customer, customer_address, | customer_demographics, household_demographics | where | cr_call_center_sk = cc_call_center_sk | and cr_returned_date_sk = d_date_sk | and cr_returning_customer_sk = c_customer_sk | and cd_demo_sk = c_current_cdemo_sk | and hd_demo_sk = c_current_hdemo_sk | and ca_address_sk = c_current_addr_sk | and d_year = 1998 | and d_moy = 11 | and ( (cd_marital_status = 'M' and cd_education_status = 'Unknown') | or(cd_marital_status = 'W' and cd_education_status = 'Advanced Degree')) | and hd_buy_potential like 'Unknown%' | and ca_gmt_offset = -7 | group by cc_call_center_id,cc_name,cc_manager,cd_marital_status,cd_education_status | order by sum(cr_net_loss) desc """.stripMargin), // Modifications: "+ days" -> date_add // Modifications: " -> ` ("q92", """ | 
select sum(ws_ext_discount_amt) as `Excess Discount Amount` | from web_sales, item, date_dim | where i_manufact_id = 350 | and i_item_sk = ws_item_sk | and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days) | and d_date_sk = ws_sold_date_sk | and ws_ext_discount_amt > | ( | SELECT 1.3 * avg(ws_ext_discount_amt) | FROM web_sales, date_dim | WHERE ws_item_sk = i_item_sk | and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 | days) | and d_date_sk = ws_sold_date_sk | ) | order by sum(ws_ext_discount_amt) | limit 100 """.stripMargin), ("q93", """ | select ss_customer_sk, sum(act_sales) sumsales | from (select | ss_item_sk, ss_ticket_number, ss_customer_sk, | case when sr_return_quantity is not null then (ss_quantity-sr_return_quantity) | *ss_sales_price | else (ss_quantity*ss_sales_price) end | act_sales | from store_sales | left outer join store_returns | on (sr_item_sk = ss_item_sk and sr_ticket_number = ss_ticket_number), | reason | where sr_reason_sk = r_reason_sk and r_reason_desc = 'reason 28') t | group by ss_customer_sk | order by sumsales, ss_customer_sk | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add // Modifications: " -> ` ("q94", """ | select | count(distinct ws_order_number) as `order count` | ,sum(ws_ext_ship_cost) as `total shipping cost` | ,sum(ws_net_profit) as `total net profit` | from | web_sales ws1, date_dim, customer_address, web_site | where | d_date between '1999-02-01' and | (cast('1999-02-01' as date) + interval 60 days) | and ws1.ws_ship_date_sk = d_date_sk | and ws1.ws_ship_addr_sk = ca_address_sk | and ca_state = 'IL' | and ws1.ws_web_site_sk = web_site_sk | and web_company_name = 'pri' | and exists (select * | from web_sales ws2 | where ws1.ws_order_number = ws2.ws_order_number | and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk) | and not exists(select * | from web_returns wr1 | where ws1.ws_order_number = wr1.wr_order_number) | order by count(distinct 
ws_order_number) | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add ("q95", """ | with ws_wh as | (select ws1.ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2 | from web_sales ws1,web_sales ws2 | where ws1.ws_order_number = ws2.ws_order_number | and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk) | select | count(distinct ws_order_number) as `order count` | ,sum(ws_ext_ship_cost) as `total shipping cost` | ,sum(ws_net_profit) as `total net profit` | from | web_sales ws1, date_dim, customer_address, web_site | where | d_date between '1999-02-01' and | (cast('1999-02-01' as date) + interval 60 days) | and ws1.ws_ship_date_sk = d_date_sk | and ws1.ws_ship_addr_sk = ca_address_sk | and ca_state = 'IL' | and ws1.ws_web_site_sk = web_site_sk | and web_company_name = 'pri' | and ws1.ws_order_number in (select ws_order_number | from ws_wh) | and ws1.ws_order_number in (select wr_order_number | from web_returns,ws_wh | where wr_order_number = ws_wh.ws_order_number) | order by count(distinct ws_order_number) | limit 100 """.stripMargin), ("q96", """ | select count(*) | from store_sales, household_demographics, time_dim, store | where ss_sold_time_sk = time_dim.t_time_sk | and ss_hdemo_sk = household_demographics.hd_demo_sk | and ss_store_sk = s_store_sk | and time_dim.t_hour = 20 | and time_dim.t_minute >= 30 | and household_demographics.hd_dep_count = 7 | and store.s_store_name = 'ese' | order by count(*) | limit 100 """.stripMargin), ("q97", """ | with ssci as ( | select ss_customer_sk customer_sk, ss_item_sk item_sk | from store_sales,date_dim | where ss_sold_date_sk = d_date_sk | and d_month_seq between 1200 and 1200 + 11 | group by ss_customer_sk, ss_item_sk), | csci as( | select cs_bill_customer_sk customer_sk, cs_item_sk item_sk | from catalog_sales,date_dim | where cs_sold_date_sk = d_date_sk | and d_month_seq between 1200 and 1200 + 11 | group by cs_bill_customer_sk, cs_item_sk) | select sum(case when ssci.customer_sk is not null and 
csci.customer_sk is null then 1 | else 0 end) store_only | ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 | else 0 end) catalog_only | ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then | 1 else 0 end) store_and_catalog | from ssci full outer join csci on (ssci.customer_sk=csci.customer_sk | and ssci.item_sk = csci.item_sk) | limit 100 """.stripMargin), // Modifications: "+ days" -> date_add ("q98", """ |select i_item_desc, i_category, i_class, i_current_price | ,sum(ss_ext_sales_price) as itemrevenue | ,sum(ss_ext_sales_price)*100/sum(sum(ss_ext_sales_price)) over | (partition by i_class) as revenueratio |from | store_sales, item, date_dim |where | ss_item_sk = i_item_sk | and i_category in ('Sports', 'Books', 'Home') | and ss_sold_date_sk = d_date_sk | and d_date between cast('1999-02-22' as date) | and (cast('1999-02-22' as date) + interval 30 days) |group by | i_item_id, i_item_desc, i_category, i_class, i_current_price |order by | i_category, i_class, i_item_id, i_item_desc, revenueratio """.stripMargin), // Modifications: " -> ` ("q99", """ | select | substr(w_warehouse_name,1,20), sm_type, cc_name | ,sum(case when (cs_ship_date_sk - cs_sold_date_sk <= 30 ) then 1 else 0 end) as `30 | days` | ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 30) and | (cs_ship_date_sk - cs_sold_date_sk <= 60) then 1 else 0 end ) as | `31-60 days` | ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 60) and | (cs_ship_date_sk - cs_sold_date_sk <= 90) then 1 else 0 end) as | `61-90 days` | ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 90) and | (cs_ship_date_sk - cs_sold_date_sk <= 120) then 1 else 0 end) as | `91-120 days` | ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 120) then 1 else 0 end) as | `>120 days` | from | catalog_sales, warehouse, ship_mode, call_center, date_dim | where | d_month_seq between 1200 and 1200 + 11 | and cs_ship_date_sk = d_date_sk | and cs_warehouse_sk = w_warehouse_sk | 
and cs_ship_mode_sk = sm_ship_mode_sk | and cs_call_center_sk = cc_call_center_sk | group by | substr(w_warehouse_name,1,20), sm_type, cc_name | order by substr(w_warehouse_name,1,20), sm_type, cc_name | limit 100 """.stripMargin), ("qMV1", """ |SELECT cs_ship_addr_sk , d_date, d_year, d_qoy, d_moy, i_category, cs_ship_addr_sk, |i_item_sk, i_item_id, | i_item_desc, i_class, i_current_price, i_brand_id, i_class_id, i_category_id, | i_manufact_id, | SUM(cs_ext_sales_price) sales_amt, | SUM(cs_quantity*cs_list_price) sales, | SUM(cs_ext_discount_amt) as `excess discount amount`, | count(*) number_sales |FROM catalog_sales, item, date_dim |WHERE cs_item_sk = i_item_sk | AND cs_sold_date_sk = d_date_sk |GROUP BY i_brand_id, i_class_id, i_category_id, i_item_id, i_item_desc, i_category, |i_class, | i_current_price, i_manufact_id, d_date, d_moy, d_qoy, d_year, cs_ship_addr_sk, | i_item_sk """.stripMargin.trim), ("qMV2", """ | SELECT | 'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category, | SUM(ss_ext_sales_price) ext_sales_price | FROM store_sales, item, date_dim | WHERE ss_store_sk IS NULL | AND ss_sold_date_sk=d_date_sk | AND ss_item_sk=i_item_sk | GROUP BY ss_store_sk, d_year, d_qoy, i_category """.stripMargin.trim), // this query shows a bug in modular plan where qualifier subq not clean up in erasure of // sub-query alias in logical plan ("qMV3", """ |SELECT first(subq.channel), subq.col_name, subq.d_year, subq.d_qoy, subq.i_category, SUM |(subq.ss_ext_sales_price) sales_amt |FROM | (SELECT | 'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category, | ss_ext_sales_price | FROM store_sales, item, date_dim | WHERE ss_store_sk IS NULL | AND ss_sold_date_sk=d_date_sk | AND ss_item_sk=i_item_sk) subq | GROUP BY col_name, d_year, d_qoy, i_category | ORDER BY col_name, d_year, d_qoy, i_category | limit 100 """.stripMargin.trim), ("qVerify", """ |SELECT gen_subquery_0.`c_last_name`, gen_subquery_0.`c_first_name`, |gen_subquery_0.`d_date`, 
gen_subquery_1.`c_last_name`, gen_subquery_1.`c_first_name`, |gen_subquery_1.`d_date` | FROM | (SELECT gen_subsumer_0.`customer_last_name` AS `c_last_name`, | gen_subsumer_0.`customer_first_name` AS `c_first_name`, gen_subsumer_0.`ddate` | AS `d_date` | FROM | (SELECT `c_customer_id` AS `customer_id`, `c_first_name` AS | `customer_first_name`, `c_last_name` AS `customer_last_name`, | `c_preferred_cust_flag` AS `customer_preferred_cust_flag`, `c_birth_country` | AS `customer_birth_country`, `c_login` AS `customer_login`, | `c_email_address` AS `customer_email_address`, `d_year` AS `dyear`, | `d_date` AS `ddate`, `d_month_seq`, sum((CAST((((CAST(`ss_ext_list_price` | AS DECIMAL(8,2)) - CAST(`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST | (`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(`ss_ext_sales_price` AS | DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, sum | (`ss_net_paid`) AS `year_total_74`, 's' AS `sale_type` | FROM | customer | INNER JOIN store_sales ON (`c_customer_sk` = `ss_customer_sk`) | INNER JOIN date_dim ON (`ss_sold_date_sk` = `d_date_sk`) | GROUP BY `c_customer_id`, `c_first_name`, `c_last_name`, | `c_preferred_cust_flag`, `c_birth_country`, `c_login`, `c_email_address`, | `d_year`, `d_date`, `d_month_seq`) gen_subsumer_0 | WHERE | (gen_subsumer_0.`d_month_seq` >= 1200) AND (gen_subsumer_0.`d_month_seq` <= | 1211) | GROUP BY gen_subsumer_0.`customer_last_name`, | gen_subsumer_0.`customer_first_name`, gen_subsumer_0.`ddate`) gen_subquery_0 | LEFT ANTI JOIN (SELECT customer.`c_last_name`, customer.`c_first_name`, | date_dim.`d_date` | FROM | catalog_sales | INNER JOIN date_dim ON (date_dim.`d_month_seq` >= 1200) AND (date_dim | .`d_month_seq` <= 1211) AND (catalog_sales.`cs_sold_date_sk` = date_dim | .`d_date_sk`) | INNER JOIN customer ON (catalog_sales.`cs_bill_customer_sk` = customer | .`c_customer_sk`) | GROUP BY customer.`c_last_name`, customer.`c_first_name`, date_dim.`d_date`) | gen_subquery_1 ON (gen_subquery_0.`c_last_name` <=> 
| gen_subquery_1.`c_last_name`) AND (gen_subquery_0.`c_first_name` <=> | gen_subquery_1.`c_first_name`) AND (gen_subquery_0.`d_date` <=> | gen_subquery_1.`d_date`) """.stripMargin), ("qSsMax", """ |select | count(*) as total, | count(ss_sold_date_sk) as not_null_total, | count(distinct ss_sold_date_sk) as unique_days, | max(ss_sold_date_sk) as max_ss_sold_date_sk, | max(ss_sold_time_sk) as max_ss_sold_time_sk, | max(ss_item_sk) as max_ss_item_sk, | max(ss_customer_sk) as max_ss_customer_sk, | max(ss_cdemo_sk) as max_ss_cdemo_sk, | max(ss_hdemo_sk) as max_ss_hdemo_sk, | max(ss_addr_sk) as max_ss_addr_sk, | max(ss_store_sk) as max_ss_store_sk, | max(ss_promo_sk) as max_ss_promo_sk |from store_sales """.stripMargin), ("qTradeflow", """ |SELECT * |FROM ( | SELECT DISTINCT country_show_cn | ,country | ,( | CASE WHEN up.startdate <= '201401' | AND up.newdate >= '201412' THEN CASE WHEN isnan(colunm_2014) THEN 0 ELSE | colunm_2014 END ELSE NULL END | ) AS colunm_2014 | ,( | CASE WHEN up.startdate <= '201501' | AND up.newdate >= '201512' THEN CASE WHEN isnan(colunm_2015) THEN 0 ELSE | colunm_2015 END ELSE NULL END | ) AS colunm_2015 | ,( | CASE WHEN up.startdate <= '201601' | AND up.newdate >= '201612' THEN CASE WHEN isnan(colunm_2016) THEN 0 ELSE | colunm_2016 END ELSE NULL END | ) AS colunm_2016 | ,tb | ,concat_ws('-', up.startdate, up.newdate) AS dbupdate | FROM ( | SELECT a.country AS countryid | ,c.country_cn AS country_show_cn | ,c.country_en AS country | ,sum(v2014) AS colunm_2014 | ,sum(v2015) AS colunm_2015 | ,sum(v2016) AS colunm_2016 | ,(sum(v2016) - sum(v2015)) / sum(v2015) AS tb | FROM ( | SELECT b_country AS Country | ,sum(CASE WHEN y_year = 2014 THEN dollar_value ELSE 0 END) AS v2014 | ,sum(CASE WHEN y_year = 2015 THEN dollar_value ELSE 0 END) AS v2015 | ,sum(CASE WHEN y_year = 2016 THEN dollar_value ELSE 0 END) AS v2016 | FROM tradeflow_all | WHERE imex = 0 | AND ( | y_year = 2014 | OR y_year = 2015 | OR y_year = 2016 | ) | GROUP BY b_country | ,y_year | ) a 
| LEFT JOIN country c ON (a.country = c.countryid) | GROUP BY country_show_cn | ,country | ,countryid | ,country_en | ) w | LEFT JOIN updatetime up ON ( | w.countryid = up.countryid | AND imex = 0 | ) | WHERE !(isnan(colunm_2014) | AND isnan(colunm_2015) | AND isnan(colunm_2016)) | AND ( | colunm_2014 <> 0 | OR colunm_2015 <> 0 | OR colunm_2016 <> 0 | ) | ) f |WHERE colunm_2014 IS NOT NULL | OR colunm_2015 IS NOT NULL | OR colunm_2016 IS NOT NULL """.stripMargin), ("qAggPushDown", """ | SELECT | channel, d_year, d_qoy, i_category, SUM(ext_sales_price) sales_amt | FROM( | SELECT | 'store' as channel, d_year, d_qoy, i_category, | ss_ext_sales_price ext_sales_price | FROM store_sales, item, date_dim | WHERE ss_store_sk IS NULL | AND ss_sold_date_sk=d_date_sk | AND ss_item_sk=i_item_sk) foo | GROUP BY channel, d_year, d_qoy, i_category | ORDER BY channel, d_year, d_qoy, i_category | limit 100 """.stripMargin), ("qJoin", """ | SELECT | i_category, | ss_ext_sales_price ext_sales_price | FROM store_sales, item | WHERE ss_item_sk=i_item_sk """.stripMargin), ("qJoin1", """ | SELECT gen_subsumer_0.`i_category` | FROM | (SELECT | item.`i_category`, | store_sales.`ss_ext_sales_price` ext_sales_price | FROM store_sales, item | WHERE store_sales.`ss_item_sk`=item.`i_item_sk`) gen_subsumer_0 """.stripMargin), ("qCarbon", """ |SELECT gen_subsumer_0.`c1` AS `c1`, gen_subsumer_0.`designation` |FROM | (SELECT t1.`empname` AS `c1`, t2.`designation`, t2.`empname` AS `c2` | FROM | fact_table1 t1 | INNER JOIN fact_table2 t2 ON (t1.`empname` = t2.`empname`)) gen_subsumer_0 """.stripMargin), ("qCarbon1", """ | SELECT t1.`empname` AS `c1`, t2.`designation`, t2.`empname` AS `c2` | FROM | fact_table1 t1 | INNER JOIN fact_table2 t2 ON (t1.`empname` = t2.`empname`) """.stripMargin), ("qLeftJoin", """ | SELECT | i_category, | ss_ext_sales_price ext_sales_price | FROM store_sales | Left Join item | ON ss_item_sk=i_item_sk """.stripMargin) ) // .map { case (name, sqlText) => // Query(name + 
"-v1.4", sqlText, description = "TPCDS 1.4 Query", executionMode = // CollectResults) // } // val tpcds1_4QueriesMap = tpcds1_4Queries.map(q => q.name.split("-").get(0) -> q).toMap // // val runnable: Seq[Query] = Seq( // "q1", "q2", "q3", "q4", "q5", "q7", "q8", "q9", // "q11", "q12", "q13", "q15", "q17", "q18", "q19", // "q20", "q21", "q22", "q25", "q26", "q27", "q28", "q29", // "q31", "q34", "q36", "q37", "q38", "q39a", "q39b", // "q40", "q42", "q43", "q44", "q46", "q47", "q48", "q49", // "q50", "q51", "q52", "q53", "q54", "q55", "q57", "q59", // "q61", "q62", "q63", "q64", "q65", "q66", "q67", "q68", // "q71", "q72", "q73", "q74", "q75", "q76", "q77", "q78", "q79", // "q80", "q82", "q84", "q85", "q86", "q87", "q88", "q89", // "q90", "q91", "q93", "q96", "q97", "q98", "q99", "qSsMax").map(tpcds1_4QueriesMap) // // val all: Seq[Query] = tpcds1_4QueriesMap.values.toSeq }
zzcclp/carbondata
integration/spark/src/test/scala/org/apache/carbondata/view/testutil/Tpcds_1_4_QueryBatch.scala
Scala
apache-2.0
205,502
package jp.co.bizreach.elasticsearch4s

import JsonUtils._

/**
 * One operation of an Elasticsearch bulk request.
 *
 * `jsonString` renders the operation in the bulk API's line-oriented
 * format: an action-metadata JSON object, optionally followed (on the
 * next line) by the document source.
 */
sealed trait BulkAction {
  def jsonString: String
}

object BulkAction {

  /**
   * Index a new document.
   *
   * @param config index/type configuration
   * @param doc    the document to serialize as the source line
   * @param id     optional explicit `_id`; when absent, Elasticsearch
   *               assigns one
   */
  case class Create(config: ESConfig, doc: AnyRef, id: Option[String] = None) extends BulkAction {
    def jsonString: String = {
      s"""{ "create" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("")}"${id.map(_id => s""", "_id": "${_id}"""").getOrElse("")} } }
         |${singleLine(serialize(doc))}""".stripMargin
    }
  }

  /**
   * Partially update an existing document by merging `doc` into it
   * (the bulk "update" action with a "doc" body).
   */
  case class Update(config: ESConfig, doc: AnyRef, id: String) extends BulkAction {
    def jsonString: String = {
      // Fixed: the metadata line previously ended with an extra closing
      // brace ("${id}"} } } — two opens, three closes), which is invalid
      // JSON and is rejected by the bulk API parser.
      s"""{ "update" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("")}", "_id": "${id}" } }
         |{ "doc": ${singleLine(serialize(doc))} }""".stripMargin
    }
  }

  /**
   * Update an existing document by executing `script`, which is spliced
   * in verbatim and must therefore be valid JSON for the "script" field.
   */
  case class Script(config: ESConfig, script: String, id: String) extends BulkAction {
    def jsonString: String = {
      // Fixed: same unbalanced-brace defect as Update (see above).
      s"""{ "update" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("")}", "_id": "${id}" } }
         |{ "script": ${script} }""".stripMargin
    }
  }

  /** Delete the document with the given `_id`. No source line follows. */
  case class Delete(config: ESConfig, id: String) extends BulkAction {
    def jsonString: String = {
      // Fixed: same unbalanced-brace defect as Update (see above).
      s"""{ "delete" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("")}", "_id": "${id}" } }"""
    }
  }

  // Collapse the serialized document onto one line, as required by the
  // bulk API's newline-delimited framing.
  // NOTE(review): "\\n" / "\\r" are the two-character escape sequences
  // backslash-n / backslash-r, not raw newline characters — so this
  // strips escaped newlines inside JSON string values. Confirm this is
  // what `serialize` needs; raw newlines (if any) would pass through.
  private def singleLine(str: String): String = str.replace("\\n", "").replace("\\r", "")
}
saito400/elastic-scala-httpclient
elastic-scala-httpclient/src/main/scala/jp/co/bizreach/elasticsearch4s/BulkAction.scala
Scala
apache-2.0
1,447
package demo
package pages

import chandu0101.scalajs.react.components.mixins.AsyncLoad
import demo.components.LeftNavPage
import demo.routes.{LeftRoute, ReactSelectRouteModule}
import japgolly.scalajs.react.extra.router2.RouterCtl
import japgolly.scalajs.react.{BackendScope, ReactComponentB}

/**
 * Demo page hosting the ReactSelect examples inside the shared
 * left-navigation layout. The page's JS bundle is loaded lazily via the
 * [[AsyncLoad]] mixin.
 */
object ReactSelectPage {

  /** View props: the currently selected menu entry plus the router control. */
  case class Props(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute])

  /** Backend whose sole responsibility is declaring the bundle to async-load. */
  class Backend(scope: BackendScope[_, _]) extends AsyncLoad {
    override val jsResources: Vector[String] =
      Vector("assets/reactselectpage-bundle.js")
  }

  val component = ReactComponentB[Props]("ReactSelectPage")
    .stateless
    .backend(new Backend(_))
    .render { (props, _, _) =>
      LeftNavPage(ReactSelectRouteModule.menu, props.selectedPage, props.ctrl)
    }
    .configure(AsyncLoad.mixin)
    .build

  def apply(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute]) =
    component(Props(selectedPage, ctrl))
}
tpdi/scalajs-react-components
demo/src/main/scala/demo/pages/ReactSelectPage.scala
Scala
apache-2.0
902
package com.twitter.finagle.util

import com.twitter.app.GlobalFlag
import com.twitter.logging.Level
import com.twitter.util.NonFatal
import com.twitter.util.registry.GlobalRegistry
import java.io.{File, IOException}
import java.net.{URI, URISyntaxException, URLClassLoader}
import java.nio.charset.MalformedInputException
import java.util.ServiceConfigurationError
import java.util.jar.JarFile
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.io.Source
import scala.reflect.ClassTag

/**
 * Inspect and load the classpath. Inspired by Guava's ClassPath
 * utility.
 *
 * Recursively walks every URL of a classloader chain (exploded
 * directories and jars, including jar-manifest Class-Path references),
 * collecting the contents of META-INF/services files.
 *
 * @note This is not a generic facility, rather it is designed
 * specifically to support LoadService.
 */
private object ClassPath {

  // JDK/vendor package prefixes that are skipped during the recursive
  // scan; excluding them keeps classpath browsing cheap.
  private val defaultIgnoredPackages = Seq(
    "apple/",
    "ch/epfl/",
    "com/apple/",
    "com/oracle/",
    "com/sun/",
    "java/",
    "javax/",
    "scala/",
    "sun/",
    "sunw/")

  // Effective ignore list: the defaults plus any extra prefixes supplied
  // via the `loadServiceIgnoredPaths` global flag (defined below).
  private[util] def ignoredPackages = defaultIgnoredPackages ++ loadServiceIgnoredPaths()

  /**
   * Information about a classpath entry.
   *
   * @param path  the entry's path relative to its classpath root,
   *              e.g. "META-INF/services/com.foo.Bar"
   * @param iface the service interface name taken from the file name
   * @param lines the comment-stripped, non-blank lines of the file,
   *              i.e. implementation class names
   */
  case class Info(path: String, iface: String, lines: Seq[String])

  /**
   * Browse the given classloader recursively from
   * its package root.
   *
   * @return one [[Info]] per META-INF/services file found
   */
  def browse(loader: ClassLoader): Seq[Info] = {
    val buf = mutable.Buffer[Info]()

    for ((uri, loader) <- getEntries(loader))
      browseUri(uri, loader, buf)

    buf
  }

  // Extract the service interface name from an entry name: only names
  // ending in ".../META-INF/services/<iface>" qualify.
  private def ifaceOfName(name: String) =
    if (!(name contains "META-INF")) None
    else name.split("/").takeRight(3) match {
      case Array("META-INF", "services", iface) => Some(iface)
      case _ => None
    }

  // Collect a (URI, loader) pair for each URL of this loader and its
  // ancestors; ancestors are listed first. Loaders that are not
  // URLClassLoaders contribute no entries of their own.
  private def getEntries(loader: ClassLoader): Seq[(URI, ClassLoader)] = {
    val ents = mutable.Buffer[(URI, ClassLoader)]()
    val parent = loader.getParent
    if (parent != null)
      ents ++= getEntries(parent)

    loader match {
      case urlLoader: URLClassLoader =>
        for (url <- urlLoader.getURLs) {
          ents += (url.toURI -> loader)
        }
      case _ =>
    }
    ents
  }

  // Browse a single classpath root, appending service-file Info records
  // to `buf`. Starts a fresh visit history for cycle detection.
  private[finagle] def browseUri(
    uri: URI,
    loader: ClassLoader,
    buf: mutable.Buffer[Info]
  ): Unit = browseUri0(uri, loader, buf, mutable.Set[String]())

  // As browseUri, but threads `history` (canonical paths already
  // visited) so that jar-manifest Class-Path cycles terminate.
  private def browseUri0(
    uri: URI,
    loader: ClassLoader,
    buf: mutable.Buffer[Info],
    history: mutable.Set[String]
  ): Unit = {
    // Only local files can be browsed; skip non-file entries.
    if (uri.getScheme != "file")
      return

    val f = new File(uri)
    if (!(f.exists() && f.canRead))
      return

    // Canonicalize so the same jar reached via different paths is
    // scanned only once.
    val path = f.getCanonicalPath
    if (history.contains(path))
      return
    history.add(path)

    if (f.isDirectory)
      browseDir(f, loader, "", buf)
    else
      browseJar(f, loader, buf, history)
  }

  // Recursively walk an exploded-directory root. `prefix` is the
  // "/"-terminated package path accumulated so far.
  private def browseDir(
    dir: File,
    loader: ClassLoader,
    prefix: String,
    buf: mutable.Buffer[Info]) {
    if (ignoredPackages contains prefix)
      return

    for (f <- dir.listFiles)
      if (f.isDirectory && f.canRead)
        browseDir(f, loader, prefix + f.getName + "/", buf)
      else for (iface <- ifaceOfName(prefix + f.getName)) {
        val source = Source.fromFile(f, "UTF-8")
        // readLines closes `source`.
        val lines = readLines(source)
        buf += Info(prefix + f.getName, iface, lines)
      }
  }

  // Scan a jar: first follow its manifest Class-Path references (via
  // browseUri0, reusing `history` for cycle detection), then read any
  // META-INF/services entries the jar itself contains.
  private def browseJar(
    file: File,
    loader: ClassLoader,
    buf: mutable.Buffer[Info],
    history: mutable.Set[String]) {
    val jarFile =
      try new JarFile(file)
      catch {
        case _: IOException => return // not a Jar file
      }

    try {
      for (uri <- jarClasspath(file, jarFile.getManifest))
        browseUri0(uri, loader, buf, history)

      for {
        e <- jarFile.entries.asScala
        if !e.isDirectory
        n = e.getName
        if !(ignoredPackages exists (n startsWith _))
        iface <- ifaceOfName(n)
      } {
        val source = Source.fromInputStream(jarFile.getInputStream(e), "UTF-8")
        // readLines closes `source`.
        val lines = readLines(source)
        buf += Info(n, iface, lines)
      }
    } finally {
      // Best-effort close; a failure here should not mask results.
      try jarFile.close()
      catch {
        case _: IOException =>
      }
    }
  }

  // Resolve a jar manifest's space-separated Class-Path attribute into
  // URIs; absent manifests or attributes yield an empty Seq.
  private def jarClasspath(jarFile: File, manifest: java.util.jar.Manifest): Seq[URI] =
    for {
      m <- Option(manifest).toSeq
      attr <- Option(m.getMainAttributes.getValue("Class-Path")).toSeq
      el <- attr.split(" ")
      uri <- uriFromJarClasspath(jarFile, el)
    } yield uri

  // A Class-Path element may be absolute, or relative to the directory
  // of the jar that declares it; malformed elements are dropped.
  private def uriFromJarClasspath(jarFile: File, path: String) =
    try {
      val uri = new URI(path)
      if (uri.isAbsolute)
        Some(uri)
      else
        Some(new File(jarFile.getParentFile, path.replace('/', File.separatorChar)).toURI)
    } catch {
      case _: URISyntaxException => None
    }

  // Parse a service file in java.util.ServiceLoader format: strip '#'
  // comments and surrounding whitespace, drop blank lines. Always
  // closes `source`; files that are not valid UTF-8 yield no entries
  // instead of failing the whole scan.
  private[util] def readLines(source: Source): Seq[String] = {
    try {
      source.getLines().toArray.flatMap { line =>
        val commentIdx = line.indexOf('#')
        val end = if (commentIdx != -1) commentIdx else line.length
        val str = line.substring(0, end).trim
        if (str.isEmpty) Nil else Seq(str)
      }
    } catch {
      case ex: MalformedInputException => Nil /* skip malformed files (e.g. non UTF-8) */
    } finally {
      source.close()
    }
  }
}

// Global flag letting operators extend the set of package prefixes
// excluded from the recursive classpath scan (see ClassPath.ignoredPackages).
object loadServiceIgnoredPaths extends GlobalFlag(Seq.empty[String],
  "Additional packages to be excluded from recursive directory scan")

/**
 * Load a singleton class in the manner of [[java.util.ServiceLoader]]. It is
 * more resilient to varying Java packaging configurations than ServiceLoader.
 */
object LoadService {

  // Per-classloader memoization of the (expensive) classpath scan.
  // Access is guarded by the `synchronized` in apply().
  private val cache: mutable.Map[ClassLoader, Seq[ClassPath.Info]] = mutable.Map.empty

  /**
   * Instantiate every registered implementation of service interface `T`.
   *
   * Implementation names come from two sources, de-duplicated: the
   * recursive classpath scan (ClassPath.browse) and the standard
   * "META-INF/services/<iface>" resource lookup on T's classloader.
   *
   * @throws ServiceConfigurationError if a registered class does not
   *         implement `T`
   */
  def apply[T: ClassTag](): Seq[T] = synchronized {
    val iface = implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]
    val ifaceName = iface.getName
    val loader = iface.getClassLoader

    // Candidate class names found by our own classpath scan.
    val classNames = for {
      info <- cache.getOrElseUpdate(loader, ClassPath.browse(loader))
      if info.iface == ifaceName
      className <- info.lines
    } yield className

    // Candidate class names visible via the standard resource mechanism.
    val classNamesFromResources = for {
      rsc <- loader.getResources("META-INF/services/" + ifaceName).asScala
      line <- ClassPath.readLines(Source.fromURL(rsc, "UTF-8"))
    } yield line

    // Names of the classes that instantiate successfully; recorded in
    // the global registry below for diagnostics.
    val buffer = mutable.ListBuffer.empty[String]

    val result = (classNames ++ classNamesFromResources).distinct.flatMap { className =>
      val cls = Class.forName(className)
      if (!(iface isAssignableFrom cls))
        throw new ServiceConfigurationError(s"$className not a subclass of $ifaceName")

      DefaultLogger.log(
        Level.DEBUG,
        s"LoadService: loaded instance of class $className for requested service $ifaceName"
      )

      try {
        val instance = cls.newInstance().asInstanceOf[T]
        buffer += className
        Some(instance)
      } catch {
        // A provider that fails to construct is logged and skipped so
        // the remaining providers can still be returned.
        case NonFatal(ex) =>
          DefaultLogger.log(
            Level.FATAL,
            s"LoadService: failed to instantiate '$className' for the requested " +
              s"service '$ifaceName'",
            ex
          )
          None
      }
    }

    GlobalRegistry.get.put(Seq("loadservice", ifaceName), buffer.mkString(","))
    result
  }
}
lysu/finagle
finagle-core/src/main/scala/com/twitter/finagle/util/LoadService.scala
Scala
apache-2.0
6,980