code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package sssg.domain
/**
* sssg.domain
* User: nk
* Date: 2016-03-14 13:41
*/
/** Empty abstract base class; concrete domain contexts extend this. */
abstract class Context()
| nikosk/sssg | src/main/scala/sssg/domain/Context.scala | Scala | mit | 116 |
package iotus.core
import java.net.URI
/**
 * Parses a Cassandra connection URL of the form
 * `scheme://host:port/keyspace?host=extra1&host=extra2` into its parts.
 */
case class CassandraConnectionUri(dbUrl: String) {

  private val uri = new URI(dbUrl)

  // Extra contact points arrive as repeated `host=<name>` query parameters.
  private val additionalHosts: Seq[String] =
    Option(uri.getQuery)
      .map { query =>
        query
          .split('&')
          .map(_.split('='))
          .collect { case pair if pair(0) == "host" => pair(1) }
          .toSeq
      }
      .getOrElse(Seq.empty)

  val host: String = uri.getHost
  // Primary host first, then any additional hosts from the query string.
  val hosts: Seq[String] = host +: additionalHosts
  val port: Int = uri.getPort
  // Drop the leading '/' of the URI path to obtain the keyspace name.
  val keyspace: String = uri.getPath.substring(1)
}
| petermichalek/iotan-core | src/main/scala/iotus/core/CassandraConnectionUri.scala | Scala | apache-2.0 | 492 |
package bollinger
/** Placeholder for Bollinger-band generation; the HiveGoal wiring below is commented out. */
object GenerateBollingerBands {
////val goal = HiveGoal()
} | jeromebanks/satisfaction | modules/samples/src/main/scala/bollinger/GenerateBollingerBands.scala | Scala | apache-2.0 | 83 |
package com.sksamuel.elastic4s.indexes
import com.sksamuel.elastic4s.{Index, IndexAndType}
/**
 * DSL entry points for building index requests and get-index requests.
 * All `indexInto` overloads funnel into the `IndexAndType` variant.
 */
trait IndexApi {

  def indexInto(index: String, `type`: String): IndexRequest =
    indexInto(IndexAndType(index, `type`))

  def indexInto(index: Index, `type`: String): IndexRequest =
    indexInto(IndexAndType(index.name, `type`))

  def indexInto(indexType: IndexAndType): IndexRequest =
    IndexRequest(indexType)

  def indexInto(index: String): IndexRequest =
    IndexRequest(index)

  // Tuple form: (indexName, typeName).
  def index(kv: (String, String)): IndexRequest = {
    val (indexName, typeName) = kv
    IndexRequest(IndexAndType(indexName, typeName))
  }

  def getIndex(index: String, others: String*): GetIndexRequest =
    getIndex(index +: others)

  // Multiple indexes are sent as a single comma-separated string.
  def getIndex(indexes: Seq[String]): GetIndexRequest =
    GetIndexRequest(indexes.mkString(","))
}
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/indexes/IndexApi.scala | Scala | apache-2.0 | 762 |
package ch.uzh.ifi.pdeboer.pplib.examples.gdpinfluence
import java.security.MessageDigest
/**
 * Validates answer tokens of the form "&lt;ranking&gt;|&lt;md5-hash&gt;",
 * where the hash portion must equal the MD5 of the original input string.
 *
 * Created by marcello on 30/12/16.
 */
object HashValidation extends App {

  /**
   * Extracts the hash portion of a token: everything after the first '|'.
   * When no '|' is present, `indexOf` yields -1 and `substring(0)` returns
   * the whole token unchanged (legacy behaviour, deliberately preserved).
   *
   * @param original token of the form "&lt;ranking&gt;|&lt;hash&gt;"
   * @return the substring following the first '|'
   */
  def extractToken(original: String): String =
    original.substring(original.indexOf('|') + 1)

  /**
   * validate("0:7,1:3,2:5,3:8,4:4,5:9,6:14,7:13,8:1,9:2,10:10,11:11,12:6,13:15,14:12|7c19af4c1a89f83ecb44e43b3b5b92f1", "Mr Peter Test")
   * @param token token from answer (including ranking string. Part after "|" should be hashed name)
   * @param original name / random String
   * @return true if the token's hash equals the MD5 of `original` (case-insensitive)
   */
  def validateToken(token: String, original: String): Boolean = {
    val givenHash = extractToken(token)
    // Render the digest as hex and compare case-insensitively.
    val trueHash = md5(original).map("%02X".format(_)).mkString.toLowerCase()
    trueHash == givenHash.toLowerCase()
  }

  /**
   * MD5 digest of `s`.
   * NOTE(review): `getBytes` uses the platform default charset, so hashes of
   * non-ASCII input may differ across JVMs; consider UTF-8 explicitly.
   * Behaviour is kept unchanged here to avoid invalidating existing tokens.
   */
  def md5(s: String): Array[Byte] =
    MessageDigest.getInstance("MD5").digest(s.getBytes)
}
| uzh/PPLib | src/main/scala/ch/uzh/ifi/pdeboer/pplib/examples/gdpinfluence/HashValidation.scala | Scala | mit | 972 |
package com.mesosphere.cosmos.error
import io.circe.Encoder
import io.circe.JsonObject
import io.circe.generic.semiauto.deriveEncoder
/** Error raised when options are supplied but no schema exists to validate them. */
final case class OptionsNotAllowed() extends CosmosError {

  /** Human-readable description of the error. */
  override def message: String = "No schema available to validate the provided options"

  /** This error carries no structured payload. */
  override def data: Option[JsonObject] = None
}
object OptionsNotAllowed {
  // Semi-automatic circe encoder; the explicit type argument documents the target.
  implicit val encoder: Encoder[OptionsNotAllowed] = deriveEncoder[OptionsNotAllowed]
}
| dcos/cosmos | cosmos-common/src/main/scala/com/mesosphere/cosmos/error/OptionsNotAllowed.scala | Scala | apache-2.0 | 429 |
package com.nakoradio.scalc.core.parser
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner.JUnitRunner
import org.specs2.mutable.{ BeforeAfter, Specification }
import java.util.Base64
import javafx.scene.shape.ClosePath
import scala.collection.mutable.Stack
import com.sun.scenario.animation.NumberTangentInterpolator
/**
 * Specs2 tests for ShuntingYardParser: each example feeds an infix expression
 * string to the parser and checks the produced RPN token stack (`.reverse`
 * yields evaluation order: operands first, then operators).
 */
@RunWith(classOf[JUnitRunner])
class ShuntingYardParserTest extends Specification {
val parser = new ShuntingYardParser()
"Parser" should {
"ignore whitespace" in {
parser(" 2 * ( 3 + 4 ) ").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(3), NumberTerm(4), Add(), Multiply()))
}
"handle expression without whitespace " in {
parser("2*(3+4)").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(3), NumberTerm(4), Add(), Multiply()))
}
// Covers decimal-comma, bare-dot and signed literals.
"parse numbers" in {
parser("4") must beEqualTo(Stack(NumberTerm(4)))
parser("-123.5") must beEqualTo(Stack(NumberTerm(-123.5)))
parser("-123,5") must beEqualTo(Stack(NumberTerm(-123.5)))
parser("-.5") must beEqualTo(Stack(NumberTerm(-.5)))
parser(".5") must beEqualTo(Stack(NumberTerm(.5)))
}
"parse simple operations" in {
parser("4 + 3").reverse must beEqualTo(Stack(NumberTerm(4), NumberTerm(3), Add()))
parser("4 - 3").reverse must beEqualTo(Stack(NumberTerm(4), NumberTerm(3), Subtract()))
parser("4 * 3").reverse must beEqualTo(Stack(NumberTerm(4), NumberTerm(3), Multiply()))
parser("4 / 3").reverse must beEqualTo(Stack(NumberTerm(4), NumberTerm(3), Divide()))
}
"parse special numbers in operations" in {
parser("-.2 - .3 * +.4").reverse must beEqualTo(Stack(NumberTerm(-.2), NumberTerm(.3), NumberTerm(.4), Multiply(), Subtract()))
}
// A sign directly following an operator binds to the next literal.
"parse double operation symbols" in {
parser("4 - -3").reverse must beEqualTo(Stack(NumberTerm(4), NumberTerm(-3), Subtract()))
parser("5 + -3").reverse must beEqualTo(Stack(NumberTerm(5), NumberTerm(-3), Add()))
parser("6 - +3").reverse must beEqualTo(Stack(NumberTerm(6), NumberTerm(3), Subtract()))
parser("7 * -3").reverse must beEqualTo(Stack(NumberTerm(7), NumberTerm(-3), Multiply()))
}
// Left-associativity of same-precedence operators.
"parse repetive numbers" in {
parser("-1 - 1 - 1").reverse must beEqualTo(Stack(NumberTerm(-1), NumberTerm(1), Subtract(), NumberTerm(1), Subtract()))
parser("100 / 10 / 2").reverse must beEqualTo(Stack(NumberTerm(100), NumberTerm(10), Divide(), NumberTerm(2), Divide()))
}
"consider operator precedence" in {
parser("1 - 2 * 3").reverse must beEqualTo(Stack(NumberTerm(1), NumberTerm(2), NumberTerm(3), Multiply(), Subtract()))
parser("1 + 5 / 3").reverse must beEqualTo(Stack(NumberTerm(1), NumberTerm(5), NumberTerm(3), Divide(), Add()))
}
"parse parenthesis" in {
parser("2 * ( 3 + 4)").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(3), NumberTerm(4), Add(), Multiply()))
parser("2 * ( 3 / (4+2) )").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(3), NumberTerm(4), NumberTerm(2), Add(), Divide(), Multiply()))
parser("((2) + 2) * 4").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(2), Add(), NumberTerm(4), Multiply()))
}
// Implicit multiplication: a number adjacent to '(' multiplies the group.
"parse numbers paired to parenthesis" in {
parser("2(3+4)").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(3), NumberTerm(4), Add(), Multiply()))
parser("2 ( 3+4)").reverse must beEqualTo(Stack(NumberTerm(2), NumberTerm(3), NumberTerm(4), Add(), Multiply()))
}
"parse special cases" in {
parser("-4 + 3").reverse must beEqualTo(Stack(NumberTerm(-4), NumberTerm(3), Add()))
}
// Error cases
"throw exception on mismatched parenthesis" in {
parser("2 + 1) +2").reverse must throwA(new ShuntException("Parsing failed due to [mismatched parenthesis] on input [2 + 1) +2]"))
parser("2 + (1+2").reverse must throwA(new ShuntException("Parsing failed due to [mismatched parenthesis] on input [2 + (1+2]"))
}
// NOTE(review): "unrecoqnized" below is a typo in the spec description only;
// the asserted error message correctly says "unrecognized".
"throw exception on unrecoqnized tokens" in {
parser("2 + a").reverse must throwA(new ShuntException("Parsing failed due to [unrecognized token 'a'] on input [2 + a]"))
}
}
} | Arch-vile/sCalc | test/com/nakoradio/scalc/core/parser/ShuntingYardParserTest.scala | Scala | mit | 4,202 |
package monocle.macros.internal
/**
 * Compatibility shim exposing Scala macro-reflection operations under
 * version-neutral names (this is the scala-2.11 variant per its source path;
 * presumably a 2.10 twin implements the same members — confirm in the repo).
 * Each method simply delegates to the 2.11 blackbox-context API.
 */
trait MacrosCompatibility {
type Context = scala.reflect.macros.blackbox.Context
// Members declared directly on a type (2.11: `decls`).
def getDeclarations(c: Context)(tpe: c.universe.Type): c.universe.MemberScope =
tpe.decls
// Parameter lists of a method (2.11: `paramLists`).
def getParameterLists(c: Context)(method: c.universe.MethodSymbol): List[List[c.universe.Symbol]] =
method.paramLists
// Single member lookup by name (2.11: `decl`).
def getDeclaration(c: Context)(tpe: c.universe.Type, name: c.universe.Name): c.universe.Symbol =
tpe.decl(name)
def createTermName(c: Context)(name: String): c.universe.TermName =
c.universe.TermName(name)
def createTypeName(c: Context)(name: String): c.universe.TypeName =
c.universe.TypeName(name)
// Strips symbols/types so a tree can be re-typechecked (2.11: `untypecheck`).
def resetLocalAttrs(c: Context)(tree: c.Tree): c.Tree =
c.untypecheck(tree)
def getTermNames(c: Context): c.universe.TermNamesApi =
c.universe.termNames
// Companion symbol of a type's symbol (2.11: `companion`).
def companionTpe(c: Context)(tpe: c.universe.Type): c.universe.Symbol =
tpe.typeSymbol.companion
} | CapeSepias/Monocle | macro/src/main/scala-2.11/monocle.macros.internal/MacrosCompatibility.scala | Scala | mit | 931 |
/*
* Copyright (C) 2016 Department for Business, Energy and Industrial Strategy
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package rifs.business.actions
import javax.inject.Inject
import play.api.mvc.Results._
import play.api.mvc._
import rifs.business.data.ApplicationOps
import rifs.business.models.ApplicationId
import rifs.business.restmodels.Application
import scala.concurrent.{ExecutionContext, Future}
// Request wrapper carrying the loaded Application alongside the original request.
// NOTE(review): parameter `Application` should be lowerCamelCase (`application`),
// but renaming would break any callers using named arguments — flag for a
// coordinated rename.
case class ApplicationRequest[A](Application: Application, request: Request[A]) extends WrappedRequest[A](request)
/**
 * Builds a Play ActionBuilder that loads the application with the given id,
 * wraps it into an ApplicationRequest for the downstream block, and responds
 * 404 when no such application exists.
 * NOTE(review): the field is named `opportunities` but holds an ApplicationOps —
 * likely copied from a similar opportunity action; consider renaming.
 */
class ApplicationAction @Inject()(opportunities: ApplicationOps)(implicit ec: ExecutionContext) {
def apply(id: ApplicationId): ActionBuilder[ApplicationRequest] =
new ActionBuilder[ApplicationRequest] {
override def invokeBlock[A](request: Request[A], next: (ApplicationRequest[A]) => Future[Result]): Future[Result] = {
opportunities.application(id).flatMap {
case Some(opp) => next(ApplicationRequest(opp, request))
// Lookup miss maps to a 404 rather than invoking the block.
case None => Future.successful(NotFound(s"No application with id ${id.id} exists"))
}
}
}
} | UKGovernmentBEIS/rifs-business | src/main/scala/rifs/business/actions/ApplicationAction.scala | Scala | gpl-3.0 | 1,711 |
package quizleague.domain.command
import quizleague.domain.Key
// Command submitting a batch of fixture results, an optional match report, and the submitting user's id.
case class ResultsSubmitCommand(fixtures:List[ResultValues], reportText:Option[String], userID:String)
// Home/away scores for a single fixture, identified by its domain Key.
case class ResultValues(fixtureKey:Key, homeScore:Int, awayScore:Int) | gumdrop/quizleague-maintain | shared/src/main/scala/quizleague/domain/command/ResultsSubmitCommand.scala | Scala | mit | 237 |
package com.blinkbox.books.search.ingester
import akka.actor.{ ActorRef, ActorSystem, Props, Status }
import akka.testkit.{ ImplicitSender, TestActorRef, TestKit }
import com.blinkbox.books.messaging._
import com.blinkbox.books.test.MockitoSyrup
import java.io.IOException
import org.junit.runner.RunWith
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.mockito.verification.VerificationMode
import org.scalatest.BeforeAndAfter
import org.scalatest.FlatSpecLike
import org.scalatest.StreamlinedXmlEquality
import org.scalatest.junit.JUnitRunner
import org.xml.sax.SAXException
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.io.Source
import scala.util.Success
import scala.util.Failure
import scala.xml.XML
/**
 * Akka TestKit + Mockito tests for BookMetadataTransformer. Each example sends
 * an XML event file to the actor under test and verifies either a transformed
 * Solr update (via the mocked SolrApi) or an ErrorHandler invocation.
 * Mockito stubbing order matters in the retry test (two failures, then success).
 */
@RunWith(classOf[JUnitRunner])
class BookMetadataTransformerTest extends TestKit(ActorSystem("test-system")) with ImplicitSender
with FlatSpecLike with BeforeAndAfter with MockitoSyrup with StreamlinedXmlEquality {
"A book data processor" should "pass on transformed book distribute message" in new TestFixture {
handler ! event("/example.book.xml")
expectMsgType[Status.Success]
checkSuccessfulResult("/example.book.out.xml")
checkNoFailures()
}
it should "pass on transformed book undistribute message" in new TestFixture {
handler ! event("/example.undistribute.xml")
expectMsgType[Status.Success]
checkSuccessfulResult("/example.undistribute.out.xml")
checkNoFailures()
}
it should "pass on transformed price data message" in new TestFixture {
handler ! event("/example.price.xml")
expectMsgType[Status.Success]
checkSuccessfulResult("/example.price.out.xml")
checkNoFailures()
}
it should "pass on message to error handler for invalid incoming XML" in new TestFixture {
val invalidEvent = Event.xml("This is not valid XML", EventHeader.apply("test"))
handler ! invalidEvent
// Should successfully complete processing of this message (i.e. not retry).
expectMsgType[Status.Success]
checkFailure[SAXException](invalidEvent)
}
it should "retry in case of a temporary error from its output" in new TestFixture {
// Force a temporary exception to happen when passing on output command to Solr.
val tempException = new IOException("Test exception")
when(output.handleUpdate(anyString))
.thenReturn(Future.failed(tempException))
.thenReturn(Future.failed(tempException))
.thenReturn(Future.successful(()))
handler ! event("/example.book.xml")
expectMsgType[Status.Success]
checkSuccessfulResult("/example.book.out.xml", attempts = times(3))
checkNoFailures()
}
it should "pass in message to error handler for non-temporary errors from its output" in new TestFixture {
val unrecoverableError = new IllegalArgumentException("Test unrecoverable error")
when(output.handleUpdate(anyString))
.thenReturn(Future.failed(unrecoverableError))
val inputEvent = event("/example.book.xml")
handler ! inputEvent
// Should successfully complete processing of this message (i.e. not retry).
expectMsgType[Status.Success]
verify(errorHandler).handleError(inputEvent, unrecoverableError)
}
// Fresh mocks, actor instance and helper assertions for each example.
trait TestFixture {
val retryInterval = 100.millis
// Define mocks and initialise them with default behaviour.
val output = mock[SolrApi]
val errorHandler = mock[ErrorHandler]
doReturn(Future.successful(())).when(errorHandler).handleError(any[Event], any[Throwable])
doReturn(Future.successful(())).when(output).handleUpdate(anyString)
// The actor under test.
val handler = TestActorRef(Props(new BookMetadataTransformer(output, errorHandler, retryInterval)))
/** Create input event. */
def event(inputFilename: String) = Event.xml(fileToString(inputFilename), EventHeader.apply("test"))
/** Check that the event was processed successfully by checking the various outputs. */
def checkSuccessfulResult(expectedFilename: String, attempts: VerificationMode = times(1)) = {
val outputCaptor = ArgumentCaptor.forClass(classOf[String])
verify(output, attempts).handleUpdate(outputCaptor.capture)
val expectedXml = XML.loadString(fileToString(expectedFilename))
val producedXml = XML.loadString(outputCaptor.getValue)
assert(expectedXml === producedXml)
}
def fileToString(inputFilename: String): String = {
val input = getClass.getResourceAsStream(inputFilename)
assert(input != null, s"Couldn't find test input $inputFilename")
Source.fromInputStream(input).mkString
}
def checkNoFailures() = verify(errorHandler, times(0)).handleError(any[Event], any[Throwable])
/** Check that event processing failed and was treated correctly. */
def checkFailure[T <: Throwable](event: Event)(implicit manifest: Manifest[T]) {
// Check no output was given.
verify(output, times(0)).handleUpdate(anyString)
// Check event was passed on to error handler, along with the expected exception.
val expectedExceptionClass = manifest.runtimeClass.asInstanceOf[Class[T]]
verify(errorHandler).handleError(eql(event), isA(expectedExceptionClass))
}
}
}
| blinkboxbooks/search-ingester-service.scala | src/test/scala/com/blinkbox/books/search/ingester/BookMetadataTransformerTest.scala | Scala | mit | 5,260 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features
/**
* Options to be applied when encoding. The same options must be specified when decoding.
*/
// NOTE: Enumeration value order is significant (ids are assigned in
// declaration order) — do not reorder WithUserData / WithoutId.
object SerializationOption extends Enumeration {
type SerializationOption = Value
/**
* If this [[SerializationOption]] is specified then all user data of the simple feature will be
* serialized and deserialized.
*/
val WithUserData = Value
// If specified, feature IDs are omitted from the serialized form.
val WithoutId = Value
// Value-class wrapper adding convenience queries to a set of options.
implicit class SerializationOptions(val options: Set[SerializationOption]) extends AnyVal {
/**
* @param value the value to search for
* @return true iff ``this`` contains the given ``value``
*/
def contains(value: SerializationOption.Value) = options.contains(value)
/** @return true iff ``this`` contains ``EncodingOption.WITH_USER_DATA`` */
def withUserData: Boolean = options.contains(SerializationOption.WithUserData)
/** @return true iff ``this`` contains the ``WithoutId`` option */
def withoutId: Boolean = options.contains(SerializationOption.WithoutId)
}
// Pre-built option sets for common configurations.
object SerializationOptions {
/**
* An empty set of encoding options.
*/
val none: Set[SerializationOption] = Set.empty[SerializationOption]
/**
* @return a new [[SerializationOptions]] containing just the ``EncodingOption.WITH_USER_DATA`` option
*/
val withUserData: Set[SerializationOption] = Set(SerializationOption.WithUserData)
val withoutId: Set[SerializationOption] = Set(SerializationOption.WithoutId)
}
}
| MutahirKazmi/geomesa | geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/SerializationOption.scala | Scala | apache-2.0 | 1,889 |
/**
* Generated by apidoc - http://www.apidoc.me
* Service version: 0.11.26
* apidoc:0.11.33 http://www.apidoc.me/bryzek/apidoc-common/0.11.26/play_2_x_json
*/
package com.bryzek.apidoc.common.v0.models {
// Generated by apidoc (see file header) — do not hand-edit; regenerate from the spec.
// Created/updated audit trail for a record.
case class Audit(
createdAt: _root_.org.joda.time.DateTime,
createdBy: com.bryzek.apidoc.common.v0.models.ReferenceGuid,
updatedAt: _root_.org.joda.time.DateTime,
updatedBy: com.bryzek.apidoc.common.v0.models.ReferenceGuid
)
case class Healthcheck(
status: String
)
/**
 * Represents a reference to another model.
 */
case class Reference(
guid: _root_.java.util.UUID,
key: String
)
// Reference carrying only a guid.
case class ReferenceGuid(
guid: _root_.java.util.UUID
)
}
package com.bryzek.apidoc.common.v0.models {
// Generated by apidoc — play-json Reads/Writes for the models above.
// Do not hand-edit; regenerate from the spec.
package object json {
import play.api.libs.json.__
import play.api.libs.json.JsString
import play.api.libs.json.Writes
import play.api.libs.functional.syntax._
import com.bryzek.apidoc.common.v0.models.json._
// UUIDs and Joda DateTimes serialize as ISO strings.
private[v0] implicit val jsonReadsUUID = __.read[String].map(java.util.UUID.fromString)
private[v0] implicit val jsonWritesUUID = new Writes[java.util.UUID] {
def writes(x: java.util.UUID) = JsString(x.toString)
}
private[v0] implicit val jsonReadsJodaDateTime = __.read[String].map { str =>
import org.joda.time.format.ISODateTimeFormat.dateTimeParser
dateTimeParser.parseDateTime(str)
}
private[v0] implicit val jsonWritesJodaDateTime = new Writes[org.joda.time.DateTime] {
def writes(x: org.joda.time.DateTime) = {
import org.joda.time.format.ISODateTimeFormat.dateTime
val str = dateTime.print(x)
JsString(str)
}
}
implicit def jsonReadsApidoccommonAudit: play.api.libs.json.Reads[Audit] = {
(
(__ \ "created_at").read[_root_.org.joda.time.DateTime] and
(__ \ "created_by").read[com.bryzek.apidoc.common.v0.models.ReferenceGuid] and
(__ \ "updated_at").read[_root_.org.joda.time.DateTime] and
(__ \ "updated_by").read[com.bryzek.apidoc.common.v0.models.ReferenceGuid]
)(Audit.apply _)
}
def jsObjectAudit(obj: com.bryzek.apidoc.common.v0.models.Audit) = {
play.api.libs.json.Json.obj(
"created_at" -> play.api.libs.json.JsString(_root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(obj.createdAt)),
"created_by" -> jsObjectReferenceGuid(obj.createdBy),
"updated_at" -> play.api.libs.json.JsString(_root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(obj.updatedAt)),
"updated_by" -> jsObjectReferenceGuid(obj.updatedBy)
)
}
implicit def jsonWritesApidoccommonAudit: play.api.libs.json.Writes[Audit] = {
new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.Audit] {
def writes(obj: com.bryzek.apidoc.common.v0.models.Audit) = {
jsObjectAudit(obj)
}
}
}
implicit def jsonReadsApidoccommonHealthcheck: play.api.libs.json.Reads[Healthcheck] = {
(__ \ "status").read[String].map { x => new Healthcheck(status = x) }
}
def jsObjectHealthcheck(obj: com.bryzek.apidoc.common.v0.models.Healthcheck) = {
play.api.libs.json.Json.obj(
"status" -> play.api.libs.json.JsString(obj.status)
)
}
implicit def jsonWritesApidoccommonHealthcheck: play.api.libs.json.Writes[Healthcheck] = {
new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.Healthcheck] {
def writes(obj: com.bryzek.apidoc.common.v0.models.Healthcheck) = {
jsObjectHealthcheck(obj)
}
}
}
implicit def jsonReadsApidoccommonReference: play.api.libs.json.Reads[Reference] = {
(
(__ \ "guid").read[_root_.java.util.UUID] and
(__ \ "key").read[String]
)(Reference.apply _)
}
def jsObjectReference(obj: com.bryzek.apidoc.common.v0.models.Reference) = {
play.api.libs.json.Json.obj(
"guid" -> play.api.libs.json.JsString(obj.guid.toString),
"key" -> play.api.libs.json.JsString(obj.key)
)
}
implicit def jsonWritesApidoccommonReference: play.api.libs.json.Writes[Reference] = {
new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.Reference] {
def writes(obj: com.bryzek.apidoc.common.v0.models.Reference) = {
jsObjectReference(obj)
}
}
}
implicit def jsonReadsApidoccommonReferenceGuid: play.api.libs.json.Reads[ReferenceGuid] = {
(__ \ "guid").read[_root_.java.util.UUID].map { x => new ReferenceGuid(guid = x) }
}
def jsObjectReferenceGuid(obj: com.bryzek.apidoc.common.v0.models.ReferenceGuid) = {
play.api.libs.json.Json.obj(
"guid" -> play.api.libs.json.JsString(obj.guid.toString)
)
}
implicit def jsonWritesApidoccommonReferenceGuid: play.api.libs.json.Writes[ReferenceGuid] = {
new play.api.libs.json.Writes[com.bryzek.apidoc.common.v0.models.ReferenceGuid] {
def writes(obj: com.bryzek.apidoc.common.v0.models.ReferenceGuid) = {
jsObjectReferenceGuid(obj)
}
}
}
}
}
package com.bryzek.apidoc.common.v0 {
// Generated by apidoc — Play route binders for ISO-8601 date/date-time params.
// Do not hand-edit; regenerate from the spec.
object Bindables {
import play.api.mvc.{PathBindable, QueryStringBindable}
import org.joda.time.{DateTime, LocalDate}
import org.joda.time.format.ISODateTimeFormat
import com.bryzek.apidoc.common.v0.models._
// Type: date-time-iso8601
implicit val pathBindableTypeDateTimeIso8601 = new PathBindable.Parsing[org.joda.time.DateTime](
ISODateTimeFormat.dateTimeParser.parseDateTime(_), _.toString, (key: String, e: _root_.java.lang.Exception) => s"Error parsing date time $key. Example: 2014-04-29T11:56:52Z"
)
implicit val queryStringBindableTypeDateTimeIso8601 = new QueryStringBindable.Parsing[org.joda.time.DateTime](
ISODateTimeFormat.dateTimeParser.parseDateTime(_), _.toString, (key: String, e: _root_.java.lang.Exception) => s"Error parsing date time $key. Example: 2014-04-29T11:56:52Z"
)
// Type: date-iso8601
implicit val pathBindableTypeDateIso8601 = new PathBindable.Parsing[org.joda.time.LocalDate](
ISODateTimeFormat.yearMonthDay.parseLocalDate(_), _.toString, (key: String, e: _root_.java.lang.Exception) => s"Error parsing date $key. Example: 2014-04-29"
)
implicit val queryStringBindableTypeDateIso8601 = new QueryStringBindable.Parsing[org.joda.time.LocalDate](
ISODateTimeFormat.yearMonthDay.parseLocalDate(_), _.toString, (key: String, e: _root_.java.lang.Exception) => s"Error parsing date $key. Example: 2014-04-29"
)
}
}
| Seanstoppable/apidoc | generated/app/BryzekApidocCommonV0Models.scala | Scala | mit | 6,581 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.catalyst.expressions.codegen.{BufferHolder, UnsafeRowWriter}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.unsafe.types.UTF8String
/** A simple class for converting Kafka ConsumerRecord to UnsafeRow */
/** A simple class for converting Kafka ConsumerRecord to UnsafeRow.
 *
 * Row layout (7 columns, per the writes below):
 *   0: key (nullable bytes), 1: value (bytes), 2: topic (string),
 *   3: partition (int), 4: offset (long), 5: timestamp, 6: timestampType id.
 *
 * NOTE(review): `sharedRow` is reused across calls — each call to
 * `toUnsafeRow` overwrites the row returned by the previous call, so callers
 * must copy the row before converting the next record; the converter is also
 * presumably not safe for concurrent use — confirm against callers.
 */
private[kafka010] class KafkaRecordToUnsafeRowConverter {
private val sharedRow = new UnsafeRow(7)
private val bufferHolder = new BufferHolder(sharedRow)
private val rowWriter = new UnsafeRowWriter(bufferHolder, 7)
def toUnsafeRow(record: ConsumerRecord[Array[Byte], Array[Byte]]): UnsafeRow = {
// Reset the variable-length buffer before writing the next record.
bufferHolder.reset()
if (record.key == null) {
rowWriter.setNullAt(0)
} else {
rowWriter.write(0, record.key)
}
rowWriter.write(1, record.value)
rowWriter.write(2, UTF8String.fromString(record.topic))
rowWriter.write(3, record.partition)
rowWriter.write(4, record.offset)
rowWriter.write(
5,
DateTimeUtils.fromJavaTimestamp(new java.sql.Timestamp(record.timestamp)))
rowWriter.write(6, record.timestampType.id)
sharedRow.setTotalSize(bufferHolder.totalSize)
sharedRow
}
} | ioana-delaney/spark | external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaRecordToUnsafeRowConverter.scala | Scala | apache-2.0 | 2,074 |
package com.sksamuel.elastic4s.requests.searches
// Hit total plus a `relation` string qualifying it — presumably the
// Elasticsearch response values "eq"/"gte"; confirm against the response parser.
case class Total(value: Long, relation: String)
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/Total.scala | Scala | apache-2.0 | 98 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.util.Locale
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable.{Buffer, ListBuffer}
import scala.xml.{Node, NodeSeq, Unparsed, Utility}
import org.apache.commons.text.StringEscapeUtils
import org.apache.spark.JobExecutionStatus
import org.apache.spark.internal.config.UI._
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1
import org.apache.spark.ui._
/** Page showing statistics and stage list for a given job */
private[ui] class JobPage(parent: JobsTab, store: AppStatusStore) extends WebUIPage("job") {
private val MAX_TIMELINE_STAGES = parent.conf.get(UI_TIMELINE_STAGES_MAXIMUM)
private val MAX_TIMELINE_EXECUTORS = parent.conf.get(UI_TIMELINE_EXECUTORS_MAXIMUM)
private val STAGES_LEGEND =
<div class="legend-area"><svg width="150px" height="85px">
<rect class="completed-stage-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Completed</text>
<rect class="failed-stage-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Failed</text>
<rect class="active-stage-legend"
x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="67px">Active</text>
</svg></div>.toString.filter(_ != '\\n')
private val EXECUTORS_LEGEND =
<div class="legend-area"><svg width="150px" height="55px">
<rect class="executor-added-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Added</text>
<rect class="executor-removed-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Removed</text>
</svg></div>.toString.filter(_ != '\\n')
private def makeStageEvent(stageInfos: Seq[v1.StageData]): Seq[String] = {
val now = System.currentTimeMillis()
stageInfos.sortBy { s =>
(s.completionTime.map(_.getTime).getOrElse(now), s.submissionTime.get.getTime)
}.takeRight(MAX_TIMELINE_STAGES).map { stage =>
val stageId = stage.stageId
val attemptId = stage.attemptId
val name = stage.name
val status = stage.status.toString.toLowerCase(Locale.ROOT)
val submissionTime = stage.submissionTime.get.getTime()
val completionTime = stage.completionTime.map(_.getTime())
.getOrElse(now)
// The timeline library treats contents as HTML, so we have to escape them. We need to add
// extra layers of escaping in order to embed this in a JavaScript string literal.
val escapedName = Utility.escape(name)
val jsEscapedNameForTooltip = StringEscapeUtils.escapeEcmaScript(Utility.escape(escapedName))
val jsEscapedNameForLabel = StringEscapeUtils.escapeEcmaScript(escapedName)
s"""
|{
| 'className': 'stage job-timeline-object ${status}',
| 'group': 'stages',
| 'start': new Date(${submissionTime}),
| 'end': new Date(${completionTime}),
| 'content': '<div class="job-timeline-content" data-toggle="tooltip"' +
| 'data-placement="top" data-html="true"' +
| 'data-title="${jsEscapedNameForTooltip} (Stage ${stageId}.${attemptId})<br>' +
| 'Status: ${status.toUpperCase(Locale.ROOT)}<br>' +
| 'Submitted: ${UIUtils.formatDate(submissionTime)}' +
| '${
if (status != "running") {
s"""<br>Completed: ${UIUtils.formatDate(completionTime)}"""
} else {
""
}
}">' +
| '${jsEscapedNameForLabel} (Stage ${stageId}.${attemptId})</div>',
|}
""".stripMargin
}
}
def makeExecutorEvent(executors: Seq[v1.ExecutorSummary]): Seq[String] = {
val events = ListBuffer[String]()
executors.sortBy { e =>
e.removeTime.map(_.getTime).getOrElse(e.addTime.getTime)
}.takeRight(MAX_TIMELINE_EXECUTORS).foreach { e =>
val addedEvent =
s"""
|{
| 'className': 'executor added',
| 'group': 'executors',
| 'start': new Date(${e.addTime.getTime()}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="top"' +
| 'data-title="Executor ${e.id}<br>' +
| 'Added at ${UIUtils.formatDate(e.addTime)}"' +
| 'data-html="true">Executor ${e.id} added</div>'
|}
""".stripMargin
events += addedEvent
e.removeTime.foreach { removeTime =>
val removedEvent =
s"""
|{
| 'className': 'executor removed',
| 'group': 'executors',
| 'start': new Date(${removeTime.getTime()}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="top"' +
| 'data-title="Executor ${e.id}<br>' +
| 'Removed at ${UIUtils.formatDate(removeTime)}' +
| '${
e.removeReason.map { reason =>
s"""<br>Reason: ${StringEscapeUtils.escapeEcmaScript(
reason.replace("\\n", " "))}"""
}.getOrElse("")
}"' +
| 'data-html="true">Executor ${e.id} removed</div>'
|}
""".stripMargin
events += removedEvent
}
}
events.toSeq
}
  /**
   * Assembles the collapsible "Event Timeline" section of the job page:
   * stage submission/completion events plus executor add/remove events, wired
   * into the client-side drawJobTimeline() renderer.
   *
   * @param stages stages to plot (already filtered to active/completed/failed)
   * @param executors executor summaries for the executor lane
   * @param appStartTime application start, used as the timeline origin
   */
  private def makeTimeline(
      stages: Seq[v1.StageData],
      executors: Seq[v1.ExecutorSummary],
      appStartTime: Long): Seq[Node] = {

    val stageEventJsonAsStrSeq = makeStageEvent(stages)
    val executorsJsonAsStrSeq = makeExecutorEvent(executors)

    // Two fixed timeline lanes: one for executors, one for stages.
    val groupJsonArrayAsStr =
      s"""
          |[
          |  {
          |    'id': 'executors',
          |    'content': '<div>Executors</div>${EXECUTORS_LEGEND}',
          |  },
          |  {
          |    'id': 'stages',
          |    'content': '<div>Stages</div>${STAGES_LEGEND}',
          |  }
          |]
        """.stripMargin
    // All events are concatenated into one JS array literal.
    val eventArrayAsStr =
      (stageEventJsonAsStrSeq ++ executorsJsonAsStrSeq).mkString("[", ",", "]")

    // Expand/collapse header, truncation notices, zoom control, and the script
    // invocation that actually draws the timeline.
    <span class="expand-job-timeline">
      <span class="expand-job-timeline-arrow arrow-closed"></span>
      <a data-toggle="tooltip" title={ToolTips.STAGE_TIMELINE} data-placement="top">
        Event Timeline
      </a>
    </span> ++
    <div id="job-timeline" class="collapsed">
      {
        if (MAX_TIMELINE_STAGES < stages.size) {
          <div>
            <strong>
              Only the most recent {MAX_TIMELINE_STAGES} submitted/completed stages
              (of {stages.size} total) are shown.
            </strong>
          </div>
        } else {
          Seq.empty
        }
      }
      {
        if (MAX_TIMELINE_EXECUTORS < executors.size) {
          <div>
            <strong>
              Only the most recent {MAX_TIMELINE_EXECUTORS} added/removed executors
              (of {executors.size} total) are shown.
            </strong>
          </div>
        } else {
          Seq.empty
        }
      }
      <div class="control-panel">
        <div id="job-timeline-zoom-lock">
          <input type="checkbox"></input>
          <span>Enable zooming</span>
        </div>
      </div>
    </div> ++
    <script type="text/javascript">
      {Unparsed(s"drawJobTimeline(${groupJsonArrayAsStr}, ${eventArrayAsStr}, " +
      s"${appStartTime}, ${UIUtils.getTimeZoneOffset()});")}
    </script>
  }
def render(request: HttpServletRequest): Seq[Node] = {
val parameterId = request.getParameter("id")
require(parameterId != null && parameterId.nonEmpty, "Missing id parameter")
val jobId = parameterId.toInt
val (jobData, sqlExecutionId) = store.asOption(store.jobWithAssociatedSql(jobId)).getOrElse {
val content =
<div id="no-info">
<p>No information to display for job {jobId}</p>
</div>
return UIUtils.headerSparkPage(
request, s"Details for Job $jobId", content, parent)
}
val isComplete = jobData.status != JobExecutionStatus.RUNNING
val stages = jobData.stageIds.map { stageId =>
// This could be empty if the listener hasn't received information about the
// stage or if the stage information has been garbage collected
store.asOption(store.lastStageAttempt(stageId)).getOrElse {
new v1.StageData(
status = v1.StageStatus.PENDING,
stageId = stageId,
attemptId = 0,
numTasks = 0,
numActiveTasks = 0,
numCompleteTasks = 0,
numFailedTasks = 0,
numKilledTasks = 0,
numCompletedIndices = 0,
submissionTime = None,
firstTaskLaunchedTime = None,
completionTime = None,
failureReason = None,
executorDeserializeTime = 0L,
executorDeserializeCpuTime = 0L,
executorRunTime = 0L,
executorCpuTime = 0L,
resultSize = 0L,
jvmGcTime = 0L,
resultSerializationTime = 0L,
memoryBytesSpilled = 0L,
diskBytesSpilled = 0L,
peakExecutionMemory = 0L,
inputBytes = 0L,
inputRecords = 0L,
outputBytes = 0L,
outputRecords = 0L,
shuffleRemoteBlocksFetched = 0L,
shuffleLocalBlocksFetched = 0L,
shuffleFetchWaitTime = 0L,
shuffleRemoteBytesRead = 0L,
shuffleRemoteBytesReadToDisk = 0L,
shuffleLocalBytesRead = 0L,
shuffleReadBytes = 0L,
shuffleReadRecords = 0L,
shuffleWriteBytes = 0L,
shuffleWriteTime = 0L,
shuffleWriteRecords = 0L,
name = "Unknown",
description = None,
details = "Unknown",
schedulingPool = null,
rddIds = Nil,
accumulatorUpdates = Nil,
tasks = None,
executorSummary = None,
killedTasksSummary = Map(),
ResourceProfile.UNKNOWN_RESOURCE_PROFILE_ID,
peakExecutorMetrics = None,
taskMetricsDistributions = None,
executorMetricsDistributions = None)
}
}
val activeStages = Buffer[v1.StageData]()
val completedStages = Buffer[v1.StageData]()
// If the job is completed, then any pending stages are displayed as "skipped":
val pendingOrSkippedStages = Buffer[v1.StageData]()
val failedStages = Buffer[v1.StageData]()
for (stage <- stages) {
if (stage.submissionTime.isEmpty) {
pendingOrSkippedStages += stage
} else if (stage.completionTime.isDefined) {
if (stage.status == v1.StageStatus.FAILED) {
failedStages += stage
} else {
completedStages += stage
}
} else {
activeStages += stage
}
}
val basePath = "jobs/job"
val pendingOrSkippedTableId =
if (isComplete) {
"skipped"
} else {
"pending"
}
val activeStagesTable =
new StageTableBase(store, request, activeStages.toSeq, "active", "activeStage",
parent.basePath, basePath, parent.isFairScheduler,
killEnabled = parent.killEnabled, isFailedStage = false)
val pendingOrSkippedStagesTable =
new StageTableBase(store, request, pendingOrSkippedStages.toSeq, pendingOrSkippedTableId,
"pendingStage", parent.basePath, basePath, parent.isFairScheduler,
killEnabled = false, isFailedStage = false)
val completedStagesTable =
new StageTableBase(store, request, completedStages.toSeq, "completed", "completedStage",
parent.basePath, basePath, parent.isFairScheduler,
killEnabled = false, isFailedStage = false)
val failedStagesTable =
new StageTableBase(store, request, failedStages.toSeq, "failed", "failedStage",
parent.basePath, basePath, parent.isFairScheduler,
killEnabled = false, isFailedStage = true)
val shouldShowActiveStages = activeStages.nonEmpty
val shouldShowPendingStages = !isComplete && pendingOrSkippedStages.nonEmpty
val shouldShowCompletedStages = completedStages.nonEmpty
val shouldShowSkippedStages = isComplete && pendingOrSkippedStages.nonEmpty
val shouldShowFailedStages = failedStages.nonEmpty
val summary: NodeSeq =
<div>
<ul class="list-unstyled">
<li>
<Strong>Status:</Strong>
{jobData.status}
</li>
<li>
<Strong>Submitted:</Strong>
{JobDataUtil.getFormattedSubmissionTime(jobData)}
</li>
<li>
<Strong>Duration:</Strong>
{JobDataUtil.getFormattedDuration(jobData)}
</li>
{
if (sqlExecutionId.isDefined) {
<li>
<strong>Associated SQL Query: </strong>
{<a href={"%s/SQL/execution/?id=%s".format(
UIUtils.prependBaseUri(request, parent.basePath),
sqlExecutionId.get)
}>{sqlExecutionId.get}</a>}
</li>
}
}
{
if (jobData.jobGroup.isDefined) {
<li>
<strong>Job Group:</strong>
{jobData.jobGroup.get}
</li>
}
}
{
if (shouldShowActiveStages) {
<li>
<a href="#active"><strong>Active Stages:</strong></a>
{activeStages.size}
</li>
}
}
{
if (shouldShowPendingStages) {
<li>
<a href="#pending">
<strong>Pending Stages:</strong>
</a>{pendingOrSkippedStages.size}
</li>
}
}
{
if (shouldShowCompletedStages) {
<li>
<a href="#completed"><strong>Completed Stages:</strong></a>
{completedStages.size}
</li>
}
}
{
if (shouldShowSkippedStages) {
<li>
<a href="#skipped"><strong>Skipped Stages:</strong></a>
{pendingOrSkippedStages.size}
</li>
}
}
{
if (shouldShowFailedStages) {
<li>
<a href="#failed"><strong>Failed Stages:</strong></a>
{failedStages.size}
</li>
}
}
</ul>
</div>
var content = summary
val appStartTime = store.applicationInfo().attempts.head.startTime.getTime()
content ++= makeTimeline((activeStages ++ completedStages ++ failedStages).toSeq,
store.executorList(false), appStartTime)
val operationGraphContent = store.asOption(store.operationGraphForJob(jobId)) match {
case Some(operationGraph) => UIUtils.showDagVizForJob(jobId, operationGraph)
case None =>
<div id="no-info">
<p>No DAG visualization information to display for job {jobId}</p>
</div>
}
content ++= operationGraphContent
if (shouldShowActiveStages) {
content ++=
<span id="active" class="collapse-aggregated-activeStages collapse-table"
onClick="collapseTable('collapse-aggregated-activeStages','aggregated-activeStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Active Stages ({activeStages.size})</a>
</h4>
</span> ++
<div class="aggregated-activeStages collapsible-table">
{activeStagesTable.toNodeSeq}
</div>
}
if (shouldShowPendingStages) {
content ++=
<span id="pending" class="collapse-aggregated-pendingOrSkippedStages collapse-table"
onClick="collapseTable('collapse-aggregated-pendingOrSkippedStages',
'aggregated-pendingOrSkippedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Pending Stages ({pendingOrSkippedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-pendingOrSkippedStages collapsible-table">
{pendingOrSkippedStagesTable.toNodeSeq}
</div>
}
if (shouldShowCompletedStages) {
content ++=
<span id="completed" class="collapse-aggregated-completedStages collapse-table"
onClick="collapseTable('collapse-aggregated-completedStages',
'aggregated-completedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Completed Stages ({completedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-completedStages collapsible-table">
{completedStagesTable.toNodeSeq}
</div>
}
if (shouldShowSkippedStages) {
content ++=
<span id="skipped" class="collapse-aggregated-pendingOrSkippedStages collapse-table"
onClick="collapseTable('collapse-aggregated-pendingOrSkippedStages',
'aggregated-pendingOrSkippedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Skipped Stages ({pendingOrSkippedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-pendingOrSkippedStages collapsible-table">
{pendingOrSkippedStagesTable.toNodeSeq}
</div>
}
if (shouldShowFailedStages) {
content ++=
<span id ="failed" class="collapse-aggregated-failedStages collapse-table"
onClick="collapseTable('collapse-aggregated-failedStages','aggregated-failedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Failed Stages ({failedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-failedStages collapsible-table">
{failedStagesTable.toNodeSeq}
</div>
}
UIUtils.headerSparkPage(
request, s"Details for Job $jobId", content, parent, showVisualization = true)
}
}
| wangmiao1981/spark | core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala | Scala | apache-2.0 | 19,023 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import util.Arrays.asList
import kafka.common.BrokerEndPointNotAvailableException
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.UpdateMetadataRequest
import org.apache.kafka.common.requests.UpdateMetadataRequest.{Broker, EndPoint}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.junit.Test
import org.junit.Assert._
import scala.collection.JavaConverters._
/**
 * Unit tests for the broker-side MetadataCache: verifies topic metadata lookup
 * per listener and the error codes reported when the leader, a replica, or an
 * ISR member is not among the alive brokers, plus cache-snapshot immutability.
 */
class MetadataCacheTest {

  // An unknown topic must produce an empty result, not an error entry.
  @Test
  def getTopicMetadataNonExistingTopics() {
    val topic = "topic"
    val cache = new MetadataCache(1)
    val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))
    assertTrue(topicMetadata.isEmpty)
  }

  // Happy path: all leaders/replicas alive; metadata must report the endpoint
  // matching the requested listener for every partition of every topic.
  @Test
  def getTopicMetadata() {
    val topic0 = "topic-0"
    val topic1 = "topic-1"

    val cache = new MetadataCache(1)

    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1

    // Each broker exposes both a PLAINTEXT and an SSL endpoint.
    def endPoints(brokerId: Int): Seq[EndPoint] = {
      val host = s"foo-$brokerId"
      Seq(
        new EndPoint(host, 9092, SecurityProtocol.PLAINTEXT, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)),
        new EndPoint(host, 9093, SecurityProtocol.SSL, ListenerName.forSecurityProtocol(SecurityProtocol.SSL))
      )
    }

    val brokers = (0 to 4).map { brokerId =>
      new Broker(brokerId, endPoints(brokerId).asJava, "rack1")
    }.toSet

    val partitionStates = Map(
      new TopicPartition(topic0, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, 0, 0, asList(0, 1, 3), zkVersion, asList(0, 1, 3), asList()),
      new TopicPartition(topic0, 1) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, 1, 1, asList(1, 0), zkVersion, asList(1, 2, 0, 4), asList()),
      new TopicPartition(topic1, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, 2, 2, asList(2, 1), zkVersion, asList(2, 1, 3), asList()))

    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateCache(15, updateMetadataRequest)

    for (securityProtocol <- Seq(SecurityProtocol.PLAINTEXT, SecurityProtocol.SSL)) {
      val listenerName = ListenerName.forSecurityProtocol(securityProtocol)

      def checkTopicMetadata(topic: String): Unit = {
        val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName)
        assertEquals(1, topicMetadatas.size)

        val topicMetadata = topicMetadatas.head
        assertEquals(Errors.NONE, topicMetadata.error)
        assertEquals(topic, topicMetadata.topic)

        val topicPartitionStates = partitionStates.filter { case (tp, _) => tp.topic ==  topic }
        val partitionMetadatas = topicMetadata.partitionMetadata.asScala.sortBy(_.partition)
        assertEquals(s"Unexpected partition count for topic $topic", topicPartitionStates.size, partitionMetadatas.size)

        partitionMetadatas.zipWithIndex.foreach { case (partitionMetadata, partitionId) =>
          assertEquals(Errors.NONE, partitionMetadata.error)
          assertEquals(partitionId, partitionMetadata.partition)
          val leader = partitionMetadata.leader
          val partitionState = topicPartitionStates(new TopicPartition(topic, partitionId))
          assertEquals(partitionState.basePartitionState.leader, leader.id)
          assertEquals(partitionState.basePartitionState.isr, partitionMetadata.isr.asScala.map(_.id).asJava)
          assertEquals(partitionState.basePartitionState.replicas, partitionMetadata.replicas.asScala.map(_.id).asJava)
          // The advertised leader endpoint must match the queried listener.
          val endPoint = endPoints(partitionMetadata.leader.id).find(_.listenerName == listenerName).get
          assertEquals(endPoint.host, leader.host)
          assertEquals(endPoint.port, leader.port)
        }
      }

      checkTopicMetadata(topic0)
      checkTopicMetadata(topic1)
    }
  }

  // Leader broker (id 1) is not alive: partition must report LEADER_NOT_AVAILABLE
  // with an empty ISR but intact replica list.
  @Test
  def getTopicMetadataPartitionLeaderNotAvailable() {
    val topic = "topic"

    val cache = new MetadataCache(1)

    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
    val brokers = Set(new Broker(0, Seq(new EndPoint("foo", 9092, securityProtocol, listenerName)).asJava, null))

    // leader is not among the alive brokers
    val leader = 1
    val leaderEpoch = 1
    val partitionStates = Map(
      new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, asList(0), zkVersion, asList(0), asList()))

    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateCache(15, updateMetadataRequest)

    val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName)
    assertEquals(1, topicMetadatas.size)

    val topicMetadata = topicMetadatas.head
    assertEquals(Errors.NONE, topicMetadata.error)

    val partitionMetadatas = topicMetadata.partitionMetadata
    assertEquals(1, partitionMetadatas.size)

    val partitionMetadata = partitionMetadatas.get(0)
    assertEquals(0, partitionMetadata.partition)
    assertEquals(Errors.LEADER_NOT_AVAILABLE, partitionMetadata.error)
    assertTrue(partitionMetadata.isr.isEmpty)
    assertEquals(1, partitionMetadata.replicas.size)
    assertEquals(0, partitionMetadata.replicas.get(0).id)
  }

  // A replica broker is not alive: behavior depends on errorUnavailableEndpoints
  // (legacy flag for old metadata versions) -- either report the full replica
  // list with no error, or filter it and flag REPLICA_NOT_AVAILABLE.
  @Test
  def getTopicMetadataReplicaNotAvailable() {
    val topic = "topic"

    val cache = new MetadataCache(1)

    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
    val brokers = Set(new Broker(0, Seq(new EndPoint("foo", 9092, securityProtocol, listenerName)).asJava, null))

    // replica 1 is not available
    val leader = 0
    val leaderEpoch = 0
    val replicas = asList[Integer](0, 1)
    val isr = asList[Integer](0)

    val partitionStates = Map(
      new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, zkVersion, replicas, asList()))

    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateCache(15, updateMetadataRequest)

    // Validate errorUnavailableEndpoints = false
    val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false)
    assertEquals(1, topicMetadatas.size)

    val topicMetadata = topicMetadatas.head
    assertEquals(Errors.NONE, topicMetadata.error)

    val partitionMetadatas = topicMetadata.partitionMetadata
    assertEquals(1, partitionMetadatas.size)

    val partitionMetadata = partitionMetadatas.get(0)
    assertEquals(0, partitionMetadata.partition)
    assertEquals(Errors.NONE, partitionMetadata.error)
    assertEquals(Set(0, 1), partitionMetadata.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0), partitionMetadata.isr.asScala.map(_.id).toSet)

    // Validate errorUnavailableEndpoints = true
    val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true)
    assertEquals(1, topicMetadatasWithError.size)

    val topicMetadataWithError = topicMetadatasWithError.head
    assertEquals(Errors.NONE, topicMetadataWithError.error)

    val partitionMetadatasWithError = topicMetadataWithError.partitionMetadata
    assertEquals(1, partitionMetadatasWithError.size)

    val partitionMetadataWithError = partitionMetadatasWithError.get(0)
    assertEquals(0, partitionMetadataWithError.partition)
    assertEquals(Errors.REPLICA_NOT_AVAILABLE, partitionMetadataWithError.error)
    assertEquals(Set(0), partitionMetadataWithError.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0), partitionMetadataWithError.isr.asScala.map(_.id).toSet)
  }

  // An ISR member is not alive: same errorUnavailableEndpoints contract as above,
  // but exercised via the ISR list instead of the replica list.
  @Test
  def getTopicMetadataIsrNotAvailable() {
    val topic = "topic"

    val cache = new MetadataCache(1)

    val zkVersion = 3
    val controllerId = 2
    val controllerEpoch = 1
    val securityProtocol = SecurityProtocol.PLAINTEXT
    val listenerName = ListenerName.forSecurityProtocol(securityProtocol)
    val brokers = Set(new Broker(0, Seq(new EndPoint("foo", 9092, securityProtocol, listenerName)).asJava, "rack1"))

    // isr member 1 is not available (only broker 0 is alive)
    val leader = 0
    val leaderEpoch = 0
    val replicas = asList[Integer](0)
    val isr = asList[Integer](0, 1)

    val partitionStates = Map(
      new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, zkVersion, replicas, asList()))

    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, controllerId, controllerEpoch,
      partitionStates.asJava, brokers.asJava).build()
    cache.updateCache(15, updateMetadataRequest)

    // Validate errorUnavailableEndpoints = false
    val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false)
    assertEquals(1, topicMetadatas.size)

    val topicMetadata = topicMetadatas.head
    assertEquals(Errors.NONE, topicMetadata.error)

    val partitionMetadatas = topicMetadata.partitionMetadata
    assertEquals(1, partitionMetadatas.size)

    val partitionMetadata = partitionMetadatas.get(0)
    assertEquals(0, partitionMetadata.partition)
    assertEquals(Errors.NONE, partitionMetadata.error)
    assertEquals(Set(0), partitionMetadata.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0, 1), partitionMetadata.isr.asScala.map(_.id).toSet)

    // Validate errorUnavailableEndpoints = true
    val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true)
    assertEquals(1, topicMetadatasWithError.size)

    val topicMetadataWithError = topicMetadatasWithError.head
    assertEquals(Errors.NONE, topicMetadataWithError.error)

    val partitionMetadatasWithError = topicMetadataWithError.partitionMetadata
    assertEquals(1, partitionMetadatasWithError.size)

    val partitionMetadataWithError = partitionMetadatasWithError.get(0)
    assertEquals(0, partitionMetadataWithError.partition)
    assertEquals(Errors.REPLICA_NOT_AVAILABLE, partitionMetadataWithError.error)
    assertEquals(Set(0), partitionMetadataWithError.replicas.asScala.map(_.id).toSet)
    assertEquals(Set(0), partitionMetadataWithError.isr.asScala.map(_.id).toSet)
  }

  // Requesting metadata for a listener the leader does not expose must raise
  // BrokerEndPointNotAvailableException rather than return partial data.
  @Test
  def getTopicMetadataWithNonSupportedSecurityProtocol() {
    val topic = "topic"

    val cache = new MetadataCache(1)

    val securityProtocol = SecurityProtocol.PLAINTEXT
    val brokers = Set(new Broker(0,
      Seq(new EndPoint("foo", 9092, securityProtocol, ListenerName.forSecurityProtocol(securityProtocol))).asJava, ""))
    val controllerEpoch = 1
    val leader = 0
    val leaderEpoch = 0
    val replicas = asList[Integer](0)
    val isr = asList[Integer](0, 1)
    val partitionStates = Map(
      new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, 3, replicas, asList()))
    val version = ApiKeys.UPDATE_METADATA.latestVersion
    val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, partitionStates.asJava,
      brokers.asJava).build()
    cache.updateCache(15, updateMetadataRequest)

    try {
      val result = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.SSL))
      fail(s"Exception should be thrown by `getTopicMetadata` with non-supported SecurityProtocol, $result was returned instead")
    }
    catch {
      case _: BrokerEndPointNotAvailableException => //expected
    }
  }

  // A snapshot obtained from getAliveBrokers must not reflect later cache updates.
  @Test
  def getAliveBrokersShouldNotBeMutatedByUpdateCache() {
    val topic = "topic"

    val cache = new MetadataCache(1)

    def updateCache(brokerIds: Set[Int]) {
      val brokers = brokerIds.map { brokerId =>
        val securityProtocol = SecurityProtocol.PLAINTEXT
        new Broker(brokerId, Seq(
          new EndPoint("foo", 9092, securityProtocol, ListenerName.forSecurityProtocol(securityProtocol))).asJava, "")
      }
      val controllerEpoch = 1
      val leader = 0
      val leaderEpoch = 0
      val replicas = asList[Integer](0)
      val isr = asList[Integer](0, 1)
      val partitionStates = Map(
        new TopicPartition(topic, 0) -> new UpdateMetadataRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, 3, replicas, asList()))
      val version = ApiKeys.UPDATE_METADATA.latestVersion
      val updateMetadataRequest = new UpdateMetadataRequest.Builder(version, 2, controllerEpoch, partitionStates.asJava,
        brokers.asJava).build()
      cache.updateCache(15, updateMetadataRequest)
    }

    val initialBrokerIds = (0 to 2).toSet
    updateCache(initialBrokerIds)
    val aliveBrokersFromCache = cache.getAliveBrokers
    // This should not change `aliveBrokersFromCache`
    updateCache((0 to 3).toSet)
    assertEquals(initialBrokerIds, aliveBrokersFromCache.map(_.id).toSet)
  }
}
| themarkypantz/kafka | core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala | Scala | apache-2.0 | 14,333 |
package org.vegas.compiler
import java.io.{PrintWriter, File}
import scala.collection.mutable.Queue
import scala.util.Try
import org.vegas.{log, vegasc}
import org.vegas.vtype.Scope
/**
 * Base of the vegas compiler pipeline. Every stage transforms a source string
 * into Some(output) on success or None on failure.
 */
abstract class Compiler {
    // Global CLI options shared by every stage.
    val options = vegasc.options

    /** Runs this stage; None signals a failed compilation. */
    def compile(source: String): Option[String]

    // Right-associative sugar: `"src" >>: stage` / `file >>: stage` compile
    // directly, while `stageA >>: stageB` builds a two-stage pipeline.
    def >>:(that: String) = compile(that)
    def >>:(that: FileReader) = compile(that.fileSource)
    def >>:(that: Compiler) = new CompilerPipeline(that, this)
}
/**
 * Shared mutable compiler state: a counter for generated reference names, the
 * global scope, and interned string storage.
 * NOTE(review): the counter is not thread-safe; assumes single-threaded compilation.
 */
object Compiler {
    // Monotonically increasing counter backing generated reference names.
    var refCount = 0
    val scope = Scope()
    val stringStorage = Queue[String]()

    /** Allocates the next PHP variable reference name, e.g. "$__ref3". */
    def usesRef = {
        refCount = refCount + 1
        s"$$__ref$refCount"
    }

    /** Allocates the next function reference name, e.g. "__ref3". */
    def usesFunctionRef = {
        refCount = refCount + 1
        s"__ref$refCount"
    }
}
/**
 * Convenience wrapper exposing a compiler object as a function: `X(src)`
 * compiles `src`, yielding the output or the empty string on failure.
 */
trait StaticCompiler {
    val compiler: Compiler

    /** Returns the wrapped compiler instance. */
    def apply(): Compiler = compiler

    /** Compiles `source`; "" when the pipeline fails. */
    def apply(source: String): String = compiler.compile(source).getOrElse("")
}
/**
 * Chains two stages: `stage1`'s output (if any) is fed into `stage2`.
 * Any exception escaping the stages is logged and mapped to None.
 */
sealed class CompilerPipeline(stage1: Compiler, stage2: Compiler) extends Compiler {
    def compile(source: String) =
        Try(stage1.compile(source).flatMap(stage2.compile))
            .recover {
                case err: exception.VegasException => log(err.msg); None
                case err => log(err.getClass.toString); None
            }
            .getOrElse(None)
}
/**
 * Pipeline source stage backed by a file. The file is read once, lazily, and
 * the handle is closed immediately after reading.
 */
case class FileReader(val filename: String) extends Compiler {
    // Opened lazily so constructing the stage never touches the filesystem.
    lazy val file = io.Source.fromFile(filename)
    lazy val fileSource = try file.mkString finally file.close()
    // NOTE(review): `source` is intentionally ignored -- this stage only
    // injects the file's contents into the pipeline.
    def compile(source: String) = Some(fileSource)
}
/**
 * Pipeline sink stage that writes the compiled output to `<filename>.php`.
 *
 * @param filename target path without the ".php" extension
 */
case class FileWriter(val filename: String) extends Compiler {
    /** Persists `source` to disk; yields Some("Success!") when the write completes. */
    def compile(source: String) = {
        val writer = new PrintWriter(new File(filename + ".php"))
        // Fix: close the writer even if write() throws, so the file handle
        // (and buffered data) is never leaked on failure.
        try {
            writer.write(source)
        } finally {
            writer.close()
        }
        Some("Success!")
    }
}
/** Identity stage: forwards its input unchanged. */
object PassThru extends Compiler {
    def compile(source: String) = Some(source)
}
/** Terminal stage: prints the compiled output to stdout. */
object StdOut extends Compiler {
    def compile(source: String) = {
        println(source)
        Some("Success!")
    }
}
| rrdelaney/vegas | src/main/scala/org/vegas/compiler/Compiler.scala | Scala | mit | 1,914 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openchai.spark.rdd
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, Partitioner}
/**
 * An RDD whose partitions mirror a listing of rack-qualified paths: one
 * partition per path, scheduled preferentially on the host encoded in the path.
 */
trait LsRDD[K, V] extends RDD[(K, V)] with Serializable {

  /** Backing paths; one partition is created per path. */
  def paths(): Seq[String]

  // Parsed and sorted by full path so partition indices are deterministic.
  def rackPaths = paths().map { p => RackPath(p) }.sortBy { p => p.fullPath }

  type KV[A, B] = (A, B)
  type KVO = KV[K, V]

  // One LsRDDPartition per rack path, indexed by position in the sorted listing.
  def rackPathsToPartitions[T] = rackPaths.zipWithIndex.map { case (r, x) =>
    LsRDDPartition[T](x, r)
  }.toArray // .asInstanceOf[Array[Partition]]

  protected[rdd] var parts: Array[Partition] = getPartitions

  override protected def getPartitions: Array[Partition] =
    rackPathsToPartitions[V].asInstanceOf[Array[Partition]]

  // Prefer running each partition on the host embedded in its rack path.
  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    val lsPartition = split.asInstanceOf[LsRDDPartition[Double]]
    Seq(lsPartition.rackPath.host)
  }
}
/**
 * Helpers for decoding tab-delimited RDD records into typed values.
 */
object LsRDD {

  import reflect.runtime.universe._

  /** Field separator used by all supported record encodings. */
  val Delim = '\t'

  /** Instantiates the runtime class behind a type tag via its no-arg constructor. */
  @inline def tagToClass[W: TypeTag] = typeTag[W].mirror.runtimeClass(typeOf[W]).newInstance.asInstanceOf[W]

  /** A labeled vector: textual label, target value, and feature array. */
  case class LabeledArr(label: String, value: Double, data: Array[Double])

  /**
   * Converts one delimited input line into an instance of `outClass`.
   * Supported targets: String, Double, Array[Double], and LabeledArr
   * (encoded as label&#92;tvalue&#92;tfeature1&#92;tfeature2...).
   *
   * @throws UnsupportedOperationException if the input is not a String
   * @throws IllegalArgumentException for an unsupported target class
   */
  def converter[T: TypeTag, U: TypeTag](outClass: Class[U], oin: T): U = {
    if (!oin.isInstanceOf[String]) {
      throw new UnsupportedOperationException(s"Only RDD[String] presently supported as input to LsRDD (actual type=${oin.getClass.getName}. Check back later.")
    }
    val in = oin.asInstanceOf[String]
    val string = classOf[String]
    val double = classOf[Double]
    val darr = classOf[Array[Double]]
    val labeledarr = classOf[LabeledArr]
    val out = outClass match {
      case x if x == string => in
      // Fix: a Double target must actually be parsed; returning the raw String
      // only deferred a ClassCastException to the call site (erasure hid it here).
      case x if x == double => in.toDouble
      case x if x == darr => in.split(Delim).map(_.toDouble)
      // Fix: construct a LabeledArr instead of a (label, (value, data)) tuple,
      // which could never be used as a LabeledArr by callers.
      case x if x == labeledarr =>
        val fields = in.split(Delim)
        LabeledArr(fields.head, fields(1).toDouble, fields.drop(2).map(_.toDouble))
      case _ => throw new IllegalArgumentException(s"Type $outClass not supported")
    }
    out.asInstanceOf[U]
  }
}
| javadba/p2p | src/main/scala/org/openchai/spark/rdd/LsRdd.scala | Scala | apache-2.0 | 2,823 |
package com.stulsoft.ysps.ptraits
/**
* @author Yuriy Stul
*/
/** Minimal concrete implementation of Trait1 carrying an Int and a String. */
class Impl1(val p1: Int, val p2: String) extends Trait1 {
}
/** Demo entry point exercising Trait1 through an Impl1 instance. */
object Impl1 extends App {
  private val sample: Trait1 = new Impl1(123, " the test")
  println(sample.f1())
  println(s"t1.p1=${sample.p1}")
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/ptraits/Impl1.scala | Scala | mit | 254 |
/*
* Copyright (c) 2016. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.compiler.parser
import org.argus.jawa.compiler.lexer.Token
import org.argus.jawa.compiler.lexer.Tokens._
import org.argus.jawa.compiler.util.CaseClassReflector
import org.argus.jawa.core.{DefaultReporter, JavaKnowledge, JawaType, Signature}
import org.argus.jawa.core.io.{NoPosition, Position}
import org.sireum.util._
/**
* @author <a href="mailto:fgwei521@gmail.com">Fengguo Wei</a>
*/
sealed trait JawaAstNode extends CaseClassReflector with JavaKnowledge {
def tokens: IList[Token]
def firstTokenOption: Option[Token] = tokens.headOption
lazy val lastTokenOption: Option[Token] = tokens.lastOption
def firstToken = firstTokenOption.get
lazy val lastToken = lastTokenOption.get
//for CompilationUnit it will be null
var enclosingTopLevelClass: TypeDefSymbol = null
protected trait Flattenable {
def tokens: IList[Token]
}
def getAllChildrenInclude: IList[JawaAstNode] = {
this :: getAllChildren
}
def getAllChildren: IList[JawaAstNode] = {
val allAsts: MList[JawaAstNode] = mlistEmpty
val worklist: MList[JawaAstNode] = mlistEmpty
allAsts += this
allAsts ++= this.immediateChildren
worklist ++= this.immediateChildren
while(worklist.nonEmpty){
val node = worklist.remove(0)
allAsts ++= node.immediateChildren
worklist ++= node.immediateChildren
}
allAsts.toList
}
def isEmpty = tokens.isEmpty
protected implicit def astNodeToFlattenable(node: JawaAstNode): Flattenable = new Flattenable { val tokens = node.tokens }
protected implicit def listToFlattenable[T](list: IList[T])(implicit ev$1: T => Flattenable): Flattenable = new Flattenable { val tokens = list flatMap { _.tokens } }
protected implicit def optionToFlattenable[T](option: Option[T])(implicit ev$1: T => Flattenable): Flattenable = new Flattenable { val tokens = option.toList flatMap { _.tokens } }
protected implicit def pairToFlattenable[T1, T2](pair: (T1, T2))(implicit ev$1: T1 => Flattenable, ev$2: T2 => Flattenable): Flattenable = new Flattenable { val tokens = pair._1.tokens ::: pair._2.tokens }
protected implicit def tripleToFlattenable[T1, T2, T3](triple: (T1, T2, T3))(implicit ev$1: T1 => Flattenable, ev$2: T2 => Flattenable, ev$3: T3 => Flattenable): Flattenable = new Flattenable { val tokens = triple._1.tokens ++ triple._2.tokens ++ triple._3.tokens }
protected implicit def eitherToFlattenable[T1, T2](either: T1 Either T2)(implicit ev$1: T1 => Flattenable, ev$2: T2 => Flattenable): Flattenable = new Flattenable {
val tokens = either match {
case Left(f) => f.tokens
case Right(f) => f.tokens
}
}
protected implicit def tokenToFlattenable(token: Token): Flattenable = new Flattenable { val tokens = List(token) }
protected def flatten(flattenables: Flattenable*): IList[Token] = flattenables.toList flatMap { _.tokens }
def immediateChildren: IList[JawaAstNode] = productIterator.toList flatten immediateAstNodes
private def immediateAstNodes(n: Any): IList[JawaAstNode] = n match {
case a: JawaAstNode => List(a)
case t: Token => Nil
case Some(x) => immediateAstNodes(x)
case xs @ (_ :: _) => xs flatMap { immediateAstNodes(_) }
case Left(x) => immediateAstNodes(x)
case Right(x) => immediateAstNodes(x)
case (l, r) => immediateAstNodes(l) ++ immediateAstNodes(r)
case (x, y, z) => immediateAstNodes(x) ++ immediateAstNodes(y) ++ immediateAstNodes(z)
case true | false | Nil | None => Nil
}
def toCode: String = {
val sb: StringBuilder = new StringBuilder
val (startline, startcolumn) = firstTokenOption match {
case Some(ft) => (ft.line, ft.column)
case None => (0, 0)
}
var prevline: Int = 0
var prevcolumn: Int = 0
tokens.foreach {
token =>
val line = token.line - startline
val column = if(token.line == 0) token.column - startcolumn else token.column
if(line != prevline) prevcolumn = 0
val text = token.rawtext
for(i <- 1 to line - prevline){
sb.append("\\n")
}
for(i <- 1 to column - prevcolumn){
sb.append(" ")
}
prevline = line
prevcolumn = column + token.length
sb.append(text)
}
sb.toString
}
/**
 * Returns the character range covered by this node's tokens, or `None`
 * when the node contains no tokens.
 */
def rangeOpt: Option[Range] =
    if (tokens.isEmpty)
      None
    else {
      // Range is [start of first token, last character of last token].
      val firstIndex = tokens.head.pos.start
      val lastIndex = tokens.last.lastCharacterOffset
      Some(Range(firstIndex, lastIndex - firstIndex + 1))
    }
/**
 * Returns the source position spanned by this node's tokens, or
 * `NoPosition` when the node has no tokens.
 */
def pos: Position = {
    if(tokens.isEmpty) NoPosition
    else {
      val firstIndex = tokens.head.pos.start
      val lastIndex = tokens.last.lastCharacterOffset
      // Length is inclusive of the last character.
      Position.range(firstToken.file, firstIndex, lastIndex - firstIndex + 1)
    }
  }
}
// Nodes that the parser can produce directly as a parse root.
sealed trait ParsableAstNode extends JawaAstNode
// Root of a parsed Jawa source file: the top-level class/interface
// declarations followed by the end-of-file token.
case class CompilationUnit(
    topDecls: IList[ClassOrInterfaceDeclaration],
    eofToken: Token) extends ParsableAstNode {
  lazy val tokens = flatten(topDecls, eofToken)
}
// A declaration carrying Jawa annotations; exposes the access modifier
// recorded in the "AccessFlag"/"Access" annotation (empty when absent).
sealed trait Declaration extends JawaAstNode {
  def annotations: IList[Annotation]
  def accessModifier: String = {
    annotations.find { a => a.key == "AccessFlag" || a.key == "Access" } match{
      case Some(a) => a.value
      case None => ""
    }
  }
}
// A symbol occurrence backed by a single identifier token.
sealed trait JawaSymbol extends JawaAstNode {
  def id: Token
}
// Defining occurrence of a symbol.
sealed trait DefSymbol extends JawaSymbol
// Referencing occurrence of a symbol.
sealed trait RefSymbol extends JawaSymbol
// Symbol that resolves to a class/interface type.
sealed trait ClassSym {
  def typ: JawaType
}
// Symbol that resolves to a method signature.
sealed trait MethodSym {
  def signature: Signature
}
// Symbol that resolves to a field (fully-qualified name + owner type).
sealed trait FieldSym{
  def FQN: String
  def baseType: JawaType
  def fieldName: String
}
// Symbol that resolves to a local variable; `owner` is wired up post-parse.
sealed trait VarSym{
  def varName: String
  def owner: MethodDeclaration
}
// Symbol that resolves to a code location; `locationIndex` is assigned
// post-parse when locations are numbered.
sealed trait LocationSym{
  def location: String
  var locationIndex: Int = 0
  def owner: MethodDeclaration
}
// Defining occurrence of a class/interface name.
case class TypeDefSymbol(id: Token) extends DefSymbol with ClassSym {
  lazy val tokens = flatten(id)
  def typ: JawaType = getTypeFromName(id.text)
}
// Reference to a class/interface name.
case class TypeSymbol(id: Token) extends RefSymbol with ClassSym {
  lazy val tokens = flatten(id)
  def typ: JawaType = getTypeFromName(id.text)
}
// Defining occurrence of a method; `signature` is filled in post-parse.
case class MethodDefSymbol(id: Token) extends DefSymbol with MethodSym {
  lazy val tokens = flatten(id)
  def baseType: JawaType = getClassTypeFromMethodFullName(id.text)
  var signature: Signature = null
  def methodName: String = getMethodNameFromMethodFullName(id.text)
}
// Reference to a method by full name; `signature` is filled in post-parse.
case class MethodNameSymbol(id: Token) extends RefSymbol with MethodSym {
  lazy val tokens = flatten(id)
  def baseType: JawaType = getClassTypeFromMethodFullName(id.text)
  var signature: Signature = null
  def methodName: String = getMethodNameFromMethodFullName(id.text)
}
// Defining occurrence of a field; the token text wraps the FQN in "@@".
case class FieldDefSymbol(id: Token) extends DefSymbol with FieldSym {
  lazy val tokens = flatten(id)
  def FQN: String = id.text.replaceAll("@@", "")
  def baseType: JawaType = getClassTypeFromFieldFQN(FQN)
  def fieldName: String = getFieldNameFromFieldFQN(FQN)
}
// Reference to a field; the token text wraps the FQN in "@@".
case class FieldNameSymbol(id: Token) extends RefSymbol with FieldSym {
  lazy val tokens = flatten(id)
  def FQN: String = id.text.replaceAll("@@", "")
  def baseType: JawaType = getClassTypeFromFieldFQN(FQN)
  def fieldName: String = getFieldNameFromFieldFQN(FQN)
}
// Reference to a full method signature literal.
case class SignatureSymbol(id: Token) extends RefSymbol with MethodSym {
  lazy val tokens = flatten(id)
  def signature: Signature = new Signature(id.text)
//  def FQMN: String = signature.FQMN
  def methodName: String = signature.methodName
}
// Defining occurrence of a local variable; `owner` is wired up post-parse.
case class VarDefSymbol(id: Token) extends DefSymbol with VarSym {
  lazy val tokens = flatten(id)
  def varName: String = id.text
  var owner: MethodDeclaration = null
}
// Reference to a local variable; `owner` is wired up post-parse.
case class VarSymbol(id: Token) extends RefSymbol with VarSym {
  lazy val tokens = flatten(id)
  def varName: String = id.text
  var owner: MethodDeclaration = null
}
/**
 * A location definition of the form `#L00001.` or just `#`.
 */
case class LocationDefSymbol(id: Token) extends DefSymbol with LocationSym {
  lazy val tokens = flatten(id)
  def location: String = {
    // "#" alone marks an anonymous location; otherwise strip the leading
    // '#' and the trailing '.'.
    if(id.text == "#") id.text
    else id.text.substring(1, id.text.length() - 1)
  }
  var owner: MethodDeclaration = null
}
/**
 * A jump-target location reference of the form `L00001`.
 */
case class LocationSymbol(id: Token) extends RefSymbol with LocationSym {
  lazy val tokens = flatten(id)
  def location: String = id.text
  var owner: MethodDeclaration = null
}
// A top-level `record`/class declaration: name, annotations, optional
// extends/implements clause, instance-field block, static fields, methods.
case class ClassOrInterfaceDeclaration(
    dclToken: Token,
    cityp: TypeDefSymbol,
    annotations: IList[Annotation],
    extendsAndImplimentsClausesOpt: Option[ExtendsAndImplimentsClauses],
    instanceFieldDeclarationBlock: InstanceFieldDeclarationBlock,
    staticFields: IList[StaticFieldDeclaration],
    methods: IList[MethodDeclaration]) extends Declaration with ParsableAstNode {
  lazy val tokens = flatten(dclToken, cityp, annotations, extendsAndImplimentsClausesOpt, instanceFieldDeclarationBlock, staticFields, methods)
  // Interfaces are marked by the annotation `@kind interface`.
  def isInterface: Boolean = {
    annotations.exists { a => a.key == "kind" && a.value == "interface" }
  }
  def parents: IList[JawaType] = extendsAndImplimentsClausesOpt match {case Some(e) => e.parents; case None => ilistEmpty}
  def superClassOpt: Option[JawaType] = extendsAndImplimentsClausesOpt match{case Some(e) => e.superClassOpt; case None => None}
  def interfaces: IList[JawaType] = extendsAndImplimentsClausesOpt match {case Some(e) => e.interfaces; case None => ilistEmpty}
  // All fields: instance fields first, then static fields.
  def fields: IList[Field with Declaration] = instanceFieldDeclarationBlock.instanceFields ++ staticFields
  def instanceFields: IList[InstanceFieldDeclaration] = instanceFieldDeclarationBlock.instanceFields
  def typ: JawaType = cityp.typ
}
// A `@key value` annotation; `value` is "" when no value was given.
case class Annotation(
    at: Token,
    annotationID: Token,
    annotationValueOpt: Option[AnnotationValue]) extends JawaAstNode {
  lazy val tokens = flatten(at, annotationID, annotationValueOpt)
  def key: String = annotationID.text
  def value: String = annotationValueOpt.map(_.value).getOrElse("")
}
// The value part of an annotation, rendered as a string.
sealed trait AnnotationValue extends JawaAstNode {
  def value: String
}
// Annotation value that is a type expression (e.g. `^java.lang.Object`).
case class TypeExpressionValue(
    typExp: TypeExpression) extends AnnotationValue {
  lazy val tokens = flatten(typExp)
  def value: String = typExp.typ.name
}
// Annotation value that is a symbol reference.
case class SymbolValue(
    sym: JawaSymbol) extends AnnotationValue {
  lazy val tokens = flatten(sym)
  def value: String = sym.id.text
}
// Annotation value that is a bare token.
case class TokenValue(
    token: Token) extends AnnotationValue {
  lazy val tokens = flatten(token)
  def value: String = token.text
}
// `extends`/`implements` clause; at most one parent may be the superclass.
case class ExtendsAndImplimentsClauses(
    extendsAndImplementsToken: Token,
    parentTyps: IList[(ExtendAndImpliment, Option[Token])]) extends JawaAstNode {
  require(parentTyps.count(_._1.isExtend) <= 1)
  lazy val tokens = flatten(extendsAndImplementsToken, parentTyps)
  def parents: IList[JawaType] = parentTyps.map(_._1.typ)
  def superClassOpt: Option[JawaType] = parentTyps.find(_._1.isExtend).map(_._1.typ)
  def interfaces: IList[JawaType] = parentTyps.filter(_._1.isImplement).map(_._1.typ)
}
// A single parent type; its `@kind` annotation says whether it is the
// extended class or an implemented interface.
case class ExtendAndImpliment(
    parenttyp: TypeSymbol,
    annotations: IList[Annotation])extends JawaAstNode {
  lazy val tokens = flatten(parenttyp, annotations)
  def typ: JawaType = parenttyp.typ
  def isExtend: Boolean = annotations.exists { a => a.key == "kind" && a.value == "class" }
  def isImplement: Boolean = annotations.exists { a => a.key == "kind" && a.value == "interface" }
}
// Common shape of instance and static field declarations.
sealed trait Field extends JawaAstNode {
  def typ: Type
  def fieldSymbol: FieldDefSymbol
  def FQN: String
  def fieldName: String = getFieldNameFromFieldFQN(FQN)
  def isStatic: Boolean
}
// Brace-delimited block of instance field declarations in a class body.
case class InstanceFieldDeclarationBlock(
    lbrace: Token,
    instanceFields: IList[InstanceFieldDeclaration],
    rbrace: Token) extends JawaAstNode {
  lazy val tokens = flatten(lbrace, instanceFields, rbrace)
}
// One instance field: `Type symbol @annotations ;`
case class InstanceFieldDeclaration(
    typ: Type,
    fieldSymbol: FieldDefSymbol,
    annotations: IList[Annotation],
    semi: Token) extends Field with Declaration {
  lazy val tokens = flatten(typ, fieldSymbol, annotations, semi)
  def FQN: String = fieldSymbol.FQN
  def isStatic: Boolean = false
}
// One static field: `staticfield Type symbol @annotations ;`
case class StaticFieldDeclaration(
    staticFieldToken: Token,
    typ: Type,
    fieldSymbol: FieldDefSymbol,
    annotations: IList[Annotation],
    semi: Token) extends Field with Declaration {
  lazy val tokens = flatten(staticFieldToken, typ, fieldSymbol, annotations, semi)
  def FQN: String = fieldSymbol.FQN
  def isStatic: Boolean = true
}
// A type expression `^Type` as used in annotations.
case class TypeExpression(hat: Token, typ_ : Type) extends JawaAstNode {
  lazy val tokens = flatten(hat, typ_)
  def typ: JawaType = typ_.typ
}
// A (possibly array) type: a base type name plus zero or more `[]` fragments.
case class Type(base: Either[TypeSymbol, Token], typeFragments: IList[TypeFragment]) extends JawaAstNode {
  lazy val tokens = flatten(base, typeFragments)
  // Array dimension count (0 for a non-array type).
  def dimentions: Int = typeFragments.size
  def baseType: JawaType =
    base match {
      case Left(ts) => ts.typ
      case Right(t) => getTypeFromName(t.text)
    }
  def typ: JawaType = getType(baseType.baseTyp, dimentions)
}
// One `[]` array dimension marker.
case class TypeFragment(lbracket: Token, rbracket: Token) extends JawaAstNode {
  lazy val tokens = flatten(lbracket, rbracket)
}
// A `procedure` declaration; `body` is a var because an UnresolvedBody can
// later be replaced by its ResolvedBody.
case class MethodDeclaration(
    dclToken: Token,
    returnType: Type,
    methodSymbol: MethodDefSymbol,
    paramClause: ParamClause,
    annotations: IList[Annotation],
    var body: Body) extends Declaration with ParsableAstNode {
  lazy val tokens = flatten(dclToken, returnType, methodSymbol, paramClause, annotations, body)
  def isConstructor: Boolean = isJawaConstructor(name)
  // Simple name: text after the last '.' of the full method name.
  def name: String = methodSymbol.id.text.substring(methodSymbol.id.text.lastIndexOf(".") + 1)
  // NOTE: these `.get` calls assume the parser always emits the "owner" and
  // "signature" annotations on a procedure.
  def owner: String = annotations.find { a => a.key == "owner" }.get.value
  def signature: Signature = new Signature(annotations.find { a => a.key == "signature" }.get.value)
  def thisParam: Option[Param] = paramClause.thisParam
  def param(i: Int): Param = paramClause.param(i)
  def paramlist: IList[Param] = paramClause.paramlist
}
// Parenthesized parameter list; the receiver (`this`) parameter is kept
// separate from the ordinary parameter list.
case class ParamClause(
    lparen: Token,
    params: IList[(Param, Option[Token])],
    rparen: Token) extends JawaAstNode {
  lazy val tokens = flatten(lparen, params, rparen)
  def thisParam: Option[Param] = params.find { x => x._1.isThis }.map(_._1)
  // i indexes the non-`this` parameters only.
  def param(i: Int): Param =
    i match {
      case n if n >= 0 && n < paramlist.size => paramlist(n)
      case _ => throw new IndexOutOfBoundsException("List size " + paramlist.size + " but index " + i)
    }
  def paramlist: IList[Param] = params.filterNot(_._1.isThis).map(_._1)
}
// A single parameter; `@kind this`/`@kind object` mark receiver and
// reference-typed parameters.
case class Param(
    typ: Type,
    paramSymbol: VarDefSymbol,
    annotations: IList[Annotation]) extends JawaAstNode {
  lazy val tokens = flatten(typ, paramSymbol, annotations)
  def isThis: Boolean = annotations.exists { a => a.key == "kind" && a.value == "this" }
  def isObject: Boolean = annotations.exists { a => a.key == "kind" && (a.value == "this" || a.value == "object") }
  def name: String = paramSymbol.id.text
}
// A procedure body, either still raw tokens or fully parsed.
sealed trait Body extends ParsableAstNode
// A body kept as raw tokens; `resolve` re-parses it into a ResolvedBody.
case class UnresolvedBody(bodytokens: IList[Token]) extends Body {
  lazy val tokens = flatten(bodytokens)
  def resolve: ResolvedBody = JawaParser.parse[Body](tokens, resolveBody = true, new DefaultReporter).asInstanceOf[ResolvedBody]
}
// Fully parsed body: local declarations, locations, and catch clauses.
case class ResolvedBody(
    lbrace: Token,
    locals: IList[LocalVarDeclaration],
    locations: IList[Location],
    catchClauses: IList[CatchClause],
    rbrace: Token) extends Body {
  lazy val tokens = flatten(lbrace, locals, locations, catchClauses, rbrace)
  // Catch clauses whose [from, to] location range covers the given index.
  def getCatchClauses(index: Int): IList[CatchClause] = {
    catchClauses.filter{
      cc =>
        index >= cc.range.fromLocation.locationIndex && index <= cc.range.toLocation.locationIndex
    }
  }
}
// A local variable declaration; an omitted type defaults to java.lang.Object.
case class LocalVarDeclaration(
    typOpt: Option[Type],
    varSymbol: VarDefSymbol,
    semi: Token) extends Declaration {
  lazy val tokens = flatten(typOpt, varSymbol, semi)
  def annotations: IList[Annotation] = ilistEmpty
  def typ: JawaType = typOpt match {
    case Some(t) => t.typ
    case None => JAVA_TOPLEVEL_OBJECT_TYPE
  }
}
// A labeled program point: `#L00001. statement ;`
case class Location(
    locationSymbol: LocationDefSymbol,
    statement: Statement,
    semiOpt: Option[Token]) extends ParsableAstNode {
  lazy val tokens = flatten(locationSymbol, statement, semiOpt)
  // "" for the anonymous location "#" (symbol of length <= 1).
  def locationUri: String = {
    if(locationSymbol.id.length <= 1) ""
    else locationSymbol.location
  }
  def locationIndex = locationSymbol.locationIndex
}
// A statement occurring at a Location.
sealed trait Statement extends JawaAstNode
// A `call` statement. For non-static calls the first element of the argument
// clause is the receiver; `argVars`/`args` exclude it.
case class CallStatement(
    callToken: Token,
    lhsOpt: Option[CallLhs],
    methodNameSymbol: MethodNameSymbol,
    argClause: ArgClause,
    annotations: IList[Annotation]) extends Statement {
  lazy val tokens = flatten(callToken, lhsOpt, methodNameSymbol, argClause, annotations)
  //default is virtual call
  def kind: String = annotations.find { a => a.key == "kind" }.map(_.value).getOrElse("virtual")
  // Assumes the parser always emits "signature" and "classDescriptor".
  def signature: Signature = new Signature(annotations.find { a => a.key == "signature" }.get.value)
  def classDescriptor: String = annotations.find { a => a.key == "classDescriptor" }.get.value
  def isStatic: Boolean = kind == "static"
  def isVirtual: Boolean = kind == "virtual"
  def isSuper: Boolean = kind == "super"
  def isDirect: Boolean = kind == "direct"
  def isInterface: Boolean = kind == "interface"
  def recvVarOpt: Option[VarSymbol] = if(isStatic) None else Some(argClause.varSymbols.head._1)
  def argVars: IList[VarSymbol] = if(isStatic) argClause.varSymbols.map(_._1) else argClause.varSymbols.tail.map(_._1)
  def argVar(i: Int): VarSymbol = {
    i match {
      case n if n >= 0 && n < argVars.size => argVars(n)
      case _ => throw new IndexOutOfBoundsException("List size " + argVars.size + " but index " + i)
    }
  }
  def recvOpt: Option[String] = if(isStatic) None else Some(argClause.arg(0))
  def args: IList[String] = if(isStatic) argClause.varSymbols.map(_._1.id.text) else argClause.varSymbols.tail.map(_._1.id.text)
  def arg(i: Int): String = {
    i match {
      case n if n >= 0 && n < args.size => args(n)
      case _ => throw new IndexOutOfBoundsException("List size " + args.size + " but index " + i)
    }
  }
}
// The `x :=` left-hand side of a call statement.
case class CallLhs(
    lhs: VarSymbol,
    assignOP: Token) extends JawaAstNode {
  lazy val tokens = flatten(lhs, assignOP)
}
//case class CallLhs
// Parenthesized comma-separated argument variables of a call.
case class ArgClause(
    lparen: Token,
    varSymbols: IList[(VarSymbol, Option[Token])],
    rparen: Token) extends JawaAstNode {
  lazy val tokens = flatten(lparen, varSymbols, rparen)
  // Name of the i-th argument (including the receiver, if any).
  def arg(i: Int): String =
    i match {
      case n if n >= 0 && n < varSymbols.size => varSymbols(n)._1.id.text
      case _ => throw new IndexOutOfBoundsException("List size " + varSymbols.size + " but index " + i)
    }
}
// `lhs := rhs @annotations`; kind defaults to "object" when the rhs is a
// `new` expression and "" otherwise.
case class AssignmentStatement(
    lhs: Expression with LHS,
    assignOP: Token,
    rhs: Expression with RHS,
    annotations: IList[Annotation]) extends Statement {
  lazy val tokens = flatten(lhs, assignOP, rhs, annotations)
  def kind: String = annotations.find { a => a.key == "kind" }.map(_.value).getOrElse({if(rhs.isInstanceOf[NewExpression])"object" else ""})
  def typOpt: Option[JawaType] = annotations.find { a => a.key == "type" }.map(_.annotationValueOpt.get.asInstanceOf[TypeExpressionValue].typExp.typ)
}
// `throw v`
case class ThrowStatement(
    throwToken: Token,
    varSymbol: VarSymbol) extends Statement {
  lazy val tokens = flatten(throwToken, varSymbol)
}
// `if cond then goto L...` — thengoto carries the `then` and `goto` tokens.
case class IfStatement(
    ifToken: Token,
    cond: BinaryExpression,
    thengoto: (Token, Token),
    targetLocation: LocationSymbol) extends Statement {
  lazy val tokens = flatten(ifToken, cond, thengoto, targetLocation)
}
// `goto L...`
case class GotoStatement(
    goto: Token,
    targetLocation: LocationSymbol) extends Statement {
  lazy val tokens = flatten(goto, targetLocation)
}
// `switch v | c => goto L... | else => goto L...`
case class SwitchStatement(
    switchToken: Token,
    condition: VarSymbol,
    cases: IList[SwitchCase],
    defaultCaseOpt: Option[SwitchDefaultCase]) extends Statement {
  lazy val tokens = flatten(switchToken, condition, cases, defaultCaseOpt)
}
// One `| constant => goto L...` arm of a switch.
case class SwitchCase(
    bar: Token,
    constant: Token,
    arrow: Token,
    goto: Token,
    targetLocation: LocationSymbol) extends JawaAstNode {
  lazy val tokens = flatten(bar, constant, arrow, goto, targetLocation)
}
// The `| else => goto L...` arm of a switch.
case class SwitchDefaultCase(
    bar: Token,
    elseToken: Token,
    arrow: Token,
    goto: Token,
    targetLocation: LocationSymbol) extends JawaAstNode {
  lazy val tokens = flatten(bar, elseToken, arrow, goto, targetLocation)
}
// `return [v] @annotations`
case class ReturnStatement(
    returnToken: Token,
    varOpt: Option[VarSymbol],
    annotations: IList[Annotation]) extends Statement {
  lazy val tokens = flatten(returnToken, varOpt, annotations)
  def kind: String = annotations.find { a => a.key == "kind" }.map(_.value).getOrElse("")
}
// `@monitorenter v` / `@monitorexit v`
case class MonitorStatement(
    at: Token,
    monitor: Token,
    varSymbol: VarSymbol) extends Statement {
  lazy val tokens = flatten(at, monitor, varSymbol)
  def isEnter: Boolean = monitor.tokenType == MONITOR_ENTER
  def isExit: Boolean = monitor.tokenType == MONITOR_EXIT
}
// A location with only annotations and no executable statement.
case class EmptyStatement(
    annotations: IList[Annotation]) extends Statement {
  lazy val tokens = flatten(annotations)
}
// An expression node; LHS/RHS mark where it may appear in an assignment.
sealed trait Expression extends JawaAstNode
sealed trait LHS
sealed trait RHS
// A bare name: either a local variable or (Right) a static field reference.
case class NameExpression(
    varSymbol: Either[VarSymbol, FieldNameSymbol] // FieldNameSymbol here is static fields
  ) extends Expression with LHS with RHS {
  lazy val tokens = flatten(varSymbol)
  def name: String =
    varSymbol match {
      case Left(v) => v.varName
      case Right(f) => f.FQN
    }
  def isStatic: Boolean = varSymbol.isRight
}
// `Exception` — the caught exception value in a handler.
case class ExceptionExpression(
    exception: Token) extends Expression with RHS {
  lazy val tokens = flatten(exception)
}
// `null`
case class NullExpression(
    nul: Token) extends Expression with RHS {
  lazy val tokens = flatten(nul)
}
// `constclass @type ^T` — a class literal.
case class ConstClassExpression(
    const_class: Token,
    at: Token,
    typeToken: Token,
    typExp: TypeExpression) extends Expression with RHS {
  lazy val tokens = flatten(const_class, at, typeToken, typExp)
}
// `length @variable v` — array length.
case class LengthExpression(
    length: Token,
    at: Token,
    variable: Token,
    varSymbol: VarSymbol) extends Expression with RHS {
  lazy val tokens = flatten(length, at, variable, varSymbol)
}
// Array element access `v[i]...`; one IndexingSuffix per dimension.
case class IndexingExpression(
    varSymbol: VarSymbol,
    indices: IList[IndexingSuffix]) extends Expression with LHS with RHS {
  lazy val tokens = flatten(varSymbol, indices)
  def base: String = varSymbol.varName
  def dimentions: Int = indices.size
}
// One `[index]` where index is a variable or a constant token.
case class IndexingSuffix(
    lbracket: Token,
    index: Either[VarSymbol, Token],
    rbracket: Token) extends JawaAstNode {
  lazy val tokens = flatten(lbracket, index, rbracket)
}
// Instance field access `v.field`.
case class AccessExpression(
    varSymbol: VarSymbol,
    dot: Token,
    fieldSym: FieldNameSymbol) extends Expression with LHS with RHS {
  lazy val tokens = flatten(varSymbol, dot, fieldSym)
  def base: String = varSymbol.varName
  def fieldName: String = fieldSym.fieldName
}
// `(c1, c2, ...)` — a tuple of integer constants.
case class TupleExpression(
    lparen: Token,
    constants: IList[(Token, Option[Token])],
    rparen: Token) extends Expression with RHS {
  lazy val tokens = flatten(lparen, constants, rparen)
  def integers: IList[Int] = constants.map(_._1.text.toInt)
}
// `(Type) v` — a cast.
case class CastExpression(
    lparen: Token,
    typ: Type,
    rparen: Token,
    varSym: VarSymbol) extends Expression with RHS {
  lazy val tokens = flatten(lparen, typ, rparen, varSym)
  def varName: String = varSym.varName
}
// `new Type[...]...` — object or array allocation; one TypeFragmentWithInit
// per array dimension, each optionally carrying size variables.
case class NewExpression(
    newToken: Token,
    base: Either[TypeSymbol, Token],
    typeFragmentsWithInit: IList[TypeFragmentWithInit]) extends Expression with RHS {
  lazy val tokens = flatten(newToken, base, typeFragmentsWithInit)
  def dimentions: Int = typeFragmentsWithInit.size
  def baseType: JawaType =
    base match {
      case Left(ts) => ts.typ
      case Right(t) => getTypeFromName(t.text)
    }
  def typ: JawaType = getType(baseType.baseTyp, dimentions)
}
// One `[v1, v2]` array dimension, possibly holding size variables.
case class TypeFragmentWithInit(lbracket: Token, varSymbols: IList[(VarSymbol, Option[Token])], rbracket: Token) extends JawaAstNode {
  lazy val tokens = flatten(lbracket, varSymbols, rbracket)
  def varNames: IList[String] = varSymbols.map(_._1.varName)
  def varName(i: Int): String = varNames(i)
}
// `instanceof @variable v @type ^T`
case class InstanceofExpression(
    instanceof: Token,
    at1: Token,
    variable: Token,
    varSymbol: VarSymbol,
    at2: Token,
    typeToken: Token,
    typExp: TypeExpression) extends Expression with RHS {
  lazy val tokens = flatten(instanceof, at1, variable, varSymbol, at2, typeToken, typExp)
}
// A constant literal; `getLiteral` normalizes the raw token text (strips
// string quotes and the I/L/F/D numeric suffixes).
case class LiteralExpression(
  constant: Token) extends Expression with RHS {
  lazy val tokens = flatten(constant)
  private def getLiteral: String = {
    val lit = constant.text
    constant.tokenType match {
      case STRING_LITERAL =>
        // Drop surrounding quotes.
        lit.substring(1, lit.length() - 1)
      case FLOATING_POINT_LITERAL =>
        lit match {
          case x if x.endsWith("F") => x.substring(0, x.length() - 1)
          case x if x.endsWith("D") => x.substring(0, x.length() - 1)
          case _ => lit
        }
      case INTEGER_LITERAL =>
        lit match {
          case x if x.endsWith("I") => x.substring(0, x.length() - 1)
          case x if x.endsWith("L") => x.substring(0, x.length() - 1)
          case _ => lit
        }
      case CHARACTER_LITERAL =>
        lit
      case _ =>
        // Unknown literal kinds fall back to "0".
        "0"
    }
  }
  def getInt: Int = getLiteral.toInt
  def getLong: Long = getLiteral.toLong
  def getFloat: Float = getLiteral.toFloat
  def getDouble: Double = getLiteral.toDouble
  def getString: String = getLiteral
}
// `op v` — unary operation on a variable.
case class UnaryExpression(
    op: Token,
    unary: VarSymbol)
  extends Expression with RHS {
  lazy val tokens = flatten(op, unary)
}
// `v op rhs` — rhs is a variable or a constant token.
case class BinaryExpression(
    left: VarSymbol,
    op: Token,
    right: Either[VarSymbol, Token])
  extends Expression with RHS {
  lazy val tokens = flatten(left, op, right)
}
// `cmp(v1, v2)` — float/double/long comparison (fcmpl, dcmpg, lcmp, ...).
case class CmpExpression(
    cmp: Token,
    lparen: Token,
    var1Symbol: VarSymbol,
    comma: Token,
    var2Symbol: VarSymbol,
    rparen: Token) extends Expression with RHS {
  lazy val tokens = flatten(cmp, lparen, var1Symbol, comma, var2Symbol, rparen)
  // Operand type implied by the cmp opcode; any other opcode text throws a
  // MatchError (the parser only produces these five).
  def paramType: JawaType = {
    cmp.text match {
      case "fcmpl" | "fcmpg" => new JawaType("float")
      case "dcmpl" | "dcmpg" => new JawaType("double")
      case "lcmp" => new JawaType("long")
    }
  }
}
// `catch Type @[L1..L2] goto Lh;` — exception handler registration.
case class CatchClause(
    catchToken: Token,
    typ: Type,
    range: CatchRange,
    goto: Token,
    targetLocation: LocationSymbol,
    semi: Token) extends JawaAstNode {
  lazy val tokens = flatten(catchToken, typ, range, goto, targetLocation, semi)
  def from: String = range.fromLocation.location
  def to: String = range.toLocation.location
}
// The `@[Lfrom..Lto]` protected-region range of a catch clause.
case class CatchRange(
    at: Token,
    lbracket: Token,
    fromLocation: LocationSymbol,
    range: Token,
    toLocation: LocationSymbol,
    rbracket: Token) extends JawaAstNode {
  lazy val tokens = flatten(at, lbracket, fromLocation, range, toLocation, rbracket)
}
| arguslab/jawa-compiler | src/main/scala/org/argus/jawa/compiler/parser/JawaAstNode.scala | Scala | epl-1.0 | 27,617 |
//############################################################################
// Overloads
//############################################################################
//############################################################################
// Fixture objects for the overload-resolution test below. Every definition
// here is deliberately shaped to probe a specific resolution rule; do not
// "clean up" the duplicated/odd signatures.
object Ops {
  def - = 0;
  def -(c: Char) = c;
  def -(i: Int) = i;
  def -- = 0;
  def --(c: Char) = c;
  def --(i: Int) = i;
}
// Overloads distinguished only by parameter type (plus a 0-arity form).
object Funcs {
  def foo = 0;
  // def foo() = 1;
  def foo(c: Char) = 2;
  def foo(i: Int) = 3;
}
// M1/M2 differ only in declaration order of the generic vs. bounded
// overload — the test checks order does not change resolution.
object M1 {
  def f[A](x: A) = 11;
  def f[A <: Ordered[A]](x: Ordered[A]) = 12;
}
object M2 {
  def f[A <: Ordered[A]](x: Ordered[A]) = 21;
  def f[A](x: A) = 22;
}
// Overloads whose function-typed second argument drives inference.
object M3 {
  def f(x: Int, f: Int => Int) = f(x)
  def f(x: String, f: String => String) = f(x)
}
// The test driver: `check` prints "ok"/"KO" per assertion; `test` exercises
// local vs. qualified overloaded operators and the fixture objects above.
// The exact call forms (and thus which overload each resolves to) are the
// point of the test — preserve them verbatim.
object overloads {
// Prints "ok: what = value" or "KO: what = value != expected" and flushes.
def check(what: String, actual: Any, expected: Any): Unit = {
    val success: Boolean = actual == expected;
    Console.print(if (success) "ok" else "KO");
    var value: String = if (actual == null) "null" else actual.toString();
    if (value == "\\u0000") value = "\\\\u0000";
    Console.print(": " + what + " = " + value);
    if (!success) Console.print(" != " + expected);
    Console.println();
    Console.flush();
  }
// Local copies of the Ops overloads, so both unqualified and qualified
// resolution paths are exercised.
def - = 0;
def -(c: Char) = c;
def -(i: Int) = i;
def -- = 0;
def --(c: Char) = c;
def --(i: Int) = i;
def test: Unit = {
    check("-('a')", -('a'), -97);
    check("-(97)", -(97), -97);
    check("Ops.-('a')", Ops.-('a'), 'a');
    check("Ops.-(97)", Ops.-(97), 97);
    check("--", --, 0);
    check("--('a')", --('a'), 'a');
    check("--(97)", --(97), 97);
    check("Ops.--", Ops.--, 0);
    check("Ops.--('a')", Ops.--('a'), 'a');
    check("Ops.--(97)", Ops.--(97), 97);
    check("Funcs.foo", Funcs.foo, 0);
//    check("Funcs.foo()", Funcs.foo(), 1);
    check("Funcs.foo('a')", Funcs.foo('a'), 2);
    check("Funcs.foo(97)", Funcs.foo(97), 3);
    val x = 3;
    check("M1.f(" + x +")", M1.f(x), 11);
    check("M2.f(" + x +")", M2.f(x), 22);
//     val y = new scala.collection.mutable.Stack[Int];
//     check("M1.f(" + y +")", M1.f(y), 12);
//     check("M2.f(" + y +")", M2.f(y), 21);
    check("M3.f(\\"abc\\", _.reverse)", M3.f("abc", _.reverse), "cba")
    check("M3.f(2, _ + 2)", M3.f(2, _ + 2), 4)
    check("f(\\"abc\\", { case s: String => s})", M3.f("abc", { case s: String => s}), "abc")
  }
}
//############################################################################
// Test entry point: runs the overload-resolution checks.
object Test {
def main(args: Array[String]): Unit = {
    overloads.test;
  }
}
//############################################################################
| som-snytt/dotty | tests/run/overloads.scala | Scala | apache-2.0 | 2,746 |
/*
* Artificial Intelligence for Humans
* Volume 1: Fundamental Algorithms
* Scala Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
* Copyright 2013 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.randomize
import java.util.Random
object BasicGenerateRandom {
}
/**
 * Adapter that exposes Java's built-in [[java.util.Random]] through the
 * AIFH random-number-generator interface by delegating every call.
 *
 * @param rand The wrapped java.util.Random instance.
 */
class BasicGenerateRandom private (rand: Random) extends AbstractGenerateRandom {
  /**
   * Construct a generator seeded from the current time.
   */
  def this() = this(new Random())
  /**
   * Construct a generator with an explicit seed.
   *
   * @param seed The seed value.
   */
  def this(seed: Long) = this(new Random(seed))
  override def nextBoolean: Boolean = rand.nextBoolean
  override def nextInt: Int = rand.nextInt
  override def nextLong: Long = rand.nextLong
  override def nextFloat: Float = rand.nextFloat
  override def nextDouble(): Double = rand.nextDouble
  override def nextGaussian: Double = rand.nextGaussian
}
| PeterLauris/aifh | vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/randomize/BasicGenerateRandom.scala | Scala | apache-2.0 | 1,830 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.scalaz
import slamdata.Predef._
import scalaz._, Scalaz._
// Provides a scalaz Catchable instance for StateT: `attempt` threads the
// state through unchanged on failure, and `fail` discards the state.
trait StateTInstances {
  implicit def stateTCatchable[F[_]: Catchable: Monad, S]: Catchable[StateT[F, S, ?]] =
    new Catchable[StateT[F, S, ?]] {
      def attempt[A](fa: StateT[F, S, A]) =
        StateT[F, S, Throwable \\/ A](s =>
          Catchable[F].attempt(fa.run(s)) map {
            // On failure keep the incoming state s; on success use the
            // updated state s1 produced by the action.
            case -\\/(t) => (s, t.left)
            case \\/-((s1, a)) => (s1, a.right)
          })
      def fail[A](t: Throwable) =
        StateT[F, S, A](_ => Catchable[F].fail(t))
    }
}
object stateT extends StateTInstances {
  // Small helpers missing from scalaz's StateT companion.
  object StateTContrib {
    // Replace the state with s, yielding unit.
    def put[F[_]: Monad, S](s: S): StateT[F, S, Unit] =
      StateT[F, S, Unit](_ => (s, ()).point[F])
    // Read the current state without modifying it.
    def get[F[_]: Monad, S]: StateT[F, S, S] =
      StateT[F, S, S](s => (s, s).point[F])
  }
}
| drostron/quasar | foundation/src/main/scala/quasar/contrib/scalaz/stateT.scala | Scala | apache-2.0 | 1,499 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.time
import java.util.Locale
import org.joda.time._
import org.joda.time.field.AbstractReadableInstantFieldProperty
/**
 * Pimped wrapper around Joda-Time's AbstractReadableInstantFieldProperty:
 * every member is a Scala-style accessor (noun, no `get` prefix) that
 * delegates to the corresponding `getXxx`/`toXxx` method, with short
 * aliases (`maxValue` for `maximumValue`, etc.) where convenient.
 */
class RichAbstractReadableInstantFieldProperty(underlying: AbstractReadableInstantFieldProperty) {
  // Textual renderings of the field value, optionally localized.
  def shortText: String =
    underlying.getAsShortText
  def asShortText: String =
    underlying.getAsShortText
  def shortText(locale: Locale): String =
    underlying.getAsShortText(locale)
  def asShortText(locale: Locale): String =
    underlying.getAsShortText(locale)
  def asString: String =
    underlying.getAsString
  def text: String =
    underlying.getAsText
  def asText: String =
    underlying.getAsText
  def text(locale: Locale): String =
    underlying.getAsText(locale)
  def asText(locale: Locale): String =
    underlying.getAsText(locale)
  // Underlying field metadata.
  def durationField: DurationField =
    underlying.getDurationField
  def field: DateTimeField =
    underlying.getField
  def fieldType: DateTimeFieldType =
    underlying.getFieldType
  def leapAmount: Int =
    underlying.getLeapAmount
  def leapDurationField: DurationField =
    underlying.getLeapDurationField
  // Value bounds for this instant / overall for the field type.
  def maximumValue: Int =
    underlying.getMaximumValue
  def maxValue: Int =
    underlying.getMaximumValue
  def maximumValueOverall: Int =
    underlying.getMaximumValueOverall
  def maxValueOverall: Int =
    underlying.getMaximumValueOverall
  def minimumValue: Int =
    underlying.getMinimumValue
  def minValue: Int =
    underlying.getMinimumValue
  def minimumValueOverall: Int =
    underlying.getMinimumValueOverall
  def minValueOverall: Int =
    underlying.getMinimumValueOverall
  def name: String =
    underlying.getName
  def rangeDurationField: DurationField =
    underlying.getRangeDurationField
  // The interval covered by this field of the instant.
  def interval: Interval =
    underlying.toInterval
}
| jorgeortiz85/scala-time | src/main/scala/org/scala_tools/time/RichAbstractReadableInstantFieldProperty.scala | Scala | apache-2.0 | 2,396 |
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package multipart
import cats.effect.Concurrent
import cats.effect.Resource
import cats.effect.std.Supervisor
import cats.syntax.all._
import fs2.Chunk
import fs2.Pipe
import fs2.Pull
import fs2.Pure
import fs2.RaiseThrowable
import fs2.Stream
import fs2.io.file.Files
import fs2.io.file.Flags
import fs2.io.file.Path
import org.http4s.internal.bug
import org.typelevel.ci.CIString
/** A low-level multipart-parsing pipe. Most end users will prefer EntityDecoder[Multipart]. */
object MultipartParser {
// Byte patterns used when scanning for multipart boundaries.
private[this] val CRLFBytesN = Array[Byte]('\\r', '\\n')
private[this] val DoubleCRLFBytesN = Array[Byte]('\\r', '\\n', '\\r', '\\n')
private[this] val DashDashBytesN = Array[Byte]('-', '-')
// Raw UTF-8 bytes of the boundary parameter value.
private[this] val BoundaryBytesN: Boundary => Array[Byte] = boundary =>
    boundary.value.getBytes("UTF-8")
// "--boundary": the line that opens the first part.
val StartLineBytesN: Boundary => Array[Byte] = BoundaryBytesN.andThen(DashDashBytesN ++ _)
/** `delimiter` in RFC 2046: CRLF "--" boundary. */
private[this] val ExpectedBytesN: Boundary => Array[Byte] =
    BoundaryBytesN.andThen(CRLFBytesN ++ DashDashBytesN ++ _)
private[this] val dashByte: Byte = '-'.toByte
private[this] val streamEmpty = Stream.empty
private type SplitStream[F[_]] = Pull[F, Nothing, (Stream[F, Byte], Stream[F, Byte])]
// Low-level parse events: a part's headers, its body bytes, and its end.
// Contract: every PartStart is eventually followed by a matching PartEnd,
// with only PartChunks in between.
private[this] sealed trait Event
private[this] final case class PartStart(value: Headers) extends Event
private[this] final case class PartChunk(value: Chunk[Byte]) extends Event
private[this] case object PartEnd extends Event
/** Parses the byte stream into a single in-memory [[Multipart]] by
  * accumulating every parsed part. `limit` bounds header-line length.
  */
def parseStreamed[F[_]: Concurrent](
      boundary: Boundary,
      limit: Int = 1024,
  ): Pipe[F, Byte, Multipart[F]] = { st =>
    st.through(
      parseToPartsStream(boundary, limit)
    ).fold(Vector.empty[Part[F]])(_ :+ _)
      .map(Multipart(_, boundary))
  }
/** Parses the byte stream into a stream of [[Part]]s, reassembling the
  * low-level PartStart/PartChunk/PartEnd events. A part is emitted only
  * once its PartEnd is seen; malformed event sequences raise an internal
  * bug error.
  */
def parseToPartsStream[F[_]](boundary: Boundary, limit: Int = 1024)(implicit
      F: Concurrent[F]
  ): Pipe[F, Byte, Part[F]] = { st =>
    st.through(
      parseEvents[F](boundary, limit)
    )
      // The left half is the part under construction, the right half is a part to be emitted.
      .evalMapAccumulate[F, Option[Part[F]], Option[Part[F]]](None) { (acc, item) =>
        (acc, item) match {
          case (None, PartStart(headers)) =>
            F.pure((Some(Part(headers, Entity.empty)), None))
          // Shouldn't happen if the `parseToEventsStream` contract holds.
          case (None, (_: PartChunk | PartEnd)) =>
            F.raiseError(bug("Missing PartStart"))
          case (Some(acc0), PartChunk(chunk)) =>
            // Append the chunk to the body of the part under construction.
            F.pure((Some(acc0.copy(entity = Entity(acc0.body ++ Stream.chunk(chunk)))), None))
          case (Some(_), PartEnd) =>
            // Part done - emit it and start over.
            F.pure((None, acc))
          // Shouldn't happen if the `parseToEventsStream` contract holds.
          case (Some(_), _: PartStart) =>
            F.raiseError(bug("Missing PartEnd"))
        }
      }
      .mapFilter(_._2)
  }
/** Scans chunk `c` for the byte sequence `values`, resuming a partial match at
  * offset `state` carried over from a previous chunk. Returns the new match
  * state and, on a complete match, the remainder of the chunk after the
  * matched sequence; bytes before/inside the match are discarded.
  */
private def splitAndIgnorePrev[F[_]](
    values: Array[Byte],
    state: Int,
    c: Chunk[Byte],
): (Int, Stream[F, Byte]) = {
  var i = 0
  var currState = state
  val len = values.length
  while (currState < len && i < c.size) {
    if (c(i) == values(currState))
      currState += 1
    else if (c(i) == values(0))
      // Mismatch, but this byte could start a fresh match.
      currState = 1
    else
      currState = 0
    i += 1
  }
  if (currState == 0)
    (0, Stream.empty)
  else if (currState == len)
    // Full delimiter found: keep only the bytes following it.
    (currState, Stream.chunk(c.drop(i)))
  else
    // Partial match at the chunk boundary; continue in the next chunk.
    (currState, Stream.empty)
}
/** Split a chunk in the case of a complete match:
  *
  * If it is a chunk that is between a partial match
  * (middleChunked), consider the prior partial match
  * as part of the data to emit.
  *
  * If it is a fully matched, fresh chunk (no carry over partial match),
  * emit everything until the match, and everything after the match.
  *
  * If it is the continuation of a partial match,
  * emit everything after the partial match.
  *
  * Returns (state, left data before delimiter, right data after delimiter).
  */
private def splitCompleteMatch[F[_]](
    middleChunked: Boolean,
    sti: Int,
    i: Int,
    acc: Stream[F, Byte],
    carry: Stream[F, Byte],
    c: Chunk[Byte],
): (Int, Stream[F, Byte], Stream[F, Byte]) =
  if (middleChunked)
    (
      sti,
      // Emit the partial match as well
      acc ++ carry ++ Stream.chunk(c.take(i - sti)),
      Stream.chunk(c.drop(i)),
    ) // Emit after the match
  else
    (
      sti,
      acc, // block completes partial match, so do not emit carry
      Stream.chunk(c.drop(i)),
    ) // Emit everything after the match
/** Split a chunk in the case of a partial match:
  *
  * If the partial match starts inside this chunk (middleChunked), everything
  * before it — including the previous carry — is emitted as data, and the
  * partial match becomes the new carry. Otherwise the whole chunk extends
  * the existing carry.
  *
  * Used by [[splitOnChunk]] when a chunk ends mid-delimiter.
  */
private def splitPartialMatch[F[_]](
    middleChunked: Boolean,
    currState: Int,
    i: Int,
    acc: Stream[F, Byte],
    carry: Stream[F, Byte],
    c: Chunk[Byte],
): (Int, Stream[F, Byte], Stream[F, Byte]) = {
  // Index where the (possible) delimiter starts within this chunk.
  val ixx = i - currState
  if (middleChunked) {
    val (lchunk, rchunk) = c.splitAt(ixx)
    (currState, acc ++ carry ++ Stream.chunk(lchunk), Stream.chunk(rchunk))
  } else
    (currState, acc, carry ++ Stream.chunk(c))
}
/** Split a chunk as part of either a left or right
  * stream depending on the byte sequence in `values`.
  *
  * `state` represents the current counter position
  * for `values`, which is necessary to keep track of in the
  * case of partial matches.
  *
  * `acc` holds the cumulative left stream values,
  * and `carry` holds the values that may possibly
  * be the byte sequence. As such, carry is re-emitted if it was an
  * incomplete match, or ignored (as such excluding the sequence
  * from the subsequent split stream).
  */
private[http4s] def splitOnChunk[F[_]](
    values: Array[Byte],
    state: Int,
    c: Chunk[Byte],
    acc: Stream[F, Byte],
    carry: Stream[F, Byte],
): (Int, Stream[F, Byte], Stream[F, Byte]) = {
  var i = 0
  var currState = state
  val len = values.length
  // Advance the delimiter-match state machine over this chunk.
  while (currState < len && i < c.size) {
    if (c(i) == values(currState))
      currState += 1
    else if (c(i) == values(0))
      currState = 1
    else
      currState = 0
    i += 1
  }
  // It will only be zero if
  // the chunk matches from the very beginning,
  // since currstate can never be greater than
  // (i + state).
  val middleChunked = i + state - currState > 0
  if (currState == 0)
    // No match at all: everything (including the old carry) is plain data.
    (0, acc ++ carry ++ Stream.chunk(c), Stream.empty)
  else if (currState == len)
    splitCompleteMatch(middleChunked, currState, i, acc, carry, c)
  else
    splitPartialMatch(middleChunked, currState, i, acc, carry, c)
}
/** Split a stream in half based on `values`,
  * but check if it is either double dash terminated (end of multipart).
  * SplitOrFinish also tracks a header limit size
  *
  * If it is, drain the epilogue and return the empty stream. if it is not,
  * split on the `values` and raise an error if we lack a match
  */
private def splitOrFinish[F[_]: Concurrent](
    values: Array[Byte],
    stream: Stream[F, Byte],
    limit: Int,
): SplitStream[F] = {
  // Check if a particular chunk a final chunk, that is,
  // whether it's the boundary plus an extra "--", indicating it's
  // the last boundary
  def checkIfLast(c: Chunk[Byte], rest: Stream[F, Byte]): SplitStream[F] = {
    // precond: both c1 and c2 are nonempty chunks
    def checkTwoNonEmpty(
        c1: Chunk[Byte],
        c2: Chunk[Byte],
        remaining: Stream[F, Byte],
    ): SplitStream[F] =
      if (c1(0) == dashByte && c2(0) == dashByte)
        // Drain the multipart epilogue.
        Pull.eval(rest.compile.drain) *>
          Pull.pure((streamEmpty, streamEmpty))
      else {
        // Not terminal: re-join the two chunks and resume the normal split.
        val (ix, l, r, add) =
          splitOnChunkLimited[F](
            values,
            0,
            Chunk.array(c1.toArray[Byte] ++ c2.toArray[Byte]),
            Stream.empty,
            Stream.empty,
          )
        go(remaining, ix, l, r, add)
      }
    if (c.size == 1)
      // A single byte is not enough to decide "--"; pull one more chunk.
      rest.pull.uncons.flatMap {
        case Some((chnk, remaining)) =>
          checkTwoNonEmpty(c, chnk, remaining)
        case None =>
          Pull.raiseError[F](MalformedMessageBodyFailure("Malformed Multipart ending"))
      }
    else if (c(0) == dashByte && c(1) == dashByte)
      // Drain the multipart epilogue.
      Pull.eval(rest.compile.drain) *>
        Pull.pure((streamEmpty, streamEmpty))
    else {
      val (ix, l, r, add) =
        splitOnChunkLimited[F](values, 0, c, Stream.empty, Stream.empty)
      go(rest, ix, l, r, add)
    }
  }
  // Accumulates the left side until `values` is fully matched, failing once
  // more than `limit` bytes of header data have been seen.
  def go(
      s: Stream[F, Byte],
      state: Int,
      lacc: Stream[F, Byte],
      racc: Stream[F, Byte],
      limitCTR: Int,
  ): SplitStream[F] =
    if (limitCTR >= limit)
      Pull.raiseError[F](
        MalformedMessageBodyFailure(s"Part header was longer than $limit-byte limit")
      )
    else if (state == values.length)
      Pull.pure((lacc, racc ++ s))
    else
      s.pull.uncons.flatMap {
        case Some((chnk, str)) =>
          val (ix, l, r, add) = splitOnChunkLimited[F](values, state, chnk, lacc, racc)
          go(str, ix, l, r, limitCTR + add)
        case None =>
          Pull.raiseError[F](MalformedMessageBodyFailure("Invalid boundary - partial boundary"))
      }
  stream.pull.uncons.flatMap {
    case Some((chunk, rest)) =>
      checkIfLast(chunk, rest)
    case None =>
      Pull.raiseError[F](MalformedMessageBodyFailure("Invalid boundary - partial boundary"))
  }
}
/** Take the stream of headers separated by
  * double CRLF bytes and return the headers.
  *
  * Each CRLF-separated line of the form "Name: value" becomes one raw header;
  * lines without a ':' are ignored.
  */
private def parseHeaders[F[_]: Concurrent](strim: Stream[F, Byte]): F[Headers] = {
  def tailrecParse(s: Stream[F, Byte], headers: Headers): Pull[F, Headers, Unit] =
    splitHalf[F](CRLFBytesN, s).flatMap { case (l, r) =>
      l.through(fs2.text.utf8.decode[F])
        .fold("")(_ ++ _)
        .map { string =>
          val ix = string.indexOf(':')
          if (ix >= 0)
            headers.put(
              Header.Raw(CIString(string.substring(0, ix)), string.substring(ix + 1).trim)
            )
          else
            // No colon: not a header line; keep what we have.
            headers
        }
        .pull
        .echo >> r.pull.uncons.flatMap {
        case Some(_) =>
          tailrecParse(r, headers)
        case None =>
          Pull.done
      }
    }
  tailrecParse(strim, Headers.empty).stream.compile.foldMonoid
}
/** Split our `Stream[F, Byte]` into two halves on `values`.
  * If we reach the end and the state is 0 (meaning we didn't match at all),
  * then we return the concatenated parts of the stream.
  *
  * This method _always_ terminates: unlike `splitOrFinish`, running out of
  * input without a complete match is not an error — the whole input is
  * returned as the left side and the right side is empty.
  */
private def splitHalf[F[_]](values: Array[Byte], stream: Stream[F, Byte]): SplitStream[F] = {
  def go(
      s: Stream[F, Byte],
      state: Int,
      lacc: Stream[F, Byte],
      racc: Stream[F, Byte],
  ): SplitStream[F] =
    if (state == values.length)
      Pull.pure((lacc, racc ++ s))
    else
      s.pull.uncons.flatMap {
        case Some((chnk, str)) =>
          val (ix, l, r) = splitOnChunk[F](values, state, chnk, lacc, racc)
          go(str, ix, l, r)
        case None =>
          // We got to the end, and matched on nothing.
          Pull.pure((lacc ++ racc, streamEmpty))
      }
  stream.pull.uncons.flatMap {
    case Some((chunk, rest)) =>
      val (ix, l, r) = splitOnChunk[F](values, 0, chunk, Stream.empty, Stream.empty)
      go(rest, ix, l, r)
    case None =>
      Pull.pure((streamEmpty, streamEmpty))
  }
}
/** Split a chunk in the case of a complete match:
  *
  * If it is a chunk that is between a partial match
  * (middleChunked), consider the prior partial match
  * as part of the data to emit.
  *
  * If it is a fully matched, fresh chunk (no carry over partial match),
  * emit everything until the match, and everything after the match.
  *
  * If it is the continuation of a partial match,
  * emit everything after the partial match.
  *
  * Like [[splitCompleteMatch]], but the fourth tuple element reports how many
  * bytes were consumed, for the caller's header-size limit accounting.
  */
private def splitCompleteLimited[F[_]](
    state: Int,
    middleChunked: Boolean,
    sti: Int,
    i: Int,
    acc: Stream[F, Byte],
    carry: Stream[F, Byte],
    c: Chunk[Byte],
): (Int, Stream[F, Byte], Stream[F, Byte], Int) =
  if (middleChunked)
    (
      sti,
      // Emit the partial match as well
      acc ++ carry ++ Stream.chunk(c.take(i - sti)),
      // Emit after the match
      Stream.chunk(c.drop(i)),
      state + i - sti,
    )
  else
    (
      sti,
      acc, // block completes partial match, so do not emit carry
      Stream.chunk(c.drop(i)), // Emit everything after the match
      0,
    )
/** Split a chunk in the case of a partial match:
  *
  * If it is a chunk that is between a partial match
  * (middle chunked), the prior partial match is added to
  * the accumulator, and the current partial match is
  * considered to carry over.
  *
  * If it is a fresh chunk (no carry over partial match),
  * everything prior to the partial match is added to the accumulator,
  * and the partial match is considered the carry over.
  *
  * Else, if the whole block is a partial match,
  * add it to the carry over
  *
  * The fourth tuple element is the number of bytes counted toward the
  * caller's size limit.
  */
private[http4s] def splitPartialLimited[F[_]](
    state: Int,
    middleChunked: Boolean,
    currState: Int,
    i: Int,
    acc: Stream[F, Byte],
    carry: Stream[F, Byte],
    c: Chunk[Byte],
): (Int, Stream[F, Byte], Stream[F, Byte], Int) = {
  // Index where the (possible) delimiter starts within this chunk.
  val ixx = i - currState
  if (middleChunked) {
    val (lchunk, rchunk) = c.splitAt(ixx)
    (
      currState,
      acc ++ carry ++ Stream.chunk(lchunk), // Emit previous carry
      Stream.chunk(rchunk),
      state + ixx,
    )
  } else
    // Whole thing is partial match
    (currState, acc, carry ++ Stream.chunk(c), 0)
}
/** Like [[splitOnChunk]], but additionally returns (as the fourth tuple
  * element) the number of bytes to count toward the caller's size limit.
  */
private[http4s] def splitOnChunkLimited[F[_]](
    values: Array[Byte],
    state: Int,
    c: Chunk[Byte],
    acc: Stream[F, Byte],
    carry: Stream[F, Byte],
): (Int, Stream[F, Byte], Stream[F, Byte], Int) = {
  var i = 0
  var currState = state
  val len = values.length
  // Advance the delimiter-match state machine over this chunk.
  while (currState < len && i < c.size) {
    if (c(i) == values(currState))
      currState += 1
    else if (c(i) == values(0))
      currState = 1
    else
      currState = 0
    i += 1
  }
  // It will only be zero if
  // the chunk matches from the very beginning,
  // since currstate can never be greater than
  // (i + state).
  val middleChunked = i + state - currState > 0
  if (currState == 0)
    (0, acc ++ carry ++ Stream.chunk(c), Stream.empty, i)
  else if (currState == len)
    splitCompleteLimited(state, middleChunked, currState, i, acc, carry, c)
  else
    splitPartialLimited(state, middleChunked, currState, i, acc, carry, c)
}
// //////////////////////////////////////////////////////////
// File writing encoder
// /////////////////////////////////////////////////////////
/** Same as the other streamed parsing, except
  * after a particular size (`maxSizeBeforeWrite` bytes per part), it buffers
  * on a File. At most `maxParts` parts are read; `failOnLimit` chooses between
  * erroring and truncating when that limit is hit.
  */
@deprecated("Use parseSupervisedFile", "0.23")
def parseStreamedFile[F[_]: Concurrent: Files](
    boundary: Boundary,
    limit: Int = 1024,
    maxSizeBeforeWrite: Int = 52428800,
    maxParts: Int = 20,
    failOnLimit: Boolean = false,
): Pipe[F, Byte, Multipart[F]] = { st =>
  st.through(
    parseToPartsStreamedFile(boundary, limit, maxSizeBeforeWrite, maxParts, failOnLimit)
  ).fold(Vector.empty[Part[F]])(_ :+ _)
    .map(Multipart(_, boundary))
}
/** File-backed variant of `parseToPartsStream`: part bodies larger than
  * `maxSizeBeforeWrite` are spilled to a temp file via `partBodyFileStream`.
  */
@deprecated("Use parseSupervisedFile", "0.23")
def parseToPartsStreamedFile[F[_]: Concurrent: Files](
    boundary: Boundary,
    limit: Int = 1024,
    maxSizeBeforeWrite: Int = 52428800,
    maxParts: Int = 20,
    failOnLimit: Boolean = false,
): Pipe[F, Byte, Part[F]] = {
  // Turns the event stream into parts, one Pull iteration per part.
  val pullParts: Stream[F, Event] => Stream[F, Part[F]] =
    Pull
      .loop[F, Part[F], Stream[F, Event]](
        _.pull.uncons1.flatMap(
          _.traverse {
            case (PartStart(headers), s) =>
              partBodyFileStream(s, maxSizeBeforeWrite)
                .flatMap { case (body, rest) =>
                  Pull.output1(Part(headers, Entity(body))).as(rest)
                }
            // Shouldn't happen if the `parseToEventsStream` contract holds.
            case (_: PartChunk | PartEnd, _) =>
              Pull.raiseError(bug("Missing PartStart"))
          }
        )
      )(_)
      .stream
  _.through(
    parseEvents[F](boundary, limit)
  ).through(
    limitParts[F](maxParts, failOnLimit)
  ).through(pullParts)
}
/** Caps the number of parts in the event stream at `maxParts`.
  * When the limit is exceeded: raises `MalformedMessageBodyFailure` if
  * `failOnLimit`, otherwise silently drops the remaining parts.
  */
private[this] def limitParts[F[_]: RaiseThrowable](
    maxParts: Int,
    failOnLimit: Boolean,
): Pipe[F, Event, Event] = {
  def go(st: Stream[F, Event], partsCounter: Int): Pull[F, Event, Unit] =
    st.pull.uncons1.flatMap {
      case Some((event: PartStart, rest)) =>
        if (partsCounter < maxParts) {
          Pull.output1(event) >> go(rest, partsCounter + 1)
        } else if (failOnLimit) {
          Pull.raiseError[F](MalformedMessageBodyFailure("Parts limit exceeded"))
        } else Pull.pure(())
      case Some((event, rest)) =>
        // Chunk/end events pass through without affecting the count.
        Pull.output1(event) >> go(rest, partsCounter)
      case None => Pull.pure(())
    }
  go(_, 0).stream
}
// Consume `PartChunk`s until the first `PartEnd`, produce a stream with all the consumed data.
// Bodies up to `maxBeforeWrite` bytes stay in memory; larger bodies are
// spilled to a temp file and re-read from it.
private[this] def partBodyFileStream[F[_]: Concurrent: Files](
    stream: Stream[F, Event],
    maxBeforeWrite: Int,
): Pull[F, Nothing, (Stream[F, Byte], Stream[F, Event])] = {
  // Consume `PartChunk`s until the first `PartEnd`, and write all the data into the file.
  def streamAndWrite(
      s: Stream[F, Event],
      lacc: Stream[Pure, Byte],
      limitCTR: Int,
      fileRef: Path,
  ): Pull[F, Nothing, Stream[F, Event]] =
    if (limitCTR >= maxBeforeWrite)
      // Flush the in-memory buffer to the file and keep going.
      Pull.eval(
        lacc
          .through(Files[F].writeAll(fileRef, Flags.Append))
          .compile
          .drain
      ) >> streamAndWrite(s, Stream.empty, 0, fileRef)
    else
      s.pull.uncons1.flatMap {
        case Some((PartChunk(chnk), str)) =>
          streamAndWrite(str, lacc ++ Stream.chunk(chnk), limitCTR + chnk.size, fileRef)
        case Some((PartEnd, str)) =>
          // Final flush; the remaining event stream is the result.
          Pull
            .eval(
              lacc
                .through(Files[F].writeAll(fileRef, Flags.Append))
                .compile
                .drain
            )
            .as(str)
        // Shouldn't happen if the `parseToEventsStream` contract holds.
        case Some((_: PartStart, _)) | None =>
          Pull.raiseError(bug("Missing PartEnd"))
      }
  // Consume `PartChunks` until the first `PartEnd`, accumulating the data in memory.
  // Produce a stream with all the accumulated data.
  // Fall back to `streamAndWrite` after the limit is reached
  def go(
      s: Stream[F, Event],
      lacc: Stream[Pure, Byte],
      limitCTR: Int,
  ): Pull[F, Nothing, (Stream[F, Byte], Stream[F, Event])] =
    if (limitCTR >= maxBeforeWrite)
      Pull
        .eval(Files[F].tempFile(None, "", "", None).allocated)
        .flatMap { case (path, cleanup) =>
          streamAndWrite(s, lacc, limitCTR, path)
            // The temp file is deleted when the returned body stream finishes.
            .tupleLeft(Files[F].readAll(path, maxBeforeWrite, Flags.Read).onFinalizeWeak(cleanup))
            .onError { case _ => Pull.eval(cleanup) }
        }
    else
      s.pull.uncons1.flatMap {
        case Some((PartChunk(chnk), str)) =>
          go(str, lacc ++ Stream.chunk(chnk), limitCTR + chnk.size)
        case Some((PartEnd, str)) =>
          Pull.pure((lacc, str))
        // Shouldn't happen if the `parseToEventsStream` contract holds.
        case Some((_: PartStart, _)) | None =>
          Pull.raiseError(bug("Missing PartEnd"))
      }
  go(stream, Stream.empty, 0)
}
// ///////////////////////////////////
// Resource-safe file-based parser //
// ///////////////////////////////////
/** Like parseStreamedFile, but the produced parts' resources are managed by the supervisor.
  */
private[multipart] def parseSupervisedFile[F[_]: Concurrent: Files](
    supervisor: Supervisor[F],
    boundary: Boundary,
    limit: Int = 1024,
    maxSizeBeforeWrite: Int = 52428800,
    maxParts: Int = 20,
    failOnLimit: Boolean = false,
    chunkSize: Int = 8192,
): Pipe[F, Byte, Multipart[F]] = { st =>
  st.through(
    parseToPartsSupervisedFile(
      supervisor,
      boundary,
      limit,
      maxSizeBeforeWrite,
      maxParts,
      failOnLimit,
      chunkSize,
    )
  ).fold(Vector.empty[Part[F]])(_ :+ _)
    .map(Multipart(_, boundary))
}
/** Like `parseToPartsStreamedFile`, but temp files backing large parts are
  * owned by `supervisor`, so their lifetime outlives the parsing stream and
  * is cleaned up when the supervisor's scope closes.
  */
private[multipart] def parseToPartsSupervisedFile[F[_]](
    supervisor: Supervisor[F],
    boundary: Boundary,
    limit: Int = 1024,
    maxSizeBeforeWrite: Int = 52428800,
    maxParts: Int = 20,
    failOnLimit: Boolean = false,
    chunkSize: Int = 8192,
)(implicit F: Concurrent[F], files: Files[F]): Pipe[F, Byte, Part[F]] = {
  // Acquires a temp file whose deletion is deferred to the supervisor.
  val createFile = superviseResource(supervisor, files.tempFile)
  def append(file: Path, bytes: Stream[Pure, Byte]): F[Unit] =
    bytes.through(files.writeAll(file, Flags.Append)).compile.drain
  // In-memory buffer (`bytes`/`bytesSize`) plus the optional spill file.
  final case class Acc(file: Option[Path], bytes: Stream[Pure, Byte], bytesSize: Int)
  def stepPartChunk(oldAcc: Acc, chunk: Chunk[Byte]): F[Acc] = {
    val newSize = oldAcc.bytesSize + chunk.size
    val newBytes = oldAcc.bytes ++ Stream.chunk(chunk)
    if (newSize > maxSizeBeforeWrite) {
      // Buffer exceeded: flush to the (possibly new) spill file.
      oldAcc.file
        .fold(createFile)(F.pure)
        .flatTap(append(_, newBytes))
        .map(newFile => Acc(Some(newFile), Stream.empty, 0))
    } else F.pure(Acc(oldAcc.file, newBytes, newSize))
  }
  val stepPartEnd: Acc => F[Stream[F, Byte]] = {
    case Acc(None, bytes, _) => F.pure(bytes)
    case Acc(Some(file), bytes, size) =>
      // Flush any trailing buffered bytes, then serve the body from the file.
      append(file, bytes)
        .whenA(size > 0)
        .as(
          files.readAll(file, chunkSize = chunkSize, Flags.Read)
        )
  }
  val step: (Option[(Headers, Acc)], Event) => F[(Option[(Headers, Acc)], Option[Part[F]])] = {
    case (None, PartStart(headers)) =>
      val newAcc = Acc(None, Stream.empty, 0)
      F.pure((Some((headers, newAcc)), None))
    // Shouldn't happen if the `parseToEventsStream` contract holds.
    case (None, (_: PartChunk | PartEnd)) =>
      F.raiseError(bug("Missing PartStart"))
    case (Some((headers, oldAcc)), PartChunk(chunk)) =>
      stepPartChunk(oldAcc, chunk).map { newAcc =>
        (Some((headers, newAcc)), None)
      }
    case (Some((headers, acc)), PartEnd) =>
      // Part done - emit it and start over.
      stepPartEnd(acc)
        .map(body => (None, Some(Part(headers, Entity(body)))))
    // Shouldn't happen if the `parseToEventsStream` contract holds.
    case (Some(_), _: PartStart) =>
      F.raiseError(bug("Missing PartEnd"))
  }
  _.through(
    parseEvents(boundary, limit)
  ).through(
    limitParts(maxParts, failOnLimit)
  ).evalMapAccumulate(none[(Headers, Acc)])(step)
    .mapFilter(_._2)
}
// Acquire the resource in a separate fiber, which will remain running until the provided
// supervisor sees fit to cancel it. The resulting action waits for the resource to be acquired.
private[this] def superviseResource[F[_], A](
    supervisor: Supervisor[F],
    resource: Resource[F, A],
)(implicit F: Concurrent[F]): F[A] =
  F.deferred[Either[Throwable, A]].flatMap { deferred =>
    supervisor.supervise[Nothing](
      resource.attempt
        // Hand the acquisition result (success or failure) back to the caller.
        .evalTap(deferred.complete)
        // In case of an error the exception brings down the fiber.
        .rethrow
        // Success - keep the resource alive until the supervisor cancels this fiber.
        .useForever
    ) *> deferred.get.rethrow
  }
// //////////////////////////
// Streaming event parser //
// //////////////////////////
/** Parse a stream of bytes into a stream of part events. The events come in the following order:
  *
  * - one `PartStart`;
  * - any number of `PartChunk`s;
  * - one `PartEnd`.
  *
  * Any number of such sequences may be produced.
  *
  * `headerLimit` bounds the byte size of each part's header block.
  */
private[this] def parseEvents[F[_]: Concurrent](
    boundary: Boundary,
    headerLimit: Int,
): Pipe[F, Byte, Event] =
  skipPrelude(boundary, _)
    .flatMap(pullPartsEvents(boundary, _, headerLimit))
    .stream
/** Drain the prelude and remove the first boundary. Only traverses until the first
  * part.
  *
  * Raises `MalformedMessageBodyFailure` if the stream ends before the first
  * "--boundary" line is found.
  */
private[this] def skipPrelude[F[_]: Concurrent](
    boundary: Boundary,
    stream: Stream[F, Byte],
): Pull[F, Nothing, Stream[F, Byte]] = {
  val dashBoundaryBytes = StartLineBytesN(boundary)
  def go(s: Stream[F, Byte], state: Int): Pull[F, Nothing, Stream[F, Byte]] =
    s.pull.uncons.flatMap {
      case Some((chnk, rest)) =>
        val (ix, remainder) = splitAndIgnorePrev(dashBoundaryBytes, state, chnk)
        if (ix === dashBoundaryBytes.length) Pull.pure(remainder ++ rest)
        else go(rest, ix)
      case None =>
        // Fix: the message previously read "Malformed Malformed match" (duplicated word).
        Pull.raiseError[F](MalformedMessageBodyFailure("Malformed match"))
    }
  go(stream, 0)
}
/** Pull part events for parts until the end of the stream.
  * Iterates `pullPartEvents` once per part, splitting headers from bodies on
  * double-CRLF, until `splitOrFinish` signals the terminal "--" boundary.
  */
private[this] def pullPartsEvents[F[_]: Concurrent](
    boundary: Boundary,
    stream: Stream[F, Byte],
    headerLimit: Int,
): Pull[F, Event, Unit] = {
  val delimiterBytes = ExpectedBytesN(boundary)
  // Headers on the left, the remainder on the right.
  type Acc = (Stream[F, Byte], Stream[F, Byte])
  val pullPartEvents0: Acc => Pull[F, Event, Stream[F, Byte]] =
    (pullPartEvents[F](_, _, delimiterBytes)).tupled
  splitOrFinish[F](DoubleCRLFBytesN, stream, headerLimit)
    // We must have at least one part.
    .ensure(MalformedMessageBodyFailure("Cannot parse empty stream")) {
      // We can abuse reference equality here for efficiency, since `splitOrFinish`
      // returns `empty` on a capped stream.
      case (_, rest) => rest != streamEmpty
    }
    .flatMap(
      _.iterateWhileM { acc =>
        pullPartEvents0(acc).flatMap(
          splitOrFinish(
            DoubleCRLFBytesN,
            _,
            headerLimit,
          )
        )
      } { case (_, rest) => rest != streamEmpty }.void
    )
}
/** Pulls part events for a single part: one `PartStart` with the parsed
  * headers, the body's `PartChunk`s, then `PartEnd`. Returns the stream
  * remaining after the part's delimiter.
  */
private[this] def pullPartEvents[F[_]: Concurrent](
    headerStream: Stream[F, Byte],
    rest: Stream[F, Byte],
    delimiterBytes: Array[Byte],
): Pull[F, Event, Stream[F, Byte]] =
  Pull
    .eval(parseHeaders(headerStream))
    .flatMap(headers => Pull.output1(PartStart(headers): Event))
    .productR(pullPartChunks(delimiterBytes, rest))
    .flatMap { case rest =>
      // We hit a boundary, but the rest of the stream is empty
      // and thus it's not a properly capped multipart body
      if (rest == streamEmpty)
        Pull.raiseError[F](MalformedMessageBodyFailure("Part not terminated properly"))
      else
        Pull.output1(PartEnd).as(rest)
    }
/** Split the stream on `delimiterBytes`, emitting the left part as `PartChunk` events.
  * Returns the stream remaining after the delimiter; raises
  * `MalformedMessageBodyFailure` if input ends before the delimiter is seen.
  */
private[this] def pullPartChunks[F[_]: Concurrent](
    delimiterBytes: Array[Byte],
    stream: Stream[F, Byte],
): Pull[F, PartChunk, Stream[F, Byte]] = {
  def go(
      s: Stream[F, Byte],
      state: Int,
      racc: Stream[F, Byte],
  ): Pull[F, PartChunk, Stream[F, Byte]] =
    if (state == delimiterBytes.length)
      Pull.pure(racc ++ s)
    else
      s.pull.uncons.flatMap {
        case Some((chnk, rest)) =>
          val (ix, l, r) = splitOnChunk[F](delimiterBytes, state, chnk, Stream.empty, racc)
          // Emit the data seen so far; `r` carries a possible partial delimiter.
          l.chunks.map(PartChunk(_)).pull.echo >> {
            if (ix == delimiterBytes.length) Pull.pure(r ++ rest)
            else go(rest, ix, r)
          }
        case None =>
          Pull.raiseError[F](MalformedMessageBodyFailure("Invalid boundary - partial boundary"))
      }
  go(stream, 0, Stream.empty)
}
}
| http4s/http4s | core/shared/src/main/scala/org/http4s/multipart/MultipartParser.scala | Scala | apache-2.0 | 29,276 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import java.io.File
import java.net.URI
import org.apache.log4j.Level
import org.scalatest.PrivateMethodTester
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
import org.apache.spark.sql.execution.{PartialReducerPartitionSpec, QueryExecution, ReusedSubqueryExec, ShuffledRowRDD, SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.command.DataWritingCommandExec
import org.apache.spark.sql.execution.datasources.noop.NoopDataSource
import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ENSURE_REQUIREMENTS, Exchange, REPARTITION, REPARTITION_WITH_NUM, ReusedExchangeExec, ShuffleExchangeExec, ShuffleExchangeLike, ShuffleOrigin}
import org.apache.spark.sql.execution.joins.{BaseJoinExec, BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLShuffleReadMetricsReporter
import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.test.SQLTestData.TestData
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.QueryExecutionListener
import org.apache.spark.util.Utils
class AdaptiveQueryExecSuite
extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper
with PrivateMethodTester {
import testImplicits._
// Registers the shared fixture tables (testData, testData2, testData3, ...) queried below.
setupTestData()
/** Runs `query` with AQE on, checks its result against a non-AQE run of the
  * same query, verifies the expected number of final-plan listener events,
  * and asserts the final adaptive plan contains no Exchange nodes.
  * Returns (initial spark plan, final adaptive plan).
  */
private def runAdaptiveAndVerifyResult(query: String): (SparkPlan, SparkPlan) = {
  var finalPlanCnt = 0
  val listener = new SparkListener {
    override def onOtherEvent(event: SparkListenerEvent): Unit = {
      event match {
        case SparkListenerSQLAdaptiveExecutionUpdate(_, _, sparkPlanInfo) =>
          if (sparkPlanInfo.simpleString.startsWith(
              "AdaptiveSparkPlan isFinalPlan=true")) {
            finalPlanCnt += 1
          }
        case _ => // ignore other events
      }
    }
  }
  spark.sparkContext.addSparkListener(listener)
  val dfAdaptive = sql(query)
  val planBefore = dfAdaptive.queryExecution.executedPlan
  assert(planBefore.toString.startsWith("AdaptiveSparkPlan isFinalPlan=false"))
  val result = dfAdaptive.collect()
  // Cross-check the AQE result against the same query run without AQE.
  withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
    val df = sql(query)
    checkAnswer(df, result)
  }
  val planAfter = dfAdaptive.queryExecution.executedPlan
  assert(planAfter.toString.startsWith("AdaptiveSparkPlan isFinalPlan=true"))
  val adaptivePlan = planAfter.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
  spark.sparkContext.listenerBus.waitUntilEmpty()
  // AQE will post `SparkListenerSQLAdaptiveExecutionUpdate` twice in case of subqueries that
  // exist out of query stages.
  val expectedFinalPlanCnt = adaptivePlan.find(_.subqueries.nonEmpty).map(_ => 2).getOrElse(1)
  assert(finalPlanCnt == expectedFinalPlanCnt)
  spark.sparkContext.removeSparkListener(listener)
  val exchanges = adaptivePlan.collect {
    case e: Exchange => e
  }
  assert(exchanges.isEmpty, "The final plan should not contain any Exchange node.")
  (dfAdaptive.queryExecution.sparkPlan, adaptivePlan)
}
/** Collects every BroadcastHashJoinExec node in the plan (pre-order). */
private def findTopLevelBroadcastHashJoin(plan: SparkPlan): Seq[BroadcastHashJoinExec] =
  collect(plan) { case bhj: BroadcastHashJoinExec => bhj }
/** Collects every SortMergeJoinExec node in the plan (pre-order). */
private def findTopLevelSortMergeJoin(plan: SparkPlan): Seq[SortMergeJoinExec] =
  collect(plan) { case smj: SortMergeJoinExec => smj }
/** Collects every join node (any BaseJoinExec subclass) in the plan. */
private def findTopLevelBaseJoin(plan: SparkPlan): Seq[BaseJoinExec] =
  collect(plan) { case join: BaseJoinExec => join }
/** Collects every ReusedExchangeExec wrapped in a shuffle or broadcast query
  * stage, searching subqueries as well.
  */
private def findReusedExchange(plan: SparkPlan): Seq[ReusedExchangeExec] = {
  collectWithSubqueries(plan) {
    case ShuffleQueryStageExec(_, e: ReusedExchangeExec, _) => e
    case BroadcastQueryStageExec(_, e: ReusedExchangeExec, _) => e
  }
}
/** Collects every ReusedSubqueryExec node, searching subqueries as well. */
private def findReusedSubquery(plan: SparkPlan): Seq[ReusedSubqueryExec] =
  collectWithSubqueries(plan) { case reused: ReusedSubqueryExec => reused }
/** Asserts that every shuffle stage except `numShufflesWithoutLocalReader` of
  * them is read by a local shuffle reader, and that each local reader's RDD
  * partitions all have preferred locations.
  */
private def checkNumLocalShuffleReaders(
    plan: SparkPlan, numShufflesWithoutLocalReader: Int = 0): Unit = {
  val numShuffles = collect(plan) {
    case s: ShuffleQueryStageExec => s
  }.length
  val numLocalReaders = collect(plan) {
    case reader: CustomShuffleReaderExec if reader.isLocalReader => reader
  }
  numLocalReaders.foreach { r =>
    val rdd = r.execute()
    val parts = rdd.partitions
    // Local readers should always report preferred (mapper-local) locations.
    assert(parts.forall(rdd.preferredLocations(_).nonEmpty))
  }
  assert(numShuffles === (numLocalReaders.length + numShufflesWithoutLocalReader))
}
/** Asserts the adaptive plan contains exactly one shuffle whose output
  * partitioning has `numPartition` partitions.
  */
private def checkInitialPartitionNum(df: Dataset[_], numPartition: Int): Unit = {
  // repartition obeys initialPartitionNum when adaptiveExecutionEnabled
  val plan = df.queryExecution.executedPlan
  assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
  val shuffle = plan.asInstanceOf[AdaptiveSparkPlanExec].executedPlan.collect {
    case s: ShuffleExchangeExec => s
  }
  assert(shuffle.size == 1)
  assert(shuffle(0).outputPartitioning.numPartitions == numPartition)
}
test("Change merge join to broadcast join") {
  withSQLConf(
    SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
    SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
    val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
      "SELECT * FROM testData join testData2 ON key = a where value = '1'")
    // Static planning picks a sort-merge join...
    val smj = findTopLevelSortMergeJoin(plan)
    assert(smj.size == 1)
    // ...but AQE's runtime stats re-plan it as a broadcast hash join.
    val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
    assert(bhj.size == 1)
    checkNumLocalShuffleReaders(adaptivePlan)
  }
}
test("Reuse the parallelism of CoalescedShuffleReaderExec in LocalShuffleReaderExec") {
  withSQLConf(
    SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
    SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
    SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
    val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
      "SELECT * FROM testData join testData2 ON key = a where value = '1'")
    val smj = findTopLevelSortMergeJoin(plan)
    assert(smj.size == 1)
    val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
    assert(bhj.size == 1)
    // Both sides of the (now broadcast) join get local shuffle readers.
    val localReaders = collect(adaptivePlan) {
      case reader: CustomShuffleReaderExec if reader.isLocalReader => reader
    }
    assert(localReaders.length == 2)
    val localShuffleRDD0 = localReaders(0).execute().asInstanceOf[ShuffledRowRDD]
    val localShuffleRDD1 = localReaders(1).execute().asInstanceOf[ShuffledRowRDD]
    // The pre-shuffle partition size is [0, 0, 0, 72, 0]
    // We exclude the 0-size partitions, so only one partition, advisoryParallelism = 1
    // the final parallelism is
    // math.max(1, advisoryParallelism / numMappers): math.max(1, 1/2) = 1
    // and the partitions length is 1 * numMappers = 2
    assert(localShuffleRDD0.getPartitions.length == 2)
    // The pre-shuffle partition size is [0, 72, 0, 72, 126]
    // We exclude the 0-size partitions, so only 3 partition, advisoryParallelism = 3
    // the final parallelism is
    // math.max(1, advisoryParallelism / numMappers): math.max(1, 3/2) = 1
    // and the partitions length is 1 * numMappers = 2
    assert(localShuffleRDD1.getPartitions.length == 2)
  }
}
test("Reuse the default parallelism in LocalShuffleReaderExec") {
  withSQLConf(
    SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
    SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
    // With coalescing disabled, local readers fall back to the shuffle's
    // original reducer count.
    SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false") {
    val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
      "SELECT * FROM testData join testData2 ON key = a where value = '1'")
    val smj = findTopLevelSortMergeJoin(plan)
    assert(smj.size == 1)
    val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
    assert(bhj.size == 1)
    val localReaders = collect(adaptivePlan) {
      case reader: CustomShuffleReaderExec if reader.isLocalReader => reader
    }
    assert(localReaders.length == 2)
    val localShuffleRDD0 = localReaders(0).execute().asInstanceOf[ShuffledRowRDD]
    val localShuffleRDD1 = localReaders(1).execute().asInstanceOf[ShuffledRowRDD]
    // the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
    // and the partitions length is 2 * numMappers = 4
    assert(localShuffleRDD0.getPartitions.length == 4)
    // the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
    // and the partitions length is 2 * numMappers = 4
    assert(localShuffleRDD1.getPartitions.length == 4)
  }
}
  test("Empty stage coalesced to 1-partition RDD") {
    // When both join inputs are empty after filtering, partition coalescing
    // should collapse each empty shuffle stage down to a single partition.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true") {
      val df1 = spark.range(10).withColumn("a", 'id)
      val df2 = spark.range(10).withColumn("b", 'id)
      // Case 1: broadcast disabled, so the empty join stays a sort merge join.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
        val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
          .groupBy('a).count()
        checkAnswer(testDf, Seq())
        val plan = testDf.queryExecution.executedPlan
        assert(find(plan)(_.isInstanceOf[SortMergeJoinExec]).isDefined)
        val coalescedReaders = collect(plan) {
          case r: CustomShuffleReaderExec => r
        }
        assert(coalescedReaders.length == 3)
        // Each reader over an empty stage is coalesced to exactly one partition.
        coalescedReaders.foreach(r => assert(r.partitionSpecs.length == 1))
      }
      // Case 2: broadcast enabled, so AQE may also introduce local readers.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1") {
        val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
          .groupBy('a).count()
        checkAnswer(testDf, Seq())
        val plan = testDf.queryExecution.executedPlan
        assert(find(plan)(_.isInstanceOf[BroadcastHashJoinExec]).isDefined)
        val coalescedReaders = collect(plan) {
          case r: CustomShuffleReaderExec => r
        }
        assert(coalescedReaders.length == 3, s"$plan")
        coalescedReaders.foreach(r => assert(r.isLocalReader || r.partitionSpecs.length == 1))
      }
    }
  }
test("Scalar subquery") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("Scalar subquery in later stages") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where (value + a) = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
  test("multiple joins") {
    // A three-join query: all three sort merge joins should be replanned to
    // broadcast hash joins, and every shuffle should get a local reader.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        """
          |WITH t4 AS (
          |  SELECT * FROM lowercaseData t2 JOIN testData3 t3 ON t2.n = t3.a where t2.n = '1'
          |)
          |SELECT * FROM testData
          |JOIN testData2 t2 ON key = t2.a
          |JOIN t4 ON t2.b = t4.a
          |WHERE value = 1
        """.stripMargin)
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 3)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 3)
      // A possible resulting query plan:
      // BroadcastHashJoin
      // +- BroadcastExchange
      //    +- LocalShuffleReader*
      //       +- ShuffleExchange
      //          +- BroadcastHashJoin
      //             +- BroadcastExchange
      //                +- LocalShuffleReader*
      //                   +- ShuffleExchange
      //             +- LocalShuffleReader*
      //                +- ShuffleExchange
      // +- BroadcastHashJoin
      //    +- LocalShuffleReader*
      //       +- ShuffleExchange
      //    +- BroadcastExchange
      //       +-LocalShuffleReader*
      //           +- ShuffleExchange

      // After applied the 'OptimizeLocalShuffleReader' rule, we can convert all the four
      // shuffle reader to local shuffle reader in the bottom two 'BroadcastHashJoin'.
      // For the top level 'BroadcastHashJoin', the probe side is not shuffle query stage
      // and the build side shuffle query stage is also converted to local shuffle reader.
      checkNumLocalShuffleReaders(adaptivePlan)
    }
  }
  test("multiple joins with aggregate") {
    // Same shape as "multiple joins", but one input is an aggregate: the
    // shuffle the aggregate requires cannot use a local reader, so exactly
    // one reader stays non-local.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        """
          |WITH t4 AS (
          |  SELECT * FROM lowercaseData t2 JOIN (
          |    select a, sum(b) from testData3 group by a
          |  ) t3 ON t2.n = t3.a where t2.n = '1'
          |)
          |SELECT * FROM testData
          |JOIN testData2 t2 ON key = t2.a
          |JOIN t4 ON t2.b = t4.a
          |WHERE value = 1
        """.stripMargin)
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 3)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 3)
      // A possible resulting query plan:
      // BroadcastHashJoin
      // +- BroadcastExchange
      //    +- LocalShuffleReader*
      //       +- ShuffleExchange
      //          +- BroadcastHashJoin
      //             +- BroadcastExchange
      //                +- LocalShuffleReader*
      //                   +- ShuffleExchange
      //             +- LocalShuffleReader*
      //                +- ShuffleExchange
      // +- BroadcastHashJoin
      //    +- LocalShuffleReader*
      //       +- ShuffleExchange
      //    +- BroadcastExchange
      //       +-HashAggregate
      //          +- CoalescedShuffleReader
      //             +- ShuffleExchange

      // The shuffle added by Aggregate can't apply local reader.
      checkNumLocalShuffleReaders(adaptivePlan, 1)
    }
  }
  test("multiple joins with aggregate 2") {
    // Variant with the aggregate feeding a join key (max(b)); again the
    // aggregate's own shuffle is the single reader that cannot become local.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "500") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        """
          |WITH t4 AS (
          |  SELECT * FROM lowercaseData t2 JOIN (
          |    select a, max(b) b from testData2 group by a
          |  ) t3 ON t2.n = t3.b
          |)
          |SELECT * FROM testData
          |JOIN testData2 t2 ON key = t2.a
          |JOIN t4 ON value = t4.a
          |WHERE value = 1
        """.stripMargin)
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 3)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 3)
      // A possible resulting query plan:
      // BroadcastHashJoin
      // +- BroadcastExchange
      //    +- LocalShuffleReader*
      //       +- ShuffleExchange
      //          +- BroadcastHashJoin
      //             +- BroadcastExchange
      //                +- LocalShuffleReader*
      //                   +- ShuffleExchange
      //             +- LocalShuffleReader*
      //                +- ShuffleExchange
      // +- BroadcastHashJoin
      //    +- Filter
      //       +- HashAggregate
      //          +- CoalescedShuffleReader
      //             +- ShuffleExchange
      //    +- BroadcastExchange
      //       +-LocalShuffleReader*
      //           +- ShuffleExchange

      // The shuffle added by Aggregate can't apply local reader.
      checkNumLocalShuffleReaders(adaptivePlan, 1)
    }
  }
test("Exchange reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT value FROM testData join testData2 ON key = a " +
"join (SELECT value v from testData join testData3 ON key = a) on value = v")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 2)
// There is still a SMJ, and its two shuffles can't apply local reader.
checkNumLocalShuffleReaders(adaptivePlan, 2)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
  test("Exchange reuse with subqueries") {
    // The scalar subquery repeats the main query's join, so its exchange
    // should be reused by the subquery even under adaptive execution.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT a FROM testData join testData2 ON key = a " +
        "where value = (SELECT max(a) from testData join testData2 ON key = a)")
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 1)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 1)
      checkNumLocalShuffleReaders(adaptivePlan)
      // Even with local shuffle reader, the query stage reuse can also work.
      val ex = findReusedExchange(adaptivePlan)
      assert(ex.size == 1)
    }
  }
  test("Exchange reuse across subqueries") {
    // Two identical subqueries with subquery reuse disabled: exchange reuse
    // should still kick in across them, while no subquery is reused.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
      SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT a FROM testData join testData2 ON key = a " +
        "where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
        "and a <= (SELECT max(a) from testData join testData2 ON key = a)")
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 1)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 1)
      checkNumLocalShuffleReaders(adaptivePlan)
      // Even with local shuffle reader, the query stage reuse can also work.
      val ex = findReusedExchange(adaptivePlan)
      assert(ex.nonEmpty)
      val sub = findReusedSubquery(adaptivePlan)
      assert(sub.isEmpty)
    }
  }
test("Subquery reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.isEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.nonEmpty)
}
}
  test("Broadcast exchange reuse across subqueries") {
    // With broadcast hints forcing broadcast joins in both subqueries, the
    // reused exchange should be a BroadcastExchangeExec.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "20000000",
      SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT a FROM testData join testData2 ON key = a " +
        "where value >= (" +
        "SELECT /*+ broadcast(testData2) */ max(key) from testData join testData2 ON key = a) " +
        "and a <= (" +
        "SELECT /*+ broadcast(testData2) */ max(value) from testData join testData2 ON key = a)")
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 1)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 1)
      checkNumLocalShuffleReaders(adaptivePlan)
      // Even with local shuffle reader, the query stage reuse can also work.
      val ex = findReusedExchange(adaptivePlan)
      assert(ex.nonEmpty)
      assert(ex.head.child.isInstanceOf[BroadcastExchangeExec])
      val sub = findReusedSubquery(adaptivePlan)
      assert(sub.isEmpty)
    }
  }
  test("Union/Except/Intersect queries") {
    // Smoke test: set operations should run correctly under AQE; only the
    // result (checked inside runAdaptiveAndVerifyResult) matters here.
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
      runAdaptiveAndVerifyResult(
        """
          |SELECT * FROM testData
          |EXCEPT
          |SELECT * FROM testData2
          |UNION ALL
          |SELECT * FROM testData
          |INTERSECT ALL
          |SELECT * FROM testData2
        """.stripMargin)
    }
  }
  test("Subquery de-correlation in Union queries") {
    // Smoke test: an IN subquery over a union of two temp views should be
    // de-correlated and executed correctly under AQE.
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
      withTempView("a", "b") {
        Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("a")
        Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("b")

        runAdaptiveAndVerifyResult(
          """
            |SELECT id,num,source FROM (
            |  SELECT id, num, 'a' as source FROM a
            |  UNION ALL
            |  SELECT id, num, 'b' as source FROM b
            |) AS c WHERE c.id IN (SELECT id FROM b WHERE num = 2)
          """.stripMargin)
      }
    }
  }
  test("Avoid plan change if cost is greater") {
    // With the output-partitioning expand limit at 0, switching to broadcast
    // join would add shuffles; AQE's cost evaluator should then keep the
    // original sort merge joins.
    val origPlan = sql("SELECT * FROM testData " +
      "join testData2 t2 ON key = t2.a " +
      "join testData2 t3 on t2.a = t3.a where t2.b = 1").queryExecution.executedPlan

    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
      SQLConf.BROADCAST_HASH_JOIN_OUTPUT_PARTITIONING_EXPAND_LIMIT.key -> "0") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT * FROM testData " +
          "join testData2 t2 ON key = t2.a " +
          "join testData2 t3 on t2.a = t3.a where t2.b = 1")
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 2)
      val smj2 = findTopLevelSortMergeJoin(adaptivePlan)
      // Both SMJs survive; origPlan is included in the message for debugging.
      assert(smj2.size == 2, origPlan.toString)
    }
  }
  test("Change merge join to broadcast join without local shuffle reader") {
    // Only one of the two SMJs becomes a broadcast join; the remaining SMJ's
    // two shuffles must keep non-local readers.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.LOCAL_SHUFFLE_READER_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "40") {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        """
          |SELECT * FROM testData t1 join testData2 t2
          |ON t1.key = t2.a join testData3 t3 on t2.a = t3.a
          |where t1.value = 1
        """.stripMargin
      )
      val smj = findTopLevelSortMergeJoin(plan)
      assert(smj.size == 2)
      val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
      assert(bhj.size == 1)
      // There is still a SMJ, and its two shuffles can't apply local reader.
      checkNumLocalShuffleReaders(adaptivePlan, 2)
    }
  }
  test("Avoid changing merge join to broadcast join if too many empty partitions on build plan") {
    // The non-empty-partition ratio gate: a small build side is still not
    // broadcast when too many of its shuffle partitions are empty.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.NON_EMPTY_PARTITION_RATIO_FOR_BROADCAST_JOIN.key -> "0.5") {
      // `testData` is small enough to be broadcast but has empty partition ratio over the config.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
        val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
          "SELECT * FROM testData join testData2 ON key = a where value = '1'")
        val smj = findTopLevelSortMergeJoin(plan)
        assert(smj.size == 1)
        val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
        assert(bhj.isEmpty)
      }

      // It is still possible to broadcast `testData2`.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2000") {
        val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
          "SELECT * FROM testData join testData2 ON key = a where value = '1'")
        val smj = findTopLevelSortMergeJoin(plan)
        assert(smj.size == 1)
        val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
        assert(bhj.size == 1)
        assert(bhj.head.buildSide == BuildRight)
      }
    }
  }
  test("SPARK-29906: AQE should not introduce extra shuffle for outermost limit") {
    // Count stages via a listener: take(1) over a simple range must run as a
    // single stage, i.e. AQE must not add a shuffle for the outermost limit.
    var numStages = 0
    val listener = new SparkListener {
      override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
        numStages = jobStart.stageInfos.length
      }
    }
    try {
      withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
        spark.sparkContext.addSparkListener(listener)
        spark.range(0, 100, 1, numPartitions = 10).take(1)
        spark.sparkContext.listenerBus.waitUntilEmpty()
        // Should be only one stage since there is no shuffle.
        assert(numStages == 1)
      }
    } finally {
      spark.sparkContext.removeSparkListener(listener)
    }
  }
  test("SPARK-30524: Do not optimize skew join if introduce additional shuffle") {
    // Skew-join optimization must be skipped when the skewed join's output is
    // consumed by an operator (GROUP BY) that would then need a new shuffle.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
      SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "100",
      SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100") {
      withTempView("skewData1", "skewData2") {
        spark
          .range(0, 1000, 1, 10)
          .selectExpr("id % 3 as key1", "id as value1")
          .createOrReplaceTempView("skewData1")
        spark
          .range(0, 1000, 1, 10)
          .selectExpr("id % 1 as key2", "id as value2")
          .createOrReplaceTempView("skewData2")

        // Asserts whether the (single) SMJ in the final plan was skew-optimized.
        def checkSkewJoin(query: String, optimizeSkewJoin: Boolean): Unit = {
          val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(query)
          val innerSmj = findTopLevelSortMergeJoin(innerAdaptivePlan)
          assert(innerSmj.size == 1 && innerSmj.head.isSkewJoin == optimizeSkewJoin)
        }

        checkSkewJoin(
          "SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2", true)
        // Additional shuffle introduced, so disable the "OptimizeSkewedJoin" optimization
        checkSkewJoin(
          "SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2 GROUP BY key1", false)
      }
    }
  }
  test("SPARK-29544: adaptive skew join with different join types") {
    // Verifies which side's skewed partitions may be split per join type:
    // both sides for inner, only the left for left outer, only the right for
    // right outer (splitting the preserved side would duplicate rows).
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
      SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
      SQLConf.SHUFFLE_PARTITIONS.key -> "100",
      SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
      SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "800") {
      withTempView("skewData1", "skewData2") {
        spark
          .range(0, 1000, 1, 10)
          .select(
            when('id < 250, 249)
              .when('id >= 750, 1000)
              .otherwise('id).as("key1"),
            'id as "value1")
          .createOrReplaceTempView("skewData1")
        spark
          .range(0, 1000, 1, 10)
          .select(
            when('id < 250, 249)
              .otherwise('id).as("key2"),
            'id as "value2")
          .createOrReplaceTempView("skewData2")

        // Counts how many distinct reducer partitions were split on each side.
        def checkSkewJoin(
            joins: Seq[SortMergeJoinExec],
            leftSkewNum: Int,
            rightSkewNum: Int): Unit = {
          assert(joins.size == 1 && joins.head.isSkewJoin)
          assert(joins.head.left.collect {
            case r: CustomShuffleReaderExec => r
          }.head.partitionSpecs.collect {
            case p: PartialReducerPartitionSpec => p.reducerIndex
          }.distinct.length == leftSkewNum)
          assert(joins.head.right.collect {
            case r: CustomShuffleReaderExec => r
          }.head.partitionSpecs.collect {
            case p: PartialReducerPartitionSpec => p.reducerIndex
          }.distinct.length == rightSkewNum)
        }

        // skewed inner join optimization
        val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(
          "SELECT * FROM skewData1 join skewData2 ON key1 = key2")
        val innerSmj = findTopLevelSortMergeJoin(innerAdaptivePlan)
        checkSkewJoin(innerSmj, 2, 1)

        // skewed left outer join optimization
        val (_, leftAdaptivePlan) = runAdaptiveAndVerifyResult(
          "SELECT * FROM skewData1 left outer join skewData2 ON key1 = key2")
        val leftSmj = findTopLevelSortMergeJoin(leftAdaptivePlan)
        checkSkewJoin(leftSmj, 2, 0)

        // skewed right outer join optimization
        val (_, rightAdaptivePlan) = runAdaptiveAndVerifyResult(
          "SELECT * FROM skewData1 right outer join skewData2 ON key1 = key2")
        val rightSmj = findTopLevelSortMergeJoin(rightAdaptivePlan)
        checkSkewJoin(rightSmj, 0, 1)
      }
    }
  }
  test("SPARK-30291: AQE should catch the exceptions when doing materialize") {
    // Corrupt a bucketed table on disk (replace bucketed files with plain
    // parquet) and check that the stage-materialization failure surfaces as a
    // single clean exception, not wrapped in suppressed exceptions.
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
      withTable("bucketed_table") {
        val df1 =
          (0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
        df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
        val warehouseFilePath = new URI(spark.sessionState.conf.warehousePath).getPath
        val tableDir = new File(warehouseFilePath, "bucketed_table")
        Utils.deleteRecursively(tableDir)
        df1.write.parquet(tableDir.getAbsolutePath)

        val aggregated = spark.table("bucketed_table").groupBy("i").count()
        val error = intercept[Exception] {
          aggregated.count()
        }
        assert(error.toString contains "Invalid bucket file")
        assert(error.getSuppressed.size === 0)
      }
    }
  }
test("SPARK-30403: AQE should handle InSubquery") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult("SELECT * FROM testData LEFT OUTER join testData2" +
" ON key = a AND key NOT IN (select a from testData3) where value = '1'"
)
}
}
test("force apply AQE") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
  test("SPARK-30719: do not log warning if intentionally skip AQE") {
    // A query with no exchanges is intentionally not adaptive; that skip must
    // not produce the "enabled but is not supported" warning.
    val testAppender = new LogAppender("aqe logging warning test when skip")
    withLogAppender(testAppender) {
      withSQLConf(
        SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
        val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
        assert(!plan.isInstanceOf[AdaptiveSparkPlanExec])
      }
    }
    assert(!testAppender.loggingEvents
      .exists(msg => msg.getRenderedMessage.contains(
        s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is" +
        s" enabled but is not supported for")))
  }
  test("test log level") {
    // Checks that AQE plan-change messages are emitted at the configured log
    // level, for the default level and for every supported (case-insensitive)
    // value of ADAPTIVE_EXECUTION_LOG_LEVEL.
    def verifyLog(expectedLevel: Level): Unit = {
      val logAppender = new LogAppender("adaptive execution")
      withLogAppender(
        logAppender,
        loggerName = Some(AdaptiveSparkPlanExec.getClass.getName.dropRight(1)),
        level = Some(Level.TRACE)) {
        withSQLConf(
          SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
          SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
          sql("SELECT * FROM testData join testData2 ON key = a where value = '1'").collect()
        }
      }
      Seq("Plan changed", "Final plan").foreach { msg =>
        assert(
          logAppender.loggingEvents.exists { event =>
            event.getRenderedMessage.contains(msg) && event.getLevel == expectedLevel
          })
      }
    }

    // Verify default log level
    verifyLog(Level.DEBUG)

    // Verify custom log level
    val levels = Seq(
      "TRACE" -> Level.TRACE,
      "trace" -> Level.TRACE,
      "DEBUG" -> Level.DEBUG,
      "debug" -> Level.DEBUG,
      "INFO" -> Level.INFO,
      "info" -> Level.INFO,
      "WARN" -> Level.WARN,
      "warn" -> Level.WARN,
      "ERROR" -> Level.ERROR,
      "error" -> Level.ERROR,
      "deBUG" -> Level.DEBUG)

    levels.foreach { level =>
      withSQLConf(SQLConf.ADAPTIVE_EXECUTION_LOG_LEVEL.key -> level._1) {
        verifyLog(level._2)
      }
    }
  }
test("tree string output") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = sql("SELECT * FROM testData join testData2 ON key = a where value = '1'")
val planBefore = df.queryExecution.executedPlan
assert(!planBefore.toString.contains("== Current Plan =="))
assert(!planBefore.toString.contains("== Initial Plan =="))
df.collect()
val planAfter = df.queryExecution.executedPlan
assert(planAfter.toString.contains("== Final Plan =="))
assert(planAfter.toString.contains("== Initial Plan =="))
}
}
test("SPARK-31384: avoid NPE in OptimizeSkewedJoin when there's 0 partition plan") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t2") {
// create DataFrame with 0 partition
spark.createDataFrame(sparkContext.emptyRDD[Row], new StructType().add("b", IntegerType))
.createOrReplaceTempView("t2")
// should run successfully without NPE
runAdaptiveAndVerifyResult("SELECT * FROM testData2 t1 left semi join t2 ON t1.a=t2.b")
}
}
}
  test("SPARK-34682: CustomShuffleReaderExec operating on canonicalized plan") {
    // Executing a canonicalized CustomShuffleReaderExec must fail fast with a
    // descriptive IllegalStateException rather than producing garbage.
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
      val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT key FROM testData GROUP BY key")
      val readers = collect(adaptivePlan) {
        case r: CustomShuffleReaderExec => r
      }
      assert(readers.length == 1)
      val reader = readers.head
      val c = reader.canonicalized.asInstanceOf[CustomShuffleReaderExec]
      // we can't just call execute() because that has separate checks for canonicalized plans
      val ex = intercept[IllegalStateException] {
        // Invoke the protected doExecute directly via ScalaTest's PrivateMethod.
        val doExecute = PrivateMethod[Unit](Symbol("doExecute"))
        c.invokePrivate(doExecute())
      }
      assert(ex.getMessage === "operating on canonicalized plan")
    }
  }
  test("metrics of the shuffle reader") {
    // Exercises the three reader flavors and their SQL metrics:
    // coalesced (numPartitions + partitionDataSize), local (numPartitions
    // only), and skewed (adds numSkewedPartitions / numSkewedSplits).
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
      val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT key FROM testData GROUP BY key")
      val readers = collect(adaptivePlan) {
        case r: CustomShuffleReaderExec => r
      }
      assert(readers.length == 1)
      val reader = readers.head
      assert(!reader.isLocalReader)
      assert(!reader.hasSkewedPartition)
      assert(reader.hasCoalescedPartition)
      assert(reader.metrics.keys.toSeq.sorted == Seq(
        "numPartitions", "partitionDataSize"))
      assert(reader.metrics("numPartitions").value == reader.partitionSpecs.length)
      assert(reader.metrics("partitionDataSize").value > 0)

      // Local reader: created on the build side of a broadcast join.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
        val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
          "SELECT * FROM testData join testData2 ON key = a where value = '1'")
        val join = collect(adaptivePlan) {
          case j: BroadcastHashJoinExec => j
        }.head
        assert(join.buildSide == BuildLeft)

        val readers = collect(join.right) {
          case r: CustomShuffleReaderExec => r
        }
        assert(readers.length == 1)
        val reader = readers.head
        assert(reader.isLocalReader)
        assert(reader.metrics.keys.toSeq == Seq("numPartitions"))
        assert(reader.metrics("numPartitions").value == reader.partitionSpecs.length)
      }

      // Skewed reader: force a skewed sort merge join and check skew metrics.
      withSQLConf(
        SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
        SQLConf.SHUFFLE_PARTITIONS.key -> "100",
        SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
        SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "800") {
        withTempView("skewData1", "skewData2") {
          spark
            .range(0, 1000, 1, 10)
            .select(
              when('id < 250, 249)
                .when('id >= 750, 1000)
                .otherwise('id).as("key1"),
              'id as "value1")
            .createOrReplaceTempView("skewData1")
          spark
            .range(0, 1000, 1, 10)
            .select(
              when('id < 250, 249)
                .otherwise('id).as("key2"),
              'id as "value2")
            .createOrReplaceTempView("skewData2")
          val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
            "SELECT * FROM skewData1 join skewData2 ON key1 = key2")
          val readers = collect(adaptivePlan) {
            case r: CustomShuffleReaderExec => r
          }
          readers.foreach { reader =>
            assert(!reader.isLocalReader)
            assert(reader.hasCoalescedPartition)
            assert(reader.hasSkewedPartition)
            assert(reader.metrics.contains("numSkewedPartitions"))
          }
          assert(readers(0).metrics("numSkewedPartitions").value == 2)
          assert(readers(0).metrics("numSkewedSplits").value == 15)
          assert(readers(1).metrics("numSkewedPartitions").value == 1)
          assert(readers(1).metrics("numSkewedSplits").value == 12)
        }
      }
    }
  }
  test("control a plan explain mode in listeners via SQLConf") {
    // For each UI explain mode, the plan description carried by
    // SparkListenerSQLAdaptiveExecutionUpdate events must contain the
    // sections that mode is expected to render.
    def checkPlanDescription(mode: String, expected: Seq[String]): Unit = {
      var checkDone = false
      val listener = new SparkListener {
        override def onOtherEvent(event: SparkListenerEvent): Unit = {
          event match {
            case SparkListenerSQLAdaptiveExecutionUpdate(_, planDescription, _) =>
              assert(expected.forall(planDescription.contains))
              checkDone = true
            case _ => // ignore other events
          }
        }
      }
      spark.sparkContext.addSparkListener(listener)
      withSQLConf(SQLConf.UI_EXPLAIN_MODE.key -> mode,
          SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
          SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
        val dfAdaptive = sql("SELECT * FROM testData JOIN testData2 ON key = a WHERE value = '1'")
        try {
          checkAnswer(dfAdaptive, Row(1, "1", 1, 1) :: Row(1, "1", 1, 2) :: Nil)
          spark.sparkContext.listenerBus.waitUntilEmpty()
          assert(checkDone)
        } finally {
          spark.sparkContext.removeSparkListener(listener)
        }
      }
    }

    Seq(("simple", Seq("== Physical Plan ==")),
        ("extended", Seq("== Parsed Logical Plan ==", "== Analyzed Logical Plan ==",
        "== Optimized Logical Plan ==", "== Physical Plan ==")),
        ("codegen", Seq("WholeStageCodegen subtrees")),
        ("cost", Seq("== Optimized Logical Plan ==", "Statistics(sizeInBytes")),
        ("formatted", Seq("== Physical Plan ==", "Output", "Arguments"))).foreach {
      case (mode, expected) =>
        checkPlanDescription(mode, expected)
    }
  }
  test("SPARK-30953: InsertAdaptiveSparkPlan should apply AQE on child plan of write commands") {
    // A CTAS keeps its DataWritingCommandExec at the top, but the query under
    // it should still be wrapped in AdaptiveSparkPlanExec.
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
      withTable("t1") {
        val plan = sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").queryExecution.executedPlan
        assert(plan.isInstanceOf[DataWritingCommandExec])
        assert(plan.asInstanceOf[DataWritingCommandExec].child.isInstanceOf[AdaptiveSparkPlanExec])
      }
    }
  }
test("AQE should set active session during execution") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.range(10).select(sum('id))
assert(df.queryExecution.executedPlan.isInstanceOf[AdaptiveSparkPlanExec])
SparkSession.setActiveSession(null)
checkAnswer(df, Seq(Row(45)))
SparkSession.setActiveSession(spark) // recover the active session.
}
}
  test("No deadlock in UI update") {
    // The custom strategy deliberately triggers a nested query execution
    // (spark.range(...).rdd) during planning; this must not deadlock the
    // UI update path.
    object TestStrategy extends Strategy {
      def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
        case _: Aggregate =>
          withSQLConf(
            SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
            SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
            spark.range(5).rdd
          }
          Nil
        case _ => Nil
      }
    }

    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
      try {
        spark.experimental.extraStrategies = TestStrategy :: Nil
        val df = spark.range(10).groupBy('id).count()
        df.collect()
      } finally {
        // Always restore the default strategies so later tests are unaffected.
        spark.experimental.extraStrategies = Nil
      }
    }
  }
  test("SPARK-31658: SQL UI should show write commands") {
    // The adaptive-execution-update event for a CTAS should report the write
    // command node, so the SQL UI displays the write rather than only the query.
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
      withTable("t1") {
        var checkDone = false
        val listener = new SparkListener {
          override def onOtherEvent(event: SparkListenerEvent): Unit = {
            event match {
              case SparkListenerSQLAdaptiveExecutionUpdate(_, _, planInfo) =>
                assert(planInfo.nodeName == "Execute CreateDataSourceTableAsSelectCommand")
                checkDone = true
              case _ => // ignore other events
            }
          }
        }
        spark.sparkContext.addSparkListener(listener)
        try {
          sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").collect()
          spark.sparkContext.listenerBus.waitUntilEmpty()
          assert(checkDone)
        } finally {
          spark.sparkContext.removeSparkListener(listener)
        }
      }
    }
  }
  test("SPARK-31220, SPARK-32056: repartition by expression with AQE") {
    // With AQE on, repartition-by-expression without an explicit partition
    // count may be coalesced below the initial 10 partitions; with AQE off or
    // an explicit count, the count must stay exactly 10.
    Seq(true, false).foreach { enableAQE =>
      withSQLConf(
        SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
        SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
        SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
        SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
        val df1 = spark.range(10).repartition($"id")
        val df2 = spark.range(10).repartition($"id" + 1)

        val partitionsNum1 = df1.rdd.collectPartitions().length
        val partitionsNum2 = df2.rdd.collectPartitions().length

        if (enableAQE) {
          assert(partitionsNum1 < 10)
          assert(partitionsNum2 < 10)

          checkInitialPartitionNum(df1, 10)
          checkInitialPartitionNum(df2, 10)
        } else {
          assert(partitionsNum1 === 10)
          assert(partitionsNum2 === 10)
        }


        // Don't coalesce partitions if the number of partitions is specified.
        val df3 = spark.range(10).repartition(10, $"id")
        val df4 = spark.range(10).repartition(10)
        assert(df3.rdd.collectPartitions().length == 10)
        assert(df4.rdd.collectPartitions().length == 10)
      }
    }
  }
  test("SPARK-31220, SPARK-32056: repartition by range with AQE") {
    // Same contract as the repartition-by-expression test, but for
    // repartitionByRange.
    Seq(true, false).foreach { enableAQE =>
      withSQLConf(
        SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
        SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
        SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
        SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
        val df1 = spark.range(10).toDF.repartitionByRange($"id".asc)
        val df2 = spark.range(10).toDF.repartitionByRange(($"id" + 1).asc)

        val partitionsNum1 = df1.rdd.collectPartitions().length
        val partitionsNum2 = df2.rdd.collectPartitions().length

        if (enableAQE) {
          assert(partitionsNum1 < 10)
          assert(partitionsNum2 < 10)

          checkInitialPartitionNum(df1, 10)
          checkInitialPartitionNum(df2, 10)
        } else {
          assert(partitionsNum1 === 10)
          assert(partitionsNum2 === 10)
        }

        // Don't coalesce partitions if the number of partitions is specified.
        val df3 = spark.range(10).repartitionByRange(10, $"id".asc)
        assert(df3.rdd.collectPartitions().length == 10)
      }
    }
  }
  test("SPARK-31220, SPARK-32056: repartition using sql and hint with AQE") {
    // SQL-level repartitioning (REPARTITION / REPARTITION_BY_RANGE hints,
    // DISTRIBUTE BY, CLUSTER BY) follows the same coalescing rules as the
    // DataFrame API variants above.
    Seq(true, false).foreach { enableAQE =>
      withTempView("test") {
        withSQLConf(
          SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
          SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
          SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
          SQLConf.SHUFFLE_PARTITIONS.key -> "10") {

          spark.range(10).toDF.createTempView("test")

          val df1 = spark.sql("SELECT /*+ REPARTITION(id) */ * from test")
          val df2 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(id) */ * from test")
          val df3 = spark.sql("SELECT * from test DISTRIBUTE BY id")
          val df4 = spark.sql("SELECT * from test CLUSTER BY id")

          val partitionsNum1 = df1.rdd.collectPartitions().length
          val partitionsNum2 = df2.rdd.collectPartitions().length
          val partitionsNum3 = df3.rdd.collectPartitions().length
          val partitionsNum4 = df4.rdd.collectPartitions().length

          if (enableAQE) {
            assert(partitionsNum1 < 10)
            assert(partitionsNum2 < 10)
            assert(partitionsNum3 < 10)
            assert(partitionsNum4 < 10)

            checkInitialPartitionNum(df1, 10)
            checkInitialPartitionNum(df2, 10)
            checkInitialPartitionNum(df3, 10)
            checkInitialPartitionNum(df4, 10)
          } else {
            assert(partitionsNum1 === 10)
            assert(partitionsNum2 === 10)
            assert(partitionsNum3 === 10)
            assert(partitionsNum4 === 10)
          }

          // Don't coalesce partitions if the number of partitions is specified.
          val df5 = spark.sql("SELECT /*+ REPARTITION(10, id) */ * from test")
          val df6 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(10, id) */ * from test")
          assert(df5.rdd.collectPartitions().length == 10)
          assert(df6.rdd.collectPartitions().length == 10)
        }
      }
    }
  }
test("SPARK-32573: Eliminate NAAJ when BuildSide is HashedRelationWithAllNullKeys") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
  test("SPARK-32717: AQEOptimizer should respect excludedRules configuration") {
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString,
      // This test is a copy of test(SPARK-32573), in order to test the configuration
      // `spark.sql.adaptive.optimizer.excludedRules` works as expect.
      SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> EliminateUnnecessaryJoin.ruleName) {
      val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
        "SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
      val bhj = findTopLevelBroadcastHashJoin(plan)
      assert(bhj.size == 1)
      val join = findTopLevelBaseJoin(adaptivePlan)
      // this is different compares to test(SPARK-32573) due to the rule
      // `EliminateUnnecessaryJoin` has been excluded.
      assert(join.nonEmpty)
      checkNumLocalShuffleReaders(adaptivePlan)
    }
  }
test("SPARK-32649: Eliminate inner and semi join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
Seq(
// inner join (small table at right side)
"SELECT * FROM testData t1 join testData3 t2 ON t1.key = t2.a WHERE t2.b = 1",
// inner join (small table at left side)
"SELECT * FROM testData3 t1 join testData t2 ON t1.a = t2.key WHERE t1.b = 1",
// left semi join
"SELECT * FROM testData t1 left semi join testData3 t2 ON t1.key = t2.a AND t2.b = 1"
).foreach(query => {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReaders(adaptivePlan)
})
}
}
test("SPARK-34533: Eliminate left anti join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
Seq(
// broadcast non-empty right side
("SELECT /*+ broadcast(testData3) */ * FROM testData LEFT ANTI JOIN testData3", true),
// broadcast empty right side
("SELECT /*+ broadcast(emptyTestData) */ * FROM testData LEFT ANTI JOIN emptyTestData",
true),
// broadcast left side
("SELECT /*+ broadcast(testData) */ * FROM testData LEFT ANTI JOIN testData3", false)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated)
}
}
}
test("SPARK-34781: Eliminate left semi/anti join to its left side") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
Seq(
// left semi join and non-empty right side
("SELECT * FROM testData LEFT SEMI JOIN testData3", true),
// left semi join, non-empty right side and non-empty join condition
("SELECT * FROM testData t1 LEFT SEMI JOIN testData3 t2 ON t1.key = t2.a", false),
// left anti join and empty right side
("SELECT * FROM testData LEFT ANTI JOIN emptyTestData", true),
// left anti join, empty right side and non-empty join condition
("SELECT * FROM testData t1 LEFT ANTI JOIN emptyTestData t2 ON t1.key = t2.key", true)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated)
}
}
}
test("SPARK-32753: Only copy tags to node with no tags") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("v1") {
spark.range(10).union(spark.range(10)).createOrReplaceTempView("v1")
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT id FROM v1 GROUP BY id DISTRIBUTE BY id")
assert(collect(adaptivePlan) {
case s: ShuffleExchangeExec => s
}.length == 1)
}
}
}
test("Logging plan changes for AQE") {
val testAppender = new LogAppender("plan changes")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "INFO",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData JOIN testData2 ON key = a " +
"WHERE value = (SELECT max(a) FROM testData3)").collect()
}
Seq("=== Result of Batch AQE Preparations ===",
"=== Result of Batch AQE Post Stage Creation ===",
"=== Result of Batch AQE Replanning ===",
"=== Result of Batch AQE Query Stage Optimization ===",
"=== Result of Batch AQE Final Query Stage Optimization ===").foreach { expectedMsg =>
assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg)))
}
}
}
  test("SPARK-32932: Do not use local shuffle reader at final stage on write command") {
    withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.DYNAMIC.toString,
      SQLConf.SHUFFLE_PARTITIONS.key -> "5",
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
      val data = for (
        i <- 1L to 10L;
        j <- 1L to 3L
      ) yield (i, j)
      val df = data.toDF("i", "j").repartition($"j")
      // Flag flipped by the listener below; asserted after each write completes.
      var noLocalReader: Boolean = false
      val listener = new QueryExecutionListener {
        override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = {
          qe.executedPlan match {
            // Only inspect write commands (v1 and v2); other query events are ignored.
            case plan@(_: DataWritingCommandExec | _: V2TableWriteExec) =>
              assert(plan.asInstanceOf[UnaryExecNode].child.isInstanceOf[AdaptiveSparkPlanExec])
              // True iff no local shuffle reader appears anywhere under the write.
              noLocalReader = collect(plan) {
                case exec: CustomShuffleReaderExec if exec.isLocalReader => exec
              }.isEmpty
            case _ => // ignore other events
          }
        }
        override def onFailure(funcName: String, qe: QueryExecution,
          exception: Exception): Unit = {}
      }
      spark.listenerManager.register(listener)
      // DataSource v1 write path.
      withTable("t") {
        df.write.partitionBy("j").saveAsTable("t")
        // Wait for the async listener so the flag is up to date before asserting.
        sparkContext.listenerBus.waitUntilEmpty()
        assert(noLocalReader)
        noLocalReader = false
      }
      // Test DataSource v2
      val format = classOf[NoopDataSource].getName
      df.write.format(format).mode("overwrite").save()
      sparkContext.listenerBus.waitUntilEmpty()
      assert(noLocalReader)
      noLocalReader = false
      spark.listenerManager.unregister(listener)
    }
  }
test("SPARK-33494: Do not use local shuffle reader for repartition") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.table("testData").repartition('key)
df.collect()
// local shuffle reader breaks partitioning and shouldn't be used for repartition operation
// which is specified by users.
checkNumLocalShuffleReaders(df.queryExecution.executedPlan, numShufflesWithoutLocalReader = 1)
}
}
  test("SPARK-33551: Do not use custom shuffle reader for repartition") {
    // True when the plan still contains a shuffle introduced by a user repartition.
    def hasRepartitionShuffle(plan: SparkPlan): Boolean = {
      find(plan) {
        case s: ShuffleExchangeLike =>
          s.shuffleOrigin == REPARTITION || s.shuffleOrigin == REPARTITION_WITH_NUM
        case _ => false
      }.isDefined
    }
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.SHUFFLE_PARTITIONS.key -> "5") {
      val df = sql(
        """
          |SELECT * FROM (
          |  SELECT * FROM testData WHERE key = 1
          |)
          |RIGHT OUTER JOIN testData2
          |ON value = b
        """.stripMargin)
      // Scenario 1: broadcast join path.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
        // Repartition with no partition num specified.
        val dfRepartition = df.repartition('b)
        dfRepartition.collect()
        val plan = dfRepartition.queryExecution.executedPlan
        // The top shuffle from repartition is optimized out.
        assert(!hasRepartitionShuffle(plan))
        val bhj = findTopLevelBroadcastHashJoin(plan)
        assert(bhj.length == 1)
        checkNumLocalShuffleReaders(plan, 1)
        // Probe side is coalesced.
        val customReader = bhj.head.right.find(_.isInstanceOf[CustomShuffleReaderExec])
        assert(customReader.isDefined)
        assert(customReader.get.asInstanceOf[CustomShuffleReaderExec].hasCoalescedPartition)
        // Repartition with partition default num specified.
        val dfRepartitionWithNum = df.repartition(5, 'b)
        dfRepartitionWithNum.collect()
        val planWithNum = dfRepartitionWithNum.queryExecution.executedPlan
        // The top shuffle from repartition is optimized out.
        assert(!hasRepartitionShuffle(planWithNum))
        val bhjWithNum = findTopLevelBroadcastHashJoin(planWithNum)
        assert(bhjWithNum.length == 1)
        checkNumLocalShuffleReaders(planWithNum, 1)
        // Probe side is not coalesced.
        assert(bhjWithNum.head.right.find(_.isInstanceOf[CustomShuffleReaderExec]).isEmpty)
        // Repartition with partition non-default num specified.
        val dfRepartitionWithNum2 = df.repartition(3, 'b)
        dfRepartitionWithNum2.collect()
        val planWithNum2 = dfRepartitionWithNum2.queryExecution.executedPlan
        // The top shuffle from repartition is not optimized out, and this is the only shuffle that
        // does not have local shuffle reader.
        assert(hasRepartitionShuffle(planWithNum2))
        val bhjWithNum2 = findTopLevelBroadcastHashJoin(planWithNum2)
        assert(bhjWithNum2.length == 1)
        checkNumLocalShuffleReaders(planWithNum2, 1)
        val customReader2 = bhjWithNum2.head.right.find(_.isInstanceOf[CustomShuffleReaderExec])
        assert(customReader2.isDefined)
        assert(customReader2.get.asInstanceOf[CustomShuffleReaderExec].isLocalReader)
      }
      // Scenario 2: sort-merge join path with skew-join conditions forced.
      // Force skew join
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
        SQLConf.SKEW_JOIN_ENABLED.key -> "true",
        SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "1",
        SQLConf.SKEW_JOIN_SKEWED_PARTITION_FACTOR.key -> "0",
        SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
        // Repartition with no partition num specified.
        val dfRepartition = df.repartition('b)
        dfRepartition.collect()
        val plan = dfRepartition.queryExecution.executedPlan
        // The top shuffle from repartition is optimized out.
        assert(!hasRepartitionShuffle(plan))
        val smj = findTopLevelSortMergeJoin(plan)
        assert(smj.length == 1)
        // No skew join due to the repartition.
        assert(!smj.head.isSkewJoin)
        // Both sides are coalesced.
        val customReaders = collect(smj.head) {
          case c: CustomShuffleReaderExec if c.hasCoalescedPartition => c
        }
        assert(customReaders.length == 2)
        // Repartition with default partition num specified.
        val dfRepartitionWithNum = df.repartition(5, 'b)
        dfRepartitionWithNum.collect()
        val planWithNum = dfRepartitionWithNum.queryExecution.executedPlan
        // The top shuffle from repartition is optimized out.
        assert(!hasRepartitionShuffle(planWithNum))
        val smjWithNum = findTopLevelSortMergeJoin(planWithNum)
        assert(smjWithNum.length == 1)
        // No skew join due to the repartition.
        assert(!smjWithNum.head.isSkewJoin)
        // No coalesce due to the num in repartition.
        val customReadersWithNum = collect(smjWithNum.head) {
          case c: CustomShuffleReaderExec if c.hasCoalescedPartition => c
        }
        assert(customReadersWithNum.isEmpty)
        // Repartition with default non-partition num specified.
        val dfRepartitionWithNum2 = df.repartition(3, 'b)
        dfRepartitionWithNum2.collect()
        val planWithNum2 = dfRepartitionWithNum2.queryExecution.executedPlan
        // The top shuffle from repartition is not optimized out.
        assert(hasRepartitionShuffle(planWithNum2))
        val smjWithNum2 = findTopLevelSortMergeJoin(planWithNum2)
        assert(smjWithNum2.length == 1)
        // Skew join can apply as the repartition is not optimized out.
        assert(smjWithNum2.head.isSkewJoin)
      }
    }
  }
  test("SPARK-34091: Batch shuffle fetch in AQE partition coalescing") {
    withSQLConf(
      SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.SHUFFLE_PARTITIONS.key -> "10000",
      SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "true") {
      withTable("t1") {
        spark.range(100).selectExpr("id + 1 as a").write.format("parquet").saveAsTable("t1")
        val query = "SELECT SUM(a) FROM t1 GROUP BY a"
        val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
        val metricName = SQLShuffleReadMetricsReporter.LOCAL_BLOCKS_FETCHED
        // First node in the adaptive plan that reports the local-blocks-fetched metric.
        val blocksFetchedMetric = collectFirst(adaptivePlan) {
          case p if p.metrics.contains(metricName) => p.metrics(metricName)
        }
        assert(blocksFetchedMetric.isDefined)
        val blocksFetched = blocksFetchedMetric.get.value
        // Re-run the same query with batch fetch disabled: more individual block
        // fetches are expected than with batching on.
        withSQLConf(SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "false") {
          val (_, adaptivePlan2) = runAdaptiveAndVerifyResult(query)
          val blocksFetchedMetric2 = collectFirst(adaptivePlan2) {
            case p if p.metrics.contains(metricName) => p.metrics(metricName)
          }
          assert(blocksFetchedMetric2.isDefined)
          val blocksFetched2 = blocksFetchedMetric2.get.value
          assert(blocksFetched < blocksFetched2)
        }
      }
    }
  }
  test("SPARK-33933: Materialize BroadcastQueryStage first in AQE") {
    val testAppender = new LogAppender("aqe query stage materialization order test")
    val df = spark.range(1000).select($"id" % 26, $"id" % 10)
      .toDF("index", "pv")
    val dim = Range(0, 26).map(x => (x, ('a' + x).toChar.toString))
      .toDF("index", "name")
    val testDf = df.groupBy("index")
      .agg(sum($"pv").alias("pv"))
      .join(dim, Seq("index"))
    // Capture DEBUG logs so the stage-materialization messages are recorded.
    withLogAppender(testAppender, level = Some(Level.DEBUG)) {
      withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
        val result = testDf.collect()
        assert(result.length == 26)
      }
    }
    val materializeLogs = testAppender.loggingEvents
      .map(_.getRenderedMessage)
      .filter(_.startsWith("Materialize query stage"))
      .toArray
    // Log order proves the broadcast stage was materialized before the shuffle stage.
    assert(materializeLogs(0).startsWith("Materialize query stage BroadcastQueryStageExec"))
    assert(materializeLogs(1).startsWith("Materialize query stage ShuffleQueryStageExec"))
  }
  test("SPARK-34899: Use origin plan if we can not coalesce shuffle partition") {
    // Asserts that the 2-partition shuffle with the given origin is present both
    // before and after execution, with no CustomShuffleReaderExec inserted,
    // i.e. AQE kept the original (non-coalesced) plan.
    def checkNoCoalescePartitions(ds: Dataset[Row], origin: ShuffleOrigin): Unit = {
      assert(collect(ds.queryExecution.executedPlan) {
        case s: ShuffleExchangeExec if s.shuffleOrigin == origin && s.numPartitions == 2 => s
      }.size == 1)
      ds.collect()
      val plan = ds.queryExecution.executedPlan
      assert(collect(plan) {
        case c: CustomShuffleReaderExec => c
      }.isEmpty)
      assert(collect(plan) {
        case s: ShuffleExchangeExec if s.shuffleOrigin == origin && s.numPartitions == 2 => s
      }.size == 1)
      checkAnswer(ds, testData)
    }
    withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
      SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
      // Advisory size chosen so both partitions stay just under it — see the
      // measured partition sizes noted below.
      SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "2258",
      SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
      SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
      val df = spark.sparkContext.parallelize(
        (1 to 100).map(i => TestData(i, i.toString)), 10).toDF()
      // partition size [1420, 1420]
      checkNoCoalescePartitions(df.repartition(), REPARTITION)
      // partition size [1140, 1119]
      checkNoCoalescePartitions(df.sort($"key"), ENSURE_REQUIREMENTS)
    }
  }
}
| BryanCutler/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala | Scala | apache-2.0 | 64,940 |
package scutil.lang
import minitest._
object DisposerTest extends SimpleTestSuite {

	/**
	 * Runs `body` and returns the `Exception` it throws, or `null` when it
	 * completes normally. Replaces the try/catch boilerplate previously
	 * duplicated in every failure test below.
	 */
	private def capture(body: => Unit): Exception =
		try { body; null }
		catch { case e: Exception => e }

	test("a combined Disposer should execute both actions in order") {
		var tmp	= ""
		val a	= Disposer delay { tmp = tmp + "a" }
		val b	= Disposer delay { tmp = tmp + "b" }
		val c	= a combine b
		c.dispose()
		assertEquals(tmp, "ab")
	}

	test("in an exception thrown in the first of a combined Disposer should be work") {
		var tmp	= 0
		val a	= Disposer delay { sys error "a failed" }
		val b	= Disposer delay { tmp = 2 }
		val err	= capture((a combine b).dispose())
		// the second disposer must still have run despite the first one failing
		assertEquals(tmp, 2)
		assertEquals(err.getMessage, "a failed")
	}

	test("in an exception thrown in the second of a combined Disposer should be work") {
		var tmp	= 0
		val a	= Disposer delay { tmp = 1 }
		val b	= Disposer delay { sys error "b failed" }
		val err	= capture((a combine b).dispose())
		assertEquals(tmp, 1)
		assertEquals(err.getMessage, "b failed")
	}

	test("in an exception thrown in both a combined Disposer should be work") {
		val a	= Disposer delay { sys error "a failed" }
		val b	= Disposer delay { sys error "b failed" }
		val err	= capture((a combine b).dispose())
		// the first exception wins; the second is attached as a suppressed exception
		assertEquals(err.getMessage, "a failed")
		assertEquals(err.getSuppressed.length, 1)
		assertEquals(err.getSuppressed()(0).getMessage, "b failed")
	}
}
| ritschwumm/scutil | modules/core/src/test/scala/scutil/lang/DisposerTest.scala | Scala | bsd-2-clause | 1,533 |
package com.twitter.util.validation.metadata
import java.lang.annotation.Annotation
import java.lang.reflect.Executable
/**
 * Base descriptor for a reflected [[java.lang.reflect.Executable]] (a constructor
 * or a method) participating in validation.
 *
 * Concrete subclasses supply the executable's annotations and a name-keyed map of
 * [[PropertyDescriptor]] members (presumably the executable's parameters — confirm
 * against the concrete subclasses).
 */
abstract class ExecutableDescriptor private[validation] (val executable: Executable)
  extends Descriptor {
  // Annotations attached to the underlying executable.
  def annotations: Array[Annotation]
  // Member descriptors, keyed by name.
  def members: Map[String, PropertyDescriptor]
}
| twitter/util | util-validator/src/main/scala/com/twitter/util/validation/metadata/ExecutableDescriptor.scala | Scala | apache-2.0 | 318 |
package com.github.dzhg.tedis.server
import com.github.dzhg.tedis.TedisErrors
import com.github.dzhg.tedis.utils.{ServerAndClient, TedisSuite}
/**
 * Integration spec for the Tedis server's hash commands (HSET, HGET, HSETNX,
 * HMSET, HMGET, HEXISTS, HDEL, HLEN, HKEYS, HVALS, HGETALL, HINCRBY,
 * HINCRBYFLOAT), exercised through a real server/client pair.
 */
class HashSpec extends TedisSuite with ServerAndClient with TedisErrors {
  "TedisServer" when {
    "hset(key, field, value)" must {
      "set hash field" in {
        val result = client.hset("key", "field", "value")
        result must be (true)

        val v = client.hget("key", "field")
        v.value must be ("value")
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        val ex = the [Exception] thrownBy client.hset("key", "field", "value")
        ex.getMessage must be (s"${WRONG_TYPE.error} ${WRONG_TYPE.msg}")
      }

      "set multiple fields with multiple calls" in {
        client.hset("key", "f1", "v1")
        client.hset("key", "f2", "v2")

        val v1 = client.hget("key", "f1")
        v1.value must be ("v1")

        val v2 = client.hget("key", "f2")
        v2.value must be ("v2")
      }
    }

    "hget(key, field)" must {
      "return correct value" in {
        client.hset("key", "f1", "v1")
        client.hget("key", "f1").value must be ("v1")
      }

      "return nil if key does not exist" in {
        val v = client.hget("key", "f")
        v mustBe empty
      }

      "return nil if field does not exist" in {
        client.hset("key", "f1", "v1")
        // Fixed: previously called client.get(...), which is the string API and
        // bypasses the hash command under test; hget is what this case verifies.
        val v = client.hget("key", "f2")
        v mustBe empty
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        val ex = the [Exception] thrownBy client.hget("key", "f1")
        ex.getMessage must be (s"${WRONG_TYPE.error} ${WRONG_TYPE.msg}")
      }
    }

    "hsetnx(key, field, value)" must {
      "set the value if key does not exist" in {
        val result = client.hsetnx("key", "f1", "v1")
        result must be (true)

        val v = client.hget("key", "f1")
        v.value must be ("v1")
      }

      "not set the value if field exists" in {
        val result = client.hset("key", "f1", "v1")
        result must be (true)

        val s = client.hsetnx("key", "f1", "v2")
        s must be (false)

        val v = client.hget("key", "f1")
        v.value must be ("v1")
      }

      "set the value if field does not exist in the hash" in {
        client.hset("key", "f1", "v1")
        val b = client.hsetnx("key", "f2", "v2")
        b must be (true)

        val v = client.hget("key", "f2")
        v.value must be ("v2")
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        val ex = the [Exception] thrownBy client.hsetnx("key", "f1", "v1")
        ex.getMessage must be (s"${WRONG_TYPE.error} ${WRONG_TYPE.msg}")
      }
    }

    "hmset(key, ...)" must {
      "set the value if key does not exist" in {
        val result = client.hmset("key", Seq(("f1", "v1"), ("f2", "v2")))
        result must be (true)

        val v1 = client.hget("key", "f1")
        v1.value must be ("v1")

        val v2 = client.hget("key", "f2")
        v2.value must be ("v2")
      }

      "set values if key exists" in {
        client.hset("key", "f1", "v1")
        client.hmset("key", Seq("f1" -> "v", "f2" -> "v2"))

        val v1 = client.hget("key", "f1")
        v1.value must be ("v")

        val v2 = client.hget("key", "f2")
        v2.value must be ("v2")
      }
    }

    "hmget(key, ...)" must {
      "return all values for fields" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2")))
        val v = client.hmget("key", "f1", "f2")
        v.value must be (Map("f1" -> "v1", "f2" -> "v2"))
      }

      "return correct values if not all fields exist" in {
        client.hmset("key", Seq(("f1", "v1"), ("f3", "v3")))
        val v = client.hmget("key", "f1", "f2", "f3")
        v.value must be (Map("f1" -> "v1", "f3" -> "v3"))
      }

      "return empty map if the key does not exist" in {
        val v = client.hmget("key", "f1", "f2")
        v.value must have size 0
      }
    }

    "hexists(key, field)" must {
      "return true if the hash contains the field" in {
        client.hset("k1", "f1", "v1")
        val v = client.hexists("k1", "f1")
        v must be (true)
      }

      "return false if key does not exist" in {
        val v = client.hexists("k1", "f1")
        v must be (false)
      }

      "return false if the hash does not contain the field" in {
        client.hset("k1", "f1", "v1")
        val v = client.hexists("k1", "f2")
        v must be (false)
      }

      "throw error if key is not hash" in {
        client.set("key", "value")
        an [Exception] mustBe thrownBy (client.hexists("key", "field"))
      }
    }

    "hdel(key, field, fields)" must {
      "delete existing fields" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2"), ("f3", "v3")))
        val v = client.hdel("key", "f1", "f3")
        v.value must be (2)

        val v1 = client.hget("key", "f1")
        v1 mustBe empty

        val v2 = client.hget("key", "f2")
        v2.value must be ("v2")
      }

      "return 0 if key does not exist" in {
        val v = client.hdel("key", "f1", "f2")
        v.value must be (0)
      }

      "does not count non-existing fields" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2")))
        val v = client.hdel("key", "f1", "f2", "f3", "f4")
        v.value must be (2)
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        an [Exception] mustBe thrownBy (client.hdel("key", "f1"))
      }
    }

    "hlen(key)" must {
      "return correct length for hash" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2"), ("f3", "v3")))
        val v = client.hlen("key")
        v.value must be (3)
      }

      "return 0 if key does not exist" in {
        val v = client.hlen("key")
        v.value must be (0)
      }

      "return 0 if hash is empty" in {
        client.hset("key", "f", "v")
        client.hdel("key", "f")
        val v = client.hlen("key")
        v.value must be (0)
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        an [Exception] mustBe thrownBy (client.hlen("key"))
      }
    }

    "hkeys(key)" must {
      "return correct keys for hash" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2"), ("f3", "v3")))
        val v = client.hkeys("key")
        v.value must be (List("f1", "f2", "f3"))
      }

      "return empty list if key does not exist" in {
        val v = client.hkeys("key")
        v.value must have size 0
      }

      "return empty list if hash is empty" in {
        client.hset("key", "f1", "v1")
        client.hdel("key", "f1")
        val v = client.hkeys("key")
        v.value must have size 0
      }
    }

    "hvals(key)" must {
      "return correct values for hash" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2"), ("f3", "v3")))
        val v = client.hvals("key")
        v.value must be (List("v1", "v2", "v3"))
      }

      "return empty list if hash does not exist" in {
        val v = client.hvals("key")
        v.value must have size 0
      }

      "return empty list if hash is empty" in {
        client.hset("key", "f1", "v1")
        client.hdel("key", "f1")
        val v = client.hvals("key")
        v.value must have size 0
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        an [Exception] mustBe thrownBy (client.hvals("key"))
      }
    }

    "hgetall(key)" must {
      "return correct key-value pairs" in {
        client.hmset("key", Seq(("f1", "v1"), ("f2", "v2"), ("f3", "v3")))
        val v = client.hgetall1("key")
        v.value must be (Map("f1" -> "v1", "f2" -> "v2", "f3" -> "v3"))
      }

      "return None if hash does not exist" in {
        val v = client.hgetall1("key")
        v mustBe empty
      }

      "return None if hash is empty" in {
        client.hset("key", "f1", "v1")
        client.hdel("key", "f1")
        val v = client.hgetall1("key")
        v mustBe empty
      }

      "throw error if key is not a hash" in {
        client.set("key", "value")
        an [Exception] mustBe thrownBy (client.hgetall1("key"))
      }
    }

    "hincrby(key, field, value)" must {
      "return correct value after increment" in {
        client.hset("key", "field", 1)
        client.hincrby("key", "field", 1)
        val v = client.hget("key", "field")
        v.value must be ("2")
      }

      "return correct value if key does not exist" in {
        client.hincrby("key", "field", 5)
        val v = client.hget("key", "field")
        v.value must be ("5")
      }

      "return correct value if field does not exist" in {
        client.hset("key", "f1", 1)
        client.hincrby("key", "f2", 5)
        val v = client.hget("key", "f2")
        v.value must be ("5")
      }

      "return correct value if increment is negative number" in {
        client.hset("key", "field", 6)
        client.hincrby("key", "field", -5)
        val v = client.hget("key", "field")
        v.value must be ("1")
      }
    }

    "hincrbyfloat(key, field, value)" must {
      "return correct value after increment" in {
        client.hset("key", "field", "2.5")
        val v = client.hincrbyfloat("key", "field", 1.2F)
        v.value must be (3.7F)
      }

      "return correct value if key does not exist" in {
        val v = client.hincrbyfloat("key", "f1", 2.0F)
        v.value must be (2.0F)
      }

      "return correct value if field does not exist" in {
        client.hset("key", "f1", "abc")
        val v = client.hincrbyfloat("key", "f2", 1.5F)
        v.value must be (1.5F)
      }

      "set the field with correct value" in {
        client.hset("key", "f1", "5.5")
        client.hincrbyfloat("key", "f1", 1.2F)
        val v = client.hget("key", "f1")
        v.value.toFloat must be (6.7F)
      }
    }
  }
}
| dzhg/tedis | src/test/scala/com/github/dzhg/tedis/server/HashSpec.scala | Scala | mit | 9,963 |
package test_expect_failure.scala_test_jacocorunner
import org.scalatest.funsuite._
// Trivial always-green suite; it exists so the surrounding build target has
// something to execute (path suggests a jacoco-runner manual test — confirm).
class EmptyTest extends AnyFunSuite {
  test("empty test") {
    assert(true)
  }
} | bazelbuild/rules_scala | manual_test/scala_test_jacocorunner/EmptyTest.scala | Scala | apache-2.0 | 169 |
package ee.cone.c4ui.dep
import com.squareup.wire.ProtoAdapter
import ee.cone.c4actor.Types.SrcId
import ee.cone.c4actor._
import ee.cone.c4actor.dep.ContextTypes.{ContextId, RoleId, UserId}
import ee.cone.c4actor.dep.request.CurrentTimeRequestProtocol.N_CurrentTimeRequest
import ee.cone.c4actor.dep.{AskByPK, CommonRequestUtilityFactory, Dep, DepFactory}
import ee.cone.c4actor.dep_impl.RequestDep
import ee.cone.c4gate.SessionDataProtocol.{N_RawDataNode, U_RawSessionData}
import ee.cone.c4gate.deep_session.DeepSessionDataProtocol.{U_RawRoleData, U_RawUserData}
import ee.cone.c4gate.deep_session.{DeepRawSessionData, TxDeepRawDataLens, UserLevelAttr}
import ee.cone.c4gate.{KeyGenerator, SessionAttr}
import ee.cone.c4proto.{HasId, ToByteString}
import okio.ByteString
/**
 * Builds `Dep`s that resolve a [[SessionAttr]] to an optional, lens-wrapped
 * [[Access]] over the stored value. Depending on the attribute's meta list the
 * value is read from session-, user- or role-level raw data; missing records
 * are materialized from the attribute's default model.
 */
case class SessionAttrAskFactoryImpl(
  qAdapterRegistry: QAdapterRegistry,
  defaultModelRegistry: DefaultModelRegistry,
  modelAccessFactory: ModelAccessFactory,
  commonRequestFactory: CommonRequestUtilityFactory,
  rawDataAsk: AskByPK[U_RawSessionData],
  rawUserDataAsk: AskByPK[U_RawUserData],
  rawRoleDataAsk: AskByPK[U_RawRoleData],
  idGenUtil: IdGenUtil,
  depFactory: DepFactory
) extends SessionAttrAskFactoryApi with KeyGenerator {
  // Curried variant: fixes the attribute, takes the primary key later.
  def askSessionAttrWithPK[P <: Product](attr: SessionAttr[P]): String ⇒ Dep[Option[Access[P]]] = pk ⇒ askSessionAttr(attr.withPK(pk))

  // Uses the default model registered for the attribute's class as the fallback value.
  def askSessionAttr[P <: Product](attr: SessionAttr[P]): Dep[Option[Access[P]]] =
    askSessionAttrWithDefault(attr, defaultModelRegistry.get[P](attr.className).create)

  // Routes the ask: user-level attributes go through the deep (session/user/role)
  // lookup — honoring a mock role when one is active — otherwise plain session storage.
  def askSessionAttrWithDefault[P <: Product](attr: SessionAttr[P], default: SrcId ⇒ P): Dep[Option[Access[P]]] =
    if (attr.metaList.contains(UserLevelAttr))
      for {
        mockRoleOpt ← commonRequestFactory.askMockRole
        result ← {
          mockRoleOpt match {
            case Some((mockRoleId, editable)) ⇒
              if (editable)
                roleAsk(attr, mockRoleId, default)
              else
                deepAsk(attr, default, Some(""), Some(mockRoleId))
            case None ⇒
              deepAsk(attr, default)
          }
        }
      } yield {
        result
      }
    else
      sessionAsk(attr, default)

  // Resolves the attribute against session-scoped U_RawSessionData keyed by the
  // current context id; creates a default-valued record when none is stored.
  def sessionAsk[P <: Product](attr: SessionAttr[P], default: SrcId ⇒ P): Dep[Option[Access[P]]] = {
    // Lens (de)serializing the typed model into the raw data node via the proto adapter.
    val lens = ProdLens[U_RawSessionData, P](attr.metaList)(
      rawData ⇒ qAdapterRegistry.byId(rawData.dataNode.get.valueTypeId).decode(rawData.dataNode.get.value).asInstanceOf[P],
      value ⇒ rawData ⇒ {
        val valueAdapter = qAdapterRegistry.byName(attr.className)
        val byteString = ToByteString(valueAdapter.encode(value))
        val newDataNode = rawData.dataNode.get.copy(valueTypeId = valueAdapter.id, value = byteString)
        rawData.copy(dataNode = Option(newDataNode))
      }
    )
    // Template record whose generated PK is used to look up the stored value.
    def rawSessionData: ContextId ⇒ U_RawSessionData = contextId ⇒
      U_RawSessionData(
        srcId = "",
        sessionKey = contextId,
        dataNode = Option(
          N_RawDataNode(
            domainSrcId = attr.pk,
            fieldId = attr.id,
            valueTypeId = 0,
            value = ByteString.EMPTY
          )
        )
      )
    import commonRequestFactory._
    for {
      contextId ← askContextId
      rawModel ← rawDataAsk.option(genPK(rawSessionData(contextId), rawDataAdapter))
    } yield {
      val request = rawSessionData(contextId)
      val pk = genPK(request, rawDataAdapter)
      // Fall back to a default-initialized record when nothing is stored yet.
      val value: U_RawSessionData = rawModel.getOrElse({
        val model: P = default(pk)
        lens.set(model)(request.copy(srcId = pk))
      }
      )
      modelAccessFactory.to(value).map(_.to(lens))
    }
  }

  // Resolves the attribute against role-scoped U_RawRoleData for the given role key.
  def roleAsk[P <: Product](attr: SessionAttr[P], roleKey: RoleId, default: SrcId ⇒ P): Dep[Option[Access[P]]] = {
    val dataNode = Option(
      N_RawDataNode(
        domainSrcId = attr.pk,
        fieldId = attr.id,
        valueTypeId = 0,
        value = ByteString.EMPTY
      )
    )
    val lens = ProdLens[U_RawRoleData, P](attr.metaList)(
      rawRoleData ⇒ qAdapterRegistry.byId(rawRoleData.dataNode.get.valueTypeId).decode(rawRoleData.dataNode.get.value).asInstanceOf[P],
      value ⇒ rawRoleData ⇒ {
        val valueAdapter = qAdapterRegistry.byName(attr.className)
        val byteString = ToByteString(valueAdapter.encode(value))
        val newDataNode = rawRoleData.dataNode.get.copy(valueTypeId = valueAdapter.id, value = byteString)
        rawRoleData.copy(dataNode = Option(newDataNode))
      }
    )
    val rawRoleData: U_RawRoleData =
      U_RawRoleData(
        srcId = "",
        roleId = roleKey,
        dataNode = dataNode
      )
    for {
      rawModel ← rawRoleDataAsk.option(genPK(rawRoleData, rawRoleAdapter))
    } yield {
      val pk = genPK(rawRoleData, rawRoleAdapter)
      val value = rawModel.getOrElse({
        val model: P = default(pk)
        lens.set(model)(rawRoleData.copy(srcId = pk))
      }
      )
      modelAccessFactory.to(value).map(_.to(lens))
    }
  }

  // Proto adapters for the three raw storage record types, resolved once.
  lazy val rawDataAdapter: ProtoAdapter[Product] with HasId = qAdapterRegistry.byName(classOf[U_RawSessionData].getName)
  lazy val rawUserAdapter: ProtoAdapter[Product] with HasId = qAdapterRegistry.byName(classOf[U_RawUserData].getName)
  lazy val rawRoleAdapter: ProtoAdapter[Product] with HasId = qAdapterRegistry.byName(classOf[U_RawRoleData].getName)

  // Resolves the attribute across all three storage levels (session, user, role);
  // explicit `userIdOpt`/`roleIdOpt` override the ids otherwise asked from context.
  def deepAsk[P <: Product](attr: SessionAttr[P], default: SrcId ⇒ P, userIdOpt: Option[UserId] = None, roleIdOpt: Option[RoleId] = None): Dep[Option[Access[P]]] = {
    val dataNode = Option(
      N_RawDataNode(
        domainSrcId = attr.pk,
        fieldId = attr.id,
        valueTypeId = 0,
        value = ByteString.EMPTY
      )
    )
    def rawSessionData: ContextId ⇒ U_RawSessionData = contextId ⇒
      U_RawSessionData(
        srcId = "",
        sessionKey = contextId,
        dataNode = dataNode
      )
    def rawUserData: UserId ⇒ U_RawUserData = userId ⇒
      U_RawUserData(
        srcId = "",
        userId = userId,
        dataNode = dataNode
      )
    def rawRoleData: RoleId ⇒ U_RawRoleData = userId ⇒
      U_RawRoleData(
        srcId = "",
        roleId = userId,
        dataNode = dataNode
      )
    import commonRequestFactory._
    for {
      contextId ← askContextId
      rawSession ← rawDataAsk.option(genPK(rawSessionData(contextId), rawDataAdapter))
      userId ← userIdOpt.map(depFactory.resolvedRequestDep).getOrElse(askUserId)
      rawUser ← rawUserDataAsk.option(genPK(rawUserData(userId), rawUserAdapter))
      roleId ← roleIdOpt.map(depFactory.resolvedRequestDep).getOrElse(askRoleId)
      rawRole ← rawRoleDataAsk.option(genPK(rawRoleData(roleId), rawRoleAdapter))
    } yield {
      val rawDataPK = genPK(rawSessionData(contextId), rawDataAdapter)
      val rawUserDataPK = genPK(rawUserData(userId), rawUserAdapter)
      val rawRoleDataPK = genPK(rawRoleData(roleId), rawRoleAdapter)
      val lensRaw = ProdLens[U_RawSessionData, P](attr.metaList)(
        rawSessionData ⇒ qAdapterRegistry.byId(rawSessionData.dataNode.get.valueTypeId).decode(rawSessionData.dataNode.get.value).asInstanceOf[P],
        value ⇒ rawRoleData ⇒ {
          val valueAdapter = qAdapterRegistry.byName(attr.className)
          val byteString = ToByteString(valueAdapter.encode(value))
          val newDataNode = rawRoleData.dataNode.get.copy(valueTypeId = valueAdapter.id, value = byteString)
          rawRoleData.copy(dataNode = Option(newDataNode))
        }
      )
      val lensRawUser = ProdLens[U_RawUserData, P](attr.metaList)(
        rawRoleData ⇒ qAdapterRegistry.byId(rawRoleData.dataNode.get.valueTypeId).decode(rawRoleData.dataNode.get.value).asInstanceOf[P],
        value ⇒ rawRoleData ⇒ {
          val valueAdapter = qAdapterRegistry.byName(attr.className)
          val byteString = ToByteString(valueAdapter.encode(value))
          val newDataNode = rawRoleData.dataNode.get.copy(valueTypeId = valueAdapter.id, value = byteString)
          rawRoleData.copy(dataNode = Option(newDataNode))
        }
      )
      // Default-initialized session/user records used when a level has no stored value.
      val defaultRawData = lensRaw.set(default(rawDataPK))(rawSessionData(contextId).copy(srcId = rawDataPK))
      val defaultRawUserData = lensRawUser.set(default(rawUserDataPK))(rawUserData(userId).copy(srcId = rawUserDataPK))
      val data = DeepRawSessionData[P](rawSession, rawUser, rawRole, (defaultRawData, defaultRawUserData), (rawDataPK, rawUserDataPK, rawRoleDataPK))
      val lens = ProdLens[DeepRawSessionData[P], P](attr.metaList)(
        _.of(qAdapterRegistry),
        value ⇒ deepData ⇒ deepData.set(qAdapterRegistry)(value)(deepData)
      )
      val access: AccessImpl[DeepRawSessionData[P]] = AccessImpl(data, Option(TxDeepRawDataLens(data)), NameMetaAttr("DeepRawSessionData") :: Nil)
      Option(access.to(lens))
    }
  }
}
// Creates a Dep requesting the current time; `eachNSeconds` is forwarded to
// N_CurrentTimeRequest (presumably the refresh granularity — confirm upstream).
case object CurrentTimeAskFactoryImpl extends CurrentTimeAskFactoryApi {
  def askCurrentTime(eachNSeconds: Long): Dep[Long] = new RequestDep[Long](N_CurrentTimeRequest(eachNSeconds))
}
| wregs/c4proto | c4ui-extra/src/main/scala/ee/cone/c4ui/dep/GateAskFactoryImpl.scala | Scala | apache-2.0 | 9,039 |
package com.psyanite.scorm.node
import scala.xml.NodeSeq
/**
 * Shared helpers for node parsers that pull scalar values out of a
 * [[scala.xml.NodeSeq]], treating an empty sequence as `None`.
 */
trait BaseNode {

  /**
   * Returns the text content of the first node in `nodeSeq`,
   * or `None` when the sequence is empty.
   */
  def getText(nodeSeq: NodeSeq): Option[String] =
    // Idiomatic replacement for the Some/None pattern match: map over headOption.
    nodeSeq.headOption.map(_.text)

  /**
   * Returns the value of `attribute` on the first node in `nodeSeq`,
   * or `None` when the sequence is empty or the attribute is absent.
   */
  def getAttributeValue(nodeSeq: NodeSeq, attribute: String): Option[String] =
    nodeSeq.headOption.flatMap(_.attribute(attribute).map(_.text))
}
| psyanite/scorm-parser | src/main/scala/com/psyanite/scorm/node/BaseNode.scala | Scala | apache-2.0 | 421 |
package algorithms.graph.common
/**
* Created by Administrator on 2017/4/5.
*/
/**
 * Mutable, builder-style representation of a weighted directed edge
 * from `src` to `tar`, identified by `id`. Every setter returns `this`
 * so calls can be chained; `weight` defaults to 1.0.
 *
 * NOTE: only `setWeight` validates positivity; the constructor accepts
 * any weight unchecked.
 */
class Edge(
    private var id: String,
    private var src: String,
    private var tar: String,
    private var weight: Double = 1.0) extends Serializable {

  /** @group getParam */
  def getId: String = this.id

  /** @group getParam */
  def getSrc: String = this.src

  /** @group getParam */
  def getTar: String = this.tar

  /** @group getParam */
  def getWeight: Double = this.weight

  /** @group setParam */
  def setId(value: String): this.type = { this.id = value; this }

  /** @group setParam */
  def setSrc(value: String): this.type = { this.src = value; this }

  /** @group setParam */
  def setTar(value: String): this.type = { this.tar = value; this }

  /** @group setParam */
  def setWeight(value: Double): this.type = {
    require(value > 0.0, "权重需大于0!")
    this.weight = value
    this
  }
}
| yhao2014/CkoocNLP | ckooc-ml/src/main/scala/algorithms/graph/common/Edge.scala | Scala | apache-2.0 | 977 |
package com.twitter.finatra.http.integration.doeverything.test
import com.google.common.net.MediaType.JSON_UTF_8
import com.google.inject.{Key, TypeLiteral}
import com.twitter.finagle.http.Status._
import com.twitter.finatra.http.integration.doeverything.main.DoEverythingServer
import com.twitter.finatra.http.integration.doeverything.main.services.DoEverythingService
import com.twitter.finatra.http.test.EmbeddedHttpServer
import com.twitter.finatra.json.JsonDiff._
import com.twitter.inject.Test
/**
 * End-to-end feature tests for [[DoEverythingServer]]: starts the server
 * embedded (with flags -magicNum=1 and -moduleMagicNum=2) and exercises its
 * routes, status codes, JSON (de)serialization, validation errors, exception
 * mappers, and injector bindings over real HTTP calls.
 */
class DoEverythingServerFeatureTest extends Test {

  val server = new EmbeddedHttpServer(
    extraArgs = Array("-magicNum=1", "-moduleMagicNum=2"),
    twitterServer = new DoEverythingServer)

  val doEverythingService = server.injector.instance[DoEverythingService]
  val namedExampleString = server.injector.instance[String]("example")

  "ExampleServer" should {
    "named string" in {
      namedExampleString should equal("named")
    }

    "response to /example" in {
      val response = server.httpGet("/example/routing/always")
      response.statusCode should equal(200)
      response.contentString should equal("always response")
    }

    "json response to /example" in {
      val response = server.httpGet("/example/routing/json/1")
      response.statusCode should equal(200)
      println(response.contentString)
      jsonDiff(response.contentString, """{"id":"1","name":"bob","magic":"1","module_magic":"2"}""")
    }

    "ok" in {
      server.httpGet(
        "/ok",
        andExpect = Ok)
    }

    "created" in {
      server.httpGet(
        "/created",
        andExpect = Created)
    }

    "accepted" in {
      server.httpGet(
        "/accepted",
        andExpect = Accepted,
        withBody = "accepted")
    }

    "notfound" in {
      server.httpGet(
        "/notfound",
        andExpect = NotFound)
    }

    "notfound exc" in {
      server.httpGet(
        "/notfoundexception",
        andExpect = NotFound)
    }

    "badrequest" in {
      server.httpGet(
        "/badrequest",
        andExpect = BadRequest)
    }

    "forbidden" in {
      server.httpGet(
        "/forbidden",
        andExpect = Forbidden)
    }

    "methodnotallowed" in {
      server.httpGet(
        "/methodnotallowed",
        andExpect = MethodNotAllowed)
    }

    "unavailable" in {
      server.httpGet(
        "/unavailable",
        andExpect = ServiceUnavailable)
    }

    "unauthorized" in {
      server.httpGet(
        "/unauthorized",
        andExpect = Unauthorized)
    }

    "conflict" in {
      server.httpGet(
        "/conflict",
        andExpect = Conflict)
    }

    "servererror exc" in {
      server.httpGet(
        "/servererrorexception",
        andExpect = InternalServerError)
    }

    "serviceunavailable exception" in {
      server.httpGet(
        "/serviceunavailableexception",
        andExpect = ServiceUnavailable)
    }

    "serviceunavailable exception builder status code" in {
      server.httpGet(
        "/responsebuilder_status_code",
        andExpect = ServiceUnavailable)
    }

    "serviceunavailable exception builder" in {
      server.httpGet(
        "/responsebuilder_status",
        andExpect = ServiceUnavailable)
    }

    "redirect" in {
      server.httpGet(
        "/redirect",
        andExpect = TemporaryRedirect)
    }

    "found" in {
      server.httpGet(
        "/found",
        andExpect = Found)
    }

    "future" in {
      server.httpGet(
        "/future",
        andExpect = Ok,
        withBody = "future")
    }

    "post" in {
      server.httpPost(
        "/foo",
        postBody = "",
        andExpect = Ok,
        withBody = "bar")
    }

    "post form" in {
      server.httpFormPost(
        "/formPost",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Ok,
        withBody = "bob")
    }

    "formPostView" in {
      server.httpFormPost(
        "/formPostView",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Ok,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")
    }

    "getView" in {
      server.httpGet(
        "/getView?age=18&name=bob",
        andExpect = Ok,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")
    }

    "formPostViewFromBuilderViewWithDiffTemplateThanAnnotation" in {
      server.httpFormPost(
        "/formPostViewFromBuilderView",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Ok,
        withBody = "age2:18\nname2:bob\nuser1\nuser2\n")
    }

    "formPostViewFromBuilderHtml" in {
      server.httpFormPost(
        "/formPostViewFromBuilderHtml",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Ok,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")
    }

    "formPostViewFromBuilderCreatedView" in {
      val response = server.httpFormPost(
        "/formPostViewFromBuilderCreatedView",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Created,
        withBody = "age2:18\nname2:bob\nuser1\nuser2\n")

      response.location should equal(Some("/foo/1"))
    }

    "formPostViewFromBuilderCreatedHtml" in {
      server.httpFormPost(
        "/formPostViewFromBuilderCreatedHtml",
        params = Map("name" -> "bob", "age" -> "18"),
        andExpect = Created,
        withBody = "age:18\nname:bob\nuser1\nuser2\n")

      response.location should equal(Some("/foo/1"))
    }

    "post user with injected group_id from route param" in {
      server.httpPost(
        "/groups/123/users",
        postBody =
          """
          {
            "name" : "Bob"
          }
          """",
        andExpect = Created,
        withJsonBody =
          """
          {
            "group_id":123,
            "name":"Bob"
          }
          """)
    }

    "null" in {
      pending
      server.httpGet(
        "/null",
        andExpect = Ok,
        withBody = "")
    }

    "empty" in {
      server.httpGet(
        "/empty",
        andExpect = Ok,
        withBody = "")
    }

    "unit" in {
      server.httpGet(
        "/unit",
        andExpect = Ok,
        withBody = "")
    }

    "not found path" in {
      server.httpGet(
        "/sdafasdfsadfsadfsafd",
        andExpect = NotFound,
        withBody = "/sdafasdfsadfsadfsafd route not found")
    }

    "complex path" in {
      server.httpGet(
        "/complexpath/steve",
        andExpect = Ok,
        withBody = "done steve 5000")
    }

    "complex query" in {
      server.httpGet(
        "/complexquery?name=fred",
        andExpect = Ok,
        withBody = "done fred 5000")
    }

    "testfile" in {
      server.httpGet(
        "/testfile",
        andExpect = Ok,
        withBody = "testfile123")
    }

    "testfile when not found" in {
      server.httpGet(
        "/testfileWhenNotfound",
        andExpect = NotFound,
        withBody = "/doesntexist.txt not found")
    }

    "exception" in {
      server.httpGet(
        "/exception",
        andExpect = InternalServerError)
    }

    "pathUrl" in {
      server.httpGet(
        "/pathUrl",
        andExpect = Ok,
        withBody = "http://localhost.twitter.com/pathUrl/")
    }

    "path" in {
      server.httpGet(
        "/path",
        andExpect = Ok,
        withBody = "http://localhost.twitter.com/path/")
    }

    "get to put" in {
      server.httpGet(
        "/put",
        andExpect = NotFound) //TODO: Should be 405 Method Not Allowed
    }

    "put to put" in {
      server.httpPut(
        "/put",
        putBody = "asdf",
        andExpect = Ok,
        withBody = "asdf")
    }

    "post to putAndPost" in {
      server.httpPost(
        "/putAndPost",
        postBody = "1",
        andExpect = Ok,
        withBody = "POST1")
    }

    "put to putAndPost" in {
      server.httpPut(
        "/putAndPost",
        putBody = "2",
        andExpect = Ok,
        withBody = "PUT2")
    }

    "get to putAndPost" in {
      server.httpGet(
        "/putAndPost",
        andExpect = NotFound) //TODO: Should be 405 Method Not Allowed
    }

    "post to postAndPut" in {
      server.httpPost(
        "/postAndPut",
        postBody = "1",
        andExpect = Ok,
        withBody = "POST1")
    }

    "put to postAndPut" in {
      server.httpPut(
        "/postAndPut",
        putBody = "2",
        andExpect = Ok,
        withBody = "PUT2")
    }

    "get to postAndPut" in {
      server.httpGet(
        "/postAndPut",
        andExpect = NotFound) //TODO: Should be 405 Method Not Allowed
    }

    "true" in {
      server.httpGet(
        "/true",
        andExpect = Ok,
        withBody = "true")
    }

    "index root" in {
      server.httpGet(
        "/index/",
        andExpect = Ok,
        withBody = "testindex")
    }

    "index file without extension" in {
      server.httpGet(
        "/index/testfile",
        andExpect = Ok,
        withBody = "testindex")
    }

    "index file with extension" in {
      server.httpGet(
        "/index/testfile.txt",
        andExpect = Ok,
        withBody = "testfile123")
    }

    "implicitOkAndException when ok" in {
      server.httpGet(
        "/implicitOkAndException?hi",
        andExpect = Ok)
    }

    "implicitOkAndException when bad request exception" in {
      server.httpGet(
        "/implicitOkAndException",
        andExpect = BadRequest)
    }

    "slow" in {
      pending // manually run to test fix for go/jira/CSL-565
      server.httpGet(
        "/slow",
        andExpect = Ok)
    }

    "response builder" in {
      val response = server.httpGet(
        "/builderCreatedWithHeader",
        andExpect = Created,
        withLocation = "http://foo.com/1")

      response.headers().get("a") should equal("b")
    }

    "request injection" in {
      server.httpGet(
        "/requestInjection?id=5&id2=6&id4=7",
        andExpect = Ok,
        withBody = "18")
    }

    "request injections not found" in {
      server.httpGet(
        "/requestInjectionsNotFound",
        andExpect = InternalServerError,
        withErrors = Seq(
          "internal server error"))
    }

    "GET request injections not available" in {
      server.httpGet(
        "/requestInjectionsNotAvailable",
        andExpect = InternalServerError,
        withErrors = Seq(
          "internal server error"))
    }

    "POST request injections not available" in {
      server.httpPost(
        "/requestInjectionsNotAvailable",
        "{}",
        andExpect = InternalServerError,
        withErrors = Seq(
          "internal server error"))
    }

    "POST empty json request injections not available" in {
      server.httpPost(
        "/requestInjectionsNotAvailable",
        "",
        andExpect = InternalServerError,
        withErrors = Seq(
          "internal server error"))
    }

    "POST invalid json request injections not available" in {
      server.httpPost(
        "/requestInjectionsNotAvailable",
        "{abc",
        andExpect = BadRequest,
        withErrors = Seq(
          "Unexpected character ('a' (code 97)): was expecting double-quote to start field name"))
    }

    "GET json user" in {
      val response = server.httpGet(
        "/users/mary",
        andExpect = Ok,
        withJsonBody = """{ "name" : "mary" }""")

      response.headerMap("content-type") should equal(JSON_UTF_8.toString)
    }

    "POST json user" in {
      server.httpPost(
        "/users",
        """
          {
            "name" : "bob"
          }
        """,
        andExpect = Ok,
        withBody = "bob")
    }

    "POST json user with missing required field" in {
      server.httpPost(
        "/users",
        """
          {
          }
        """,
        andExpect = BadRequest,
        withErrors = Seq("name is a required field"))
    }

    "POST json user with failed field validation" in {
      server.httpPost(
        "/users",
        """
          {
            "name": "a"
          }
        """,
        andExpect = BadRequest,
        withErrors = Seq("name size [1] is not between 2 and 20"))
    }

    "POST json user with failed method validation" in {
      server.httpPost(
        "/users",
        """
          {
            "name": "foo"
          }
        """,
        andExpect = BadRequest,
        withErrors = Seq("name cannot be foo"))
    }

    "POST json user with invalid field validation" in {
      server.httpPost(
        "/userWithInvalidFieldValidation",
        """
          {
            "name": "a"
          }
        """,
        andExpect = InternalServerError,
        withErrors = Seq("internal server error"))
    }

    "POST json user with invalid method validation" in {
      server.httpPost(
        "/userWithInvalidMethodValidation",
        """
          {
            "name": "foo"
          }
        """,
        andExpect = InternalServerError,
        withErrors = Seq("internal server error"))
    }

    "POST json user with invalid content type" in {
      server.httpPost(
        "/users",
        """
          {
            "name" : "bob"
          }
        """,
        contentType = "foo",
        andExpect = BadRequest)
    }

    "POST json user with missing required field when message body reader uses intermediate JsonNode" in {
      pending //IllegalArgumentException (ObjectMapper.java:2774)
      server.httpPost(
        "/userWithMessageBodyReader",
        """
          {
          }
        """,
        andExpect = BadRequest,
        withErrors = Seq("name is a required field"))
    }

    "POST json user with method validation error when message body reader uses intermediate JsonNode" in {
      pending //IllegalArgumentException (ObjectMapper.java:2774)
      server.httpPost(
        "/userWithMessageBodyReader",
        """
          {
            "name": "foo"
          }
        """,
        andExpect = BadRequest,
        withErrors = Seq("name cannot be foo"))
    }

    "injector test" in {
      server.injector.instance[String]("example") should equal("named")

      val exampleService = server.injector.instance[DoEverythingService]
      exampleService should not equal (null)

      server.injector.instance(classOf[DoEverythingService]) should equal(exampleService)

      val key = Key.get(new TypeLiteral[DoEverythingService]() {})
      server.injector.instance(key) should equal(exampleService)
    }

    "array" in {
      server.httpGet(
        "/array",
        andExpect = Ok,
        withJsonBody = """["a", "b"]""")
    }

    "set" in {
      server.httpGet(
        "/set",
        andExpect = Ok,
        withJsonBody = """["a", "b"]""")
    }

    "seq" in {
      server.httpGet(
        "/seq",
        andExpect = Ok,
        withJsonBody = """["a", "b"]""")
    }

    "delete" in {
      server.httpDelete(
        "/delete",
        andExpect = Ok,
        withBody = "delete")
    }

    "options" in {
      server.httpOptions(
        "/options",
        andExpect = Ok,
        withBody = "options")
    }

    "head" in {
      server.httpHead(
        "/head",
        andExpect = Conflict,
        withBody = "") //HEAD requests cannot have bodies
    }

    "patch" in {
      server.httpPatch(
        "/patch",
        andExpect = Ok,
        withBody = "patch")
    }

    "non guice controller" in {
      server.httpGet(
        "/NonGuice",
        andExpect = Ok,
        withBody = "pong")
    }

    "GET with query parameters as string sequence" in {
      server.httpGet(
        "/RequestWithQueryParamSeqString?foo=1&foo=2&foo=3",
        andExpect = Ok,
        withJsonBody =
          """
            |{ "foo": ["11", "21", "31"] }
          """.stripMargin
      )
    }

    "GET with query parameters as long sequence" in {
      server.httpGet(
        "/RequestWithQueryParamSeqLong?foo=1&foo=2&foo=3",
        andExpect = Ok,
        withJsonBody =
          """
            |{ "foo": [2, 3, 4] }
          """.stripMargin
      )
    }
  }

  // The remaining tests are registered directly on the spec (outside the
  // "ExampleServer" should-block above); they cover exception mappers and
  // request case-class validation.
  "HttpResponseException" in {
    server.httpGet(
      "/HttpResponseException",
      andExpect = Conflict,
      withBody = "conflicted")
  }

  "toFutureException" in {
    server.httpGet(
      "/toFutureException",
      andExpect = Conflict,
      withBody = "conflicted")
  }

  "HttpExceptionPlain" in {
    server.httpGet(
      "/HttpExceptionPlain",
      andExpect = Created,
      withBody = "foo")
  }

  "HttpExceptionErrors" in {
    server.httpGet(
      "/HttpExceptionErrors",
      andExpect = Created,
      withJsonBody =
        """
        {
          "errors" : [ "foo1", "foo2" ]
        }
        """)
  }

  "NotFoundException" in {
    server.httpGet(
      "/NotFoundException",
      andExpect = NotFound,
      withJsonBody =
        """
        {
          "errors" : [ "foo1" ]
        }
        """)
  }

  "ConflictException" in {
    server.httpGet(
      "/ConflictException",
      andExpect = Conflict,
      withJsonBody =
        """
        {
          "errors" : [ "foo1" ]
        }
        """)
  }

  "InternalServerErrorExceptionPlain" in {
    server.httpGet(
      "/InternalServerErrorExceptionPlain",
      andExpect = InternalServerError,
      withBody = "foo1")
  }

  "NotAcceptableException" in {
    server.httpGet(
      "/NotAcceptableException",
      andExpect = NotAcceptable,
      withJsonBody =
        """
        {
          "errors" : [ "foo1" ]
        }
        """)
  }

  "Unserializable class field" in {
    server.httpGet(
      "/UnserializableClassField",
      andExpect = InternalServerError,
      withJsonBody =
        """
        {
          "errors" : [ "internal server error" ]
        }
        """)
  }

  "FooException" in {
    val response = server.httpGet(
      "/FooException/42",
      andExpect = Forbidden,
      withBody = "foo")
    response.headerMap("Foo-ID") should equal("42")
  }

  "BarException" in {
    val response = server.httpGet(
      "/BarException",
      andExpect = Unauthorized,
      withBody = "bar")
    response.headerMap.contains("Foo-ID") should equal(false)
    response.headerMap("Bar-ID") should equal("123")
  }

  "BazException" in {
    val response = server.httpGet(
      "/BazException",
      andExpect = Forbidden,
      withBody = "foo")
    response.headerMap("Foo-ID") should equal("321")
  }

  "NoSuchMethodException" in {
    server.httpGet(
      "/NoSuchMethodException",
      andExpect = InternalServerError)
  }

  "UsersRequest" in {
    server.httpGet(
      path = "/users?start_date=2013&max=10&verbose=true",
      andExpect = Ok,
      withJsonBody = """
      {
        "start_date": "2013-01-01T00:00:00.000Z",
        "max": 10,
        "verbose": true
      }
                     """)

    server.httpGet(
      path = "/users?max=10",
      andExpect = Ok,
      withJsonBody = """
      {
        "max": 10,
        "verbose": false
      }
                     """)

    server.httpGet(
      path = "/users?max=10&verbose=true",
      andExpect = Ok,
      withJsonBody = """
      {
        "max": 10,
        "verbose": true
      }
                     """)

    server.httpGet(
      path = "/users?verbose=5",
      andExpect = BadRequest,
      withJsonBody = """
      {
        "errors": [
          "max is a required field",
          "verbose's value '5' is not a valid boolean"
        ]
      }
                     """)
  }
}
| kaushik94/finatra | http/src/test/scala/com/twitter/finatra/http/integration/doeverything/test/DoEverythingServerFeatureTest.scala | Scala | apache-2.0 | 19,410 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.{BaseShuffleHandle, IndexShuffleBlockResolver, ShuffleWriter}
import org.apache.spark.storage.ShuffleBlockId
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.ExternalSorter
/**
 * Sort-based [[ShuffleWriter]]: feeds all of a map task's records through an
 * [[ExternalSorter]] and then commits a single partitioned data file plus the
 * matching index file via the [[IndexShuffleBlockResolver]].
 */
private[spark] class SortShuffleWriter[K, V, C](
    shuffleBlockResolver: IndexShuffleBlockResolver,
    handle: BaseShuffleHandle[K, V, C],
    mapId: Int,
    context: TaskContext)
  extends ShuffleWriter[K, V] with Logging {

  private val dep = handle.dependency

  private val blockManager = SparkEnv.get.blockManager

  // Created lazily in write(); nulled out again in stop() after cleanup.
  private var sorter: ExternalSorter[K, V, _] = null

  // Are we in the process of stopping? Because map tasks can call stop() with success = true
  // and then call stop() with success = false if they get an exception, we want to make sure
  // we don't try deleting files, etc twice.
  private var stopping = false

  // Result reported to the scheduler; populated by write(), returned by stop(true).
  private var mapStatus: MapStatus = null

  private val writeMetrics = context.taskMetrics().shuffleWriteMetrics

  /** Write a bunch of records to this task's output */
  override def write(records: Iterator[Product2[K, V]]): Unit = {
    sorter = if (dep.mapSideCombine) {
      new ExternalSorter[K, V, C](
        context, dep.aggregator, Some(dep.partitioner), dep.keyOrdering, dep.serializer)
    } else {
      // In this case we pass neither an aggregator nor an ordering to the sorter, because we don't
      // care whether the keys get sorted in each partition; that will be done on the reduce side
      // if the operation being run is sortByKey.
      new ExternalSorter[K, V, V](
        context, aggregator = None, Some(dep.partitioner), ordering = None, dep.serializer)
    }
    sorter.insertAll(records)

    // Don't bother including the time to open the merged output file in the shuffle write time,
    // because it just opens a single file, so is typically too fast to measure accurately
    // (see SPARK-3570). Write to a temp file first and atomically commit it via the resolver.
    val output = shuffleBlockResolver.getDataFile(dep.shuffleId, mapId)
    val tmp = Utils.tempFileWith(output)
    try {
      val blockId = ShuffleBlockId(dep.shuffleId, mapId, IndexShuffleBlockResolver.NOOP_REDUCE_ID)
      val partitionLengths = sorter.writePartitionedFile(blockId, tmp)
      shuffleBlockResolver.writeIndexFileAndCommit(dep.shuffleId, mapId, partitionLengths, tmp)
      mapStatus = MapStatus(blockManager.shuffleServerId, partitionLengths)
    } finally {
      // writeIndexFileAndCommit normally renames tmp away; delete any leftover on failure.
      if (tmp.exists() && !tmp.delete()) {
        logError(s"Error while deleting temp file ${tmp.getAbsolutePath}")
      }
    }
  }

  /**
   * Close this writer, passing along whether the map completed.
   * Returns the map status on the first successful stop, None otherwise;
   * repeated calls (see `stopping`) always return None.
   */
  override def stop(success: Boolean): Option[MapStatus] = {
    try {
      // Expression form instead of early `return`s: identical control flow,
      // and the finally block below still always runs.
      if (stopping) {
        None
      } else {
        stopping = true
        if (success) Option(mapStatus) else None
      }
    } finally {
      // Clean up our sorter, which may have its own intermediate files
      if (sorter != null) {
        val startTime = System.nanoTime()
        sorter.stop()
        writeMetrics.incWriteTime(System.nanoTime - startTime)
        sorter = null
      }
    }
  }
}
private[spark] object SortShuffleWriter {

  /**
   * Whether this shuffle may skip merge-sorting entirely and use the
   * bypass path: only when there is no map-side aggregation and the
   * number of output partitions is at or below the configured threshold.
   */
  def shouldBypassMergeSort(conf: SparkConf, dep: ShuffleDependency[_, _, _]): Boolean = {
    if (dep.mapSideCombine) {
      // Map-side aggregation needs records sorted by key, so we can never bypass.
      false
    } else {
      dep.partitioner.numPartitions <=
        conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
    }
  }
}
| bravo-zhang/spark | core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala | Scala | apache-2.0 | 4,507 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.io.{StringWriter, ByteArrayOutputStream}
import com.fasterxml.jackson.core._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.types.{StructField, StructType, StringType, DataType}
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
import scala.util.parsing.combinator.RegexParsers
// One step of a parsed JSON path expression (produced by JsonPathParser below).
private[this] sealed trait PathInstruction
private[this] object PathInstruction {
  // Marks an array access; always followed by an Index or a Wildcard.
  private[expressions] case object Subscript extends PathInstruction
  // Matches any element/field: produced for `..`, `.*`, `['*']` and `[*]`.
  private[expressions] case object Wildcard extends PathInstruction
  // Marks an object-field access; always followed by a Named instruction.
  private[expressions] case object Key extends PathInstruction
  // A concrete array index, e.g. the 123 in `[123]`.
  private[expressions] case class Index(index: Long) extends PathInstruction
  // A concrete field name, e.g. `name` in `.name` or `['name']`.
  private[expressions] case class Named(name: String) extends PathInstruction
}
// Controls how matched values are emitted by GetJsonObject.evaluatePath.
private[this] sealed trait WriteStyle
private[this] object WriteStyle {
  // Emit strings without surrounding quotes (used for a top-level scalar match).
  private[expressions] case object RawStyle extends WriteStyle
  // Emit regular quoted JSON; required once matches are written inside an array.
  private[expressions] case object QuotedStyle extends WriteStyle
  // Flatten matched arrays into the enclosing array instead of nesting them.
  private[expressions] case object FlattenStyle extends WriteStyle
}
// Combinator grammar that turns a JSON path string such as `$.a[0].*` into a
// flat List[PathInstruction] for GetJsonObject to interpret.
private[this] object JsonPathParser extends RegexParsers {
  import PathInstruction._

  // Every path must start with '$' (the document root).
  def root: Parser[Char] = '$'

  // A non-negative decimal literal, used for array indices.
  def long: Parser[Long] = "\\\\d+".r ^? {
    case x => x.toLong
  }

  // parse `[*]` and `[123]` subscripts
  def subscript: Parser[List[PathInstruction]] =
    for {
      operand <- '[' ~> ('*' ^^^ Wildcard | long ^^ Index) <~ ']'
    } yield {
      Subscript :: operand :: Nil
    }

  // parse `.name` or `['name']` child expressions
  def named: Parser[List[PathInstruction]] =
    for {
      name <- '.' ~> "[^\\\\.\\\\[]+".r | "[\\\\'" ~> "[^\\\\'\\\\?]+" <~ "\\\\']"
    } yield {
      Key :: Named(name) :: Nil
    }

  // child wildcards: `..`, `.*` or `['*']`
  def wildcard: Parser[List[PathInstruction]] =
    (".*" | "['*']") ^^^ List(Wildcard)

  def node: Parser[List[PathInstruction]] =
    wildcard |
      named |
      subscript

  // A full path: the root anchor followed by any number of nodes, flattened.
  val expression: Parser[List[PathInstruction]] = {
    phrase(root ~> rep(node) ^^ (x => x.flatten))
  }

  // Returns None on any parse failure; callers treat that as an invalid path.
  def parse(str: String): Option[List[PathInstruction]] = {
    this.parseAll(expression, str) match {
      case Success(result, _) =>
        Some(result)
      case NoSuccess(msg, next) =>
        None
    }
  }
}
// A single Jackson JsonFactory shared by the JSON expressions in this file.
private[this] object SharedFactory {
  val jsonFactory = new JsonFactory()

  // Enabled for Hive compatibility
  jsonFactory.enable(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS)
}
/**
 * Extracts json object from a json string based on json path specified, and returns json string
 * of the extracted json object. It will return null if the input json string is invalid.
 *
 * Implementation note: the path is parsed once (cached when the path expression is
 * foldable) and then evaluated against Jackson's streaming parser/generator, so the
 * input JSON is never fully materialized as a tree.
 */
case class GetJsonObject(json: Expression, path: Expression)
  extends BinaryExpression with ExpectsInputTypes with CodegenFallback {

  import SharedFactory._
  import PathInstruction._
  import WriteStyle._
  import com.fasterxml.jackson.core.JsonToken._

  override def left: Expression = json
  override def right: Expression = path
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
  override def dataType: DataType = StringType
  override def prettyName: String = "get_json_object"

  // Parsed once for foldable paths; non-foldable paths are re-parsed per row in eval().
  @transient private lazy val parsedPath = parsePath(path.eval().asInstanceOf[UTF8String])

  // Returns the matched fragment as a UTF8String, or null when the json/path is
  // null, the path is invalid, nothing matched, or the JSON fails to parse.
  override def eval(input: InternalRow): Any = {
    val jsonStr = json.eval(input).asInstanceOf[UTF8String]
    if (jsonStr == null) {
      return null
    }

    val parsed = if (path.foldable) {
      parsedPath
    } else {
      parsePath(path.eval(input).asInstanceOf[UTF8String])
    }

    if (parsed.isDefined) {
      try {
        Utils.tryWithResource(jsonFactory.createParser(jsonStr.getBytes)) { parser =>
          val output = new ByteArrayOutputStream()
          val matched = Utils.tryWithResource(
            jsonFactory.createGenerator(output, JsonEncoding.UTF8)) { generator =>
            parser.nextToken()
            evaluatePath(parser, generator, RawStyle, parsed.get)
          }
          if (matched) {
            UTF8String.fromBytes(output.toByteArray)
          } else {
            null
          }
        }
      } catch {
        case _: JsonProcessingException => null
      }
    } else {
      null
    }
  }

  // Parses a path string into instructions; null input or a malformed path yields None.
  private def parsePath(path: UTF8String): Option[List[PathInstruction]] = {
    if (path != null) {
      JsonPathParser.parse(path.toString)
    } else {
      None
    }
  }

  // advance to the desired array index, assumes to start at the START_ARRAY token
  // Returns a function from the remaining index count to "did f() write anything".
  private def arrayIndex(p: JsonParser, f: () => Boolean): Long => Boolean = {
    case _ if p.getCurrentToken == END_ARRAY =>
      // terminate, nothing has been written
      false

    case 0 =>
      // we've reached the desired index
      val dirty = f()

      while (p.nextToken() != END_ARRAY) {
        // advance the token stream to the end of the array
        p.skipChildren()
      }

      dirty

    case i if i > 0 =>
      // skip this token and evaluate the next
      p.skipChildren()
      p.nextToken()
      arrayIndex(p, f)(i - 1)
  }

  /**
   * Evaluate a list of JsonPath instructions, returning a bool that indicates if any leaf nodes
   * have been written to the generator
   */
  private def evaluatePath(
      p: JsonParser,
      g: JsonGenerator,
      style: WriteStyle,
      path: List[PathInstruction]): Boolean = {
    // Dispatch on the current parser token together with the remaining path.
    (p.getCurrentToken, path) match {
      case (VALUE_STRING, Nil) if style == RawStyle =>
        // there is no array wildcard or slice parent, emit this string without quotes
        if (p.hasTextCharacters) {
          g.writeRaw(p.getTextCharacters, p.getTextOffset, p.getTextLength)
        } else {
          g.writeRaw(p.getText)
        }
        true

      case (START_ARRAY, Nil) if style == FlattenStyle =>
        // flatten this array into the parent
        var dirty = false
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, style, Nil)
        }
        dirty

      case (_, Nil) =>
        // general case: just copy the child tree verbatim
        g.copyCurrentStructure(p)
        true

      case (START_OBJECT, Key :: xs) =>
        var dirty = false
        while (p.nextToken() != END_OBJECT) {
          if (dirty) {
            // once a match has been found we can skip other fields
            p.skipChildren()
          } else {
            dirty = evaluatePath(p, g, style, xs)
          }
        }
        dirty

      case (START_ARRAY, Subscript :: Wildcard :: Subscript :: Wildcard :: xs) =>
        // special handling for the non-structure preserving double wildcard behavior in Hive
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, FlattenStyle, xs)
        }
        g.writeEndArray()
        dirty

      case (START_ARRAY, Subscript :: Wildcard :: xs) if style != QuotedStyle =>
        // retain Flatten, otherwise use Quoted... cannot use Raw within an array
        val nextStyle = style match {
          case RawStyle => QuotedStyle
          case FlattenStyle => FlattenStyle
          case QuotedStyle => throw new IllegalStateException()
        }

        // temporarily buffer child matches, the emitted json will need to be
        // modified slightly if there is only a single element written
        val buffer = new StringWriter()

        var dirty = 0
        Utils.tryWithResource(jsonFactory.createGenerator(buffer)) { flattenGenerator =>
          flattenGenerator.writeStartArray()

          while (p.nextToken() != END_ARRAY) {
            // track the number of array elements and only emit an outer array if
            // we've written more than one element, this matches Hive's behavior
            dirty += (if (evaluatePath(p, flattenGenerator, nextStyle, xs)) 1 else 0)
          }
          flattenGenerator.writeEndArray()
        }

        val buf = buffer.getBuffer
        if (dirty > 1) {
          g.writeRawValue(buf.toString)
        } else if (dirty == 1) {
          // remove outer array tokens
          g.writeRawValue(buf.substring(1, buf.length()-1))
        } // else do not write anything

        dirty > 0

      case (START_ARRAY, Subscript :: Wildcard :: xs) =>
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          // wildcards can have multiple matches, continually update the dirty count
          dirty |= evaluatePath(p, g, QuotedStyle, xs)
        }
        g.writeEndArray()

        dirty

      case (START_ARRAY, Subscript :: Index(idx) :: (xs@Subscript :: Wildcard :: _)) =>
        p.nextToken()
        // we're going to have 1 or more results, switch to QuotedStyle
        arrayIndex(p, () => evaluatePath(p, g, QuotedStyle, xs))(idx)

      case (START_ARRAY, Subscript :: Index(idx) :: xs) =>
        p.nextToken()
        arrayIndex(p, () => evaluatePath(p, g, style, xs))(idx)

      case (FIELD_NAME, Named(name) :: xs) if p.getCurrentName == name =>
        // exact field match
        if (p.nextToken() != JsonToken.VALUE_NULL) {
          evaluatePath(p, g, style, xs)
        } else {
          false
        }

      case (FIELD_NAME, Wildcard :: xs) =>
        // wildcard field match
        p.nextToken()
        evaluatePath(p, g, style, xs)

      case _ =>
        // token/path combination does not match: skip this subtree entirely
        p.skipChildren()
        false
    }
  }
}
case class JsonTuple(children: Seq[Expression])
extends Generator with CodegenFallback {
import SharedFactory._
override def nullable: Boolean = {
// a row is always returned
false
}
// if processing fails this shared value will be returned
@transient private lazy val nullRow: Seq[InternalRow] =
new GenericInternalRow(Array.ofDim[Any](fieldExpressions.length)) :: Nil
// the json body is the first child
@transient private lazy val jsonExpr: Expression = children.head
// the fields to query are the remaining children
@transient private lazy val fieldExpressions: Seq[Expression] = children.tail
// eagerly evaluate any foldable the field names
@transient private lazy val foldableFieldNames: IndexedSeq[String] = {
fieldExpressions.map {
case expr if expr.foldable => expr.eval().asInstanceOf[UTF8String].toString
case _ => null
}.toIndexedSeq
}
// and count the number of foldable fields, we'll use this later to optimize evaluation
@transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)
override def elementTypes: Seq[(DataType, Boolean, String)] = fieldExpressions.zipWithIndex.map {
case (_, idx) => (StringType, true, s"c$idx")
}
override def prettyName: String = "json_tuple"
override def checkInputDataTypes(): TypeCheckResult = {
if (children.length < 2) {
TypeCheckResult.TypeCheckFailure(s"$prettyName requires at least two arguments")
} else if (children.forall(child => StringType.acceptsType(child.dataType))) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(s"$prettyName requires that all arguments are strings")
}
}
override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
val json = jsonExpr.eval(input).asInstanceOf[UTF8String]
if (json == null) {
return nullRow
}
try {
Utils.tryWithResource(jsonFactory.createParser(json.getBytes)) {
parser => parseRow(parser, input)
}
} catch {
case _: JsonProcessingException =>
nullRow
}
}
  /**
   * Walks the top level of a JSON object, copying the value of each requested
   * field into its slot in the output row. Fields absent from the document, or
   * explicitly null, leave a null in the row; a non-object document produces
   * the all-null row.
   */
  private def parseRow(parser: JsonParser, input: InternalRow): Seq[InternalRow] = {
    // only objects are supported
    if (parser.nextToken() != JsonToken.START_OBJECT) {
      return nullRow
    }
    // evaluate the field names as String rather than UTF8String to
    // optimize lookups from the json token, which is also a String
    val fieldNames = if (constantFields == fieldExpressions.length) {
      // typically the user will provide the field names as foldable expressions
      // so we can use the cached copy
      foldableFieldNames
    } else if (constantFields == 0) {
      // none are foldable so all field names need to be evaluated from the input row
      fieldExpressions.map(_.eval(input).asInstanceOf[UTF8String].toString)
    } else {
      // if there is a mix of constant and non-constant expressions
      // prefer the cached copy when available
      foldableFieldNames.zip(fieldExpressions).map {
        case (null, expr) => expr.eval(input).asInstanceOf[UTF8String].toString
        case (fieldName, _) => fieldName
      }
    }
    val row = Array.ofDim[Any](fieldNames.length)
    // start reading through the token stream, looking for any requested field names
    while (parser.nextToken() != JsonToken.END_OBJECT) {
      if (parser.getCurrentToken == JsonToken.FIELD_NAME) {
        // check to see if this field is desired in the output
        // (indexOf: on duplicate field names only the first slot is filled)
        val idx = fieldNames.indexOf(parser.getCurrentName)
        if (idx >= 0) {
          // it is, copy the child tree to the correct location in the output row
          val output = new ByteArrayOutputStream()
          // write the output directly to UTF8 encoded byte array
          if (parser.nextToken() != JsonToken.VALUE_NULL) {
            Utils.tryWithResource(jsonFactory.createGenerator(output, JsonEncoding.UTF8)) {
              generator => copyCurrentStructure(generator, parser)
            }
            row(idx) = UTF8String.fromBytes(output.toByteArray)
          }
        }
      }
      // always skip children, it's cheap enough to do even if copyCurrentStructure was called
      parser.skipChildren()
    }
    new GenericInternalRow(row) :: Nil
  }
  /**
   * Copies the JSON value at the parser's current position into the generator.
   * String values are emitted raw (without enclosing quotes) because the output
   * column is itself a string; null values must be handled by the caller.
   */
  private def copyCurrentStructure(generator: JsonGenerator, parser: JsonParser): Unit = {
    parser.getCurrentToken match {
      // if the user requests a string field it needs to be returned without enclosing
      // quotes which is accomplished via JsonGenerator.writeRaw instead of JsonGenerator.write
      case JsonToken.VALUE_STRING if parser.hasTextCharacters =>
        // slight optimization to avoid allocating a String instance, though the characters
        // still have to be decoded... Jackson doesn't have a way to access the raw bytes
        generator.writeRaw(parser.getTextCharacters, parser.getTextOffset, parser.getTextLength)
      case JsonToken.VALUE_STRING =>
        // the normal String case, pass it through to the output without enclosing quotes
        generator.writeRaw(parser.getText)
      case JsonToken.VALUE_NULL =>
        // a special case that needs to be handled outside of this method.
        // if a requested field is null, the result must be null. the easiest
        // way to achieve this is just by ignoring null tokens entirely
        throw new IllegalStateException("Do not attempt to copy a null field")
      case _ =>
        // handle other types including objects, arrays, booleans and numbers
        generator.copyCurrentStructure(parser)
    }
  }
}
| chenc10/Spark-PAF | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala | Scala | apache-2.0 | 15,919 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.index
import com.netflix.atlas.core.model.Datapoint
import com.netflix.atlas.core.model.TimeSeries
import org.roaringbitmap.RoaringBitmap
/**
 * Exercises RoaringTagIndex: the shared TagIndexSuite contract plus the
 * bitmap-intersection helper it uses internally.
 */
class RoaringTagIndexSuite extends TagIndexSuite {
  val index: TagIndex[TimeSeries] =
    new RoaringTagIndex(TagIndexSuite.dataset.toArray, new IndexStats())
  test("empty") {
    assert(RoaringTagIndex.empty[Datapoint].size == 0)
  }
  test("hasNonEmptyIntersection: empty, empty") {
    assert(!RoaringTagIndex.hasNonEmptyIntersection(new RoaringBitmap(), new RoaringBitmap()))
  }
  test("hasNonEmptyIntersection: equal") {
    val first = new RoaringBitmap()
    val second = new RoaringBitmap()
    first.add(10)
    second.add(10)
    assert(RoaringTagIndex.hasNonEmptyIntersection(first, second))
  }
  test("hasNonEmptyIntersection: no match") {
    // disjoint sets: evens vs odds
    val evens = new RoaringBitmap()
    val odds = new RoaringBitmap()
    for (i <- 0 until 20 by 2) evens.add(i)
    for (i <- 1 until 21 by 2) odds.add(i)
    assert(!RoaringTagIndex.hasNonEmptyIntersection(evens, odds))
  }
  test("hasNonEmptyIntersection: last match") {
    // only shared value (20) sits at the end of both bitmaps
    val evens = new RoaringBitmap()
    val mostlyOdds = new RoaringBitmap()
    for (i <- 0 until 22 by 2) evens.add(i)
    for (i <- 1 until 21 by 2) mostlyOdds.add(i)
    mostlyOdds.add(20)
    assert(RoaringTagIndex.hasNonEmptyIntersection(evens, mostlyOdds))
  }
}
| Netflix/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/index/RoaringTagIndexSuite.scala | Scala | apache-2.0 | 1,948 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* If the stats parser receives a string with multiple stats, a SeqStat will be used.
*
* @param sft simple feature type
* @param stats a Sequence of individual Stat objects
*/
class SeqStat(val sft: SimpleFeatureType, val stats: Seq[Stat]) extends Stat {

  override type S = SeqStat

  override def observe(sf: SimpleFeature): Unit = stats.foreach(_.observe(sf))

  override def unobserve(sf: SimpleFeature): Unit = stats.foreach(_.unobserve(sf))

  // pairwise merge by position; zip truncates to the shorter sequence,
  // matching the iterator-guarded behaviour of the hand-rolled loop
  override def +(other: SeqStat): SeqStat =
    new SeqStat(sft, stats.zip(other.stats).map { case (mine, theirs) => mine + theirs })

  override def +=(other: SeqStat): Unit =
    stats.zip(other.stats).foreach { case (mine, theirs) => mine += theirs }

  override def toJsonObject: Seq[Any] = stats.map(_.toJsonObject)

  override def isEmpty: Boolean = stats.forall(_.isEmpty)

  override def clear(): Unit = stats.foreach(_.clear())

  // equivalent only when both sequences have the same length and every
  // positional pair of stats is itself equivalent
  override def isEquivalent(other: Stat): Boolean = other match {
    case that: SeqStat =>
      stats.length == that.stats.length &&
        stats.zip(that.stats).forall { case (mine, theirs) => mine.isEquivalent(theirs) }
    case _ => false
  }
}
| ddseapy/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/SeqStat.scala | Scala | apache-2.0 | 2,003 |
package notebook.front
import notebook._
import notebook.front.widgets._
import play.api.libs.json._
/**
 * Widget that renders a sequence of data into a client-side "playground"
 * JavaScript module, wiring the server-side data connection to the browser.
 *
 * @tparam I server-side element type
 * @tparam O client-side element type, produced via `toO`
 */
trait JsWorld[I, O] extends Widget with IODataConnector[I, O] {
  // data shipped to the client on first render
  def data: Seq[I]
  // extra JS modules loaded alongside the playground module
  def scripts: List[Script]
  // raw JS snippets appended to the playground call
  def snippets: List[String] = Nil
  // optional static HTML appended below the generated container
  def content: Option[scala.xml.Elem] = None
  // random DOM-friendly identifier for this widget instance
  val id = Math.abs(scala.util.Random.nextInt()).toString
  // initial data encoded as JSON for the client
  lazy val json = JsonCodec.tSeq[O].decode(toO(data))
  // RequireJS module path list: the playground module plus all extra scripts
  private lazy val js = ("playground" :: scripts.map(_.script)).map(
    x => s"'../javascripts/notebook/$x'").mkString("[", ",", "]")
  // JS callback invoked with the loaded modules; forwards data, scripts and
  // snippets into playground.call (the inner //-comments are part of the
  // emitted JavaScript, not Scala comments)
  private lazy val call =
    s"""
    function(playground, ${scripts.map(_.name).mkString(", ")}) {
      // data ==> data-this (in observable.js's scopedEval) ==> this in JS => { dataId, dataInit, ... }
      // this ==> scope (in observable.js's scopedEval) ==> this.parentElement ==> div.container below (toHtml)
      playground.call(data,
        this
        ${if (scripts.size > 0) "," else ""}
        ${scripts.map(s => s.toJson).mkString(", ")}
        ${if (snippets.size > 0) "," else ""}
        ${snippets.mkString(", ")}
      );
    }
    """
  // Container div holding the scoped script invocation; any extra `content`
  // is appended as additional children.
  lazy val toHtml = {
    //class="container"
    val container = <div>
      {scopedScript(
        s"req($js, $call);",
        Json.obj(
          "dataId" -> dataConnection.id,
          "dataInit" -> json,
          "genId" → id
        )
      )}
    </div>
    content.map(c => container.copy(child = container.child ++ c)).getOrElse(container)
  }
}
/**
 * Simplest concrete JsWorld: input and output element types coincide, so both
 * conversions are the identity.
 */
class Playground[T](
  override val data: Seq[T],
  override val scripts: List[Script] = Nil,
  override val snippets: List[String] = Nil,
  override val content: Option[scala.xml.Elem] = None
)(implicit val singleCodec: Codec[JsValue, T]) extends JsWorld[T, T] {
  // no transformation needed: I == O == T
  override lazy val toO = identity[Seq[T]] _
  val singleToO = identity[T] _
}
package skuber
import java.util.Date
/**
* @author David O'Riordan
*/
case class Event(
  val kind: String = "Event",
  override val apiVersion: String = v1,
  val metadata: ObjectMeta,
  // the object this event refers to
  involvedObject: ObjectReference,
  // short machine-usable reason plus human-readable message
  reason: Option[String] = None,
  message: Option[String] = None,
  // component/host that reported the event
  source: Option[Event.Source] = None,
  // observation window and occurrence count
  firstTimestamp: Option[Timestamp] = None,
  lastTimestamp: Option[Timestamp] = None,
  count: Option[Int] = None,
  `type`: Option[String] = None
) extends ObjectResource
object Event {
  // Registers Event as a namespaced resource with the standard plural /
  // singular / kind names and the short name "ev".
  val specification=CoreResourceSpecification(
    scope = ResourceSpecification.Scope.Namespaced,
    names = ResourceSpecification.Names(
      plural="events",
      singular="event",
      kind="Event",
      shortNames=List("ev")
    )
  )
  // Implicit resource definitions picked up by the client API, for single
  // events and for event lists.
  implicit val evDef = new ResourceDefinition[Event] { def spec=specification }
  implicit val evListDef = new ResourceDefinition[EventList] { def spec=specification }
  // Reporting source of an event: emitting component and host, both optional.
  case class Source(component: Option[String] = None, host: Option[String] = None)
}
/*
Copyright (C) 2014-2020 Miquel Sabaté Solà <mikisabate@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.mssola.snacker.aqs
import com.mssola.snacker.core.{ Base, BaseComponent, Device, Devices, DeviceJSON }
import backtype.storm.topology.{ TopologyBuilder }
import net.liftweb.json._
/**
* @class AqsComponent
*
* Registers the AQS service.
*/
object AqsComponent extends BaseComponent {

  override def cityId = Base.London

  /**
   * On initialization dump all the devices to Cassandra: parse the device
   * listing JSON and insert one Device row per entry.
   */
  override def initialize() = {
    implicit val formats = DefaultFormats
    parse(devices().asString).extract[List[DeviceJSON]].foreach { json =>
      Devices.insertDevice(
        new Device(json.deviceID.toInt, json.name, json.cityID.toInt,
                   json.longitude.toDouble, json.latitude.toDouble,
                   json.properties))
    }
  }

  /** Two-stage topology: the AQS spout shuffle-feeds the AQS bolt. */
  override def buildTopology(builder: TopologyBuilder) = {
    builder.setSpout("aqss", new AqsSpout, 1)
    builder.setBolt("aqsb", new AqsBolt, 8).shuffleGrouping("aqss")
  }
}
| mssola/thesis | upc/snacker-aqs/src/main/scala/com/mssola/snacker/aqs/AqsComponent.scala | Scala | gpl-3.0 | 1,707 |
package org.mmg.scala.study
/**
* Created by mmedina on 14/09/08.
*/
/**
 * Study examples for Scala pattern matching: literal, alternative, typed,
 * Option, list, guarded and extractor-based matches.
 */
object PatternMatching {
  // Literal patterns with a wildcard fallback.
  def basicCase(x: Int): String = x match {
    case 1 => "one"
    case 2 => "two"
    case _ => "I can't count after 2!!!"
  }
  def displayHelp = "This is the Scala help line. How may I help you?"
  def displayVersion = "Version 1.0"
  def unknownArgument(arg: String) = "I don't know the meaning of the argument %s".format(arg)
  // Alternative patterns (|) and a variable pattern binding the whole value.
  def parseArgument(arg: String) = arg match {
    case "-h" | "--help" => displayHelp
    case "-v" | "--version" => displayVersion
    case whatever => unknownArgument(whatever)
  }
  // Typed patterns: match on the runtime type instead of isInstanceOf/asInstanceOf.
  def typedPatternMatching(x: Any): String = x match {
    case i:Int => "integer: " + i
    case _:Double => "a double"
    case s:String => "I want to say " + s
    case _ => "This is not good"
  }
  /*
   Matching on Option: Some(number) binds the wrapped value; the wildcard case
   handles None — without it the match would be non-exhaustive and Scala would
   warn (and throw a MatchError at runtime for None).
   */
  def checkOption(o: Option[Int]): Int = o match {
    case Some(number) => number // variable binding: `number` is the value inside Some
    case _ => 0
  }
  // Length of a list via the cons (::) pattern.
  def lengthOfList(l: List[Any]): Int = l match {
    case h :: t => 1 + lengthOfList(t)
    case Nil => 0 // Nil is the singleton object representing the empty list
  }
  // Even better: the unused head becomes a wildcard (and it recurses on itself).
  def betterLengthOfList(l: List[Any]): Int = l match {
    case _ :: t => 1 + betterLengthOfList(t)
    case _ => 0
  }
  /*
   We can check for lists in different ways
   */
  def checkMiddleWord(l: List[String]) = l match {
    case h :: "dog" :: t => "The second element is dog"
    case "He" :: _ :: "at" :: t => "Looks a like a sentence"
    case _ => "Anything else"
  }
  def checkList(l: List[Any]) = l match {
    case _ :: _ :: t => "The list has at least 2 elements"
    case _ :: Nil => "The list has 1 element"
    case _ => "Empty List"
  }
  // Using guards: an `if` condition attached to a pattern.
  def lengthWithGuards(l: List[Any]): Int = l match {
    case l if l.nonEmpty => 1 + lengthWithGuards(l.tail)
    case _ => 0
  }
  def oddOrEven(number: Int) = number match {
    case 0 => "Zero, by definition, is even"
    case x if x % 2 == 0 => "even" // was the Int literal 3 — a typo that widened the result type to Any
    case _ => "odd"
  }
  // What happens with pattern matching on ordinary classes?
  // Let's create a class.
  class JIEMEmployee(val name: String, val dept: String) {
    def doNothing = println("I'm not doing anything")
  }
  class JIEMManager(name: String, val role: String) extends JIEMEmployee(name, "Bosses") {
    override def doNothing = println("I do a lot of work")
    private def doSomething = println("jojojo")
  }
  // companion object
  object JIEMEmployee {
    // apply allows to create a new member of the class without using new
    def apply(name: String, dept: String) = new JIEMEmployee(name, dept)
    // unapply returns an Option[Tuple] with the elements that were used when creating the class
    def unapply(jiememployee: JIEMEmployee) = Some((jiememployee.name, jiememployee.dept))
  }
  object JIEMManager {
    def apply(name: String, role: String) = new JIEMManager(name, role)
    def unapply(jiemManager: JIEMManager) = Some((jiemManager.name, jiemManager.dept, jiemManager.role))
  }
  case class AnEmployee(name: String, salary: Double)
  // Note the "sealed" keyword: all subtypes live in this file, so the compiler
  // can check exhaustiveness of matches over Expression.
  sealed trait Expression
  object Plus extends Expression
  object Minus extends Expression
  object Times extends Expression
  object Division extends Expression
  def evaluateExpression(e: Expression): String = e match {
    case Plus => "+"
    case Minus => "-"
    case Times => "*"
    case Division => "/"
  }
  // Extractor patterns: order matters — the more specific JIEMManager (3-tuple
  // unapply) must come before the JIEMEmployee pattern, which would also match.
  def getDepartmentFromEmployee(employee: JIEMEmployee) = employee match {
    case JIEMManager(_,dept,role) => "Manager of %s with role %s".format(dept, role)
    case JIEMEmployee(_,dept) => dept
  }
  def main(a: Array[String]): Unit = {
    val Manuel = JIEMEmployee("Manuel", "Lab")
    val Mikko = JIEMEmployee("Mikko", "Lab")
    val Sano = JIEMManager("Sano", "System Boss") // uses JIEMManager.apply, no `new` needed
    List(Manuel,Mikko,Sano) foreach {employee => println(getDepartmentFromEmployee(employee))}
  }
}
| medinamanuel/ScalaStudy | src/main/scala/org/mmg/scala/study/PatternMatching.scala | Scala | apache-2.0 | 4,006 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.util
import silhouette.http.RequestPipeline
/**
* A generator which creates a fingerprint to identify a user.
*/
trait FingerprintGenerator {
  /**
   * Generates a fingerprint from request.
   *
   * Implementations derive the value from the given request pipeline; which
   * request attributes are used is implementation-specific.
   *
   * @param request The request pipeline.
   * @tparam R The type of the request.
   * @return The generated fingerprint.
   */
  def generate[R](implicit request: RequestPipeline[R]): String
}
| datalek/silhouette | silhouette/src/main/scala/silhouette/util/FingerprintGenerator.scala | Scala | apache-2.0 | 1,179 |
package com.twitter.finatra.http.benchmark
import com.twitter.finagle.Http.Server
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.stack.nilStack
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finatra.http.filters.HttpResponseFilter
import com.twitter.finatra.http.routing.HttpRouter
import com.twitter.finatra.http.{Controller, HttpServer}
object FinatraBenchmarkServerMain extends FinatraBenchmarkServer
/**
 * HTTP server configured for benchmarking: compression is disabled, stats go
 * to a null receiver, and the Finagle server stack is replaced with the empty
 * stack to minimise per-request overhead.
 */
class FinatraBenchmarkServer extends HttpServer {
  override def configureHttpServer(server: Server): Server = {
    server
      .withCompressionLevel(0)
      .withStatsReceiver(NullStatsReceiver)
      .withStack(nilStack[Request, Response])
  }
  // Route all requests through the response filter to the benchmark controller.
  override def configureHttp(router: HttpRouter): Unit = {
    router
      .filter[HttpResponseFilter[Request]]
      .add[FinatraBenchmarkController]
  }
}
/**
 * Controller with the two benchmark endpoints: a plaintext greeting at
 * /plaintext and a JSON object at the root path.
 */
class FinatraBenchmarkController extends Controller {
  get("/plaintext") { request: Request =>
    "Hello, World!"
  }
  get("/") { request: Request =>
    Map("message" -> "Hello, World!")
  }
}
| twitter/finatra | examples/benchmark/src/main/scala/com/twitter/finatra/http/benchmark/FinatraBenchmarkServer.scala | Scala | apache-2.0 | 1,069 |
package jigg.util
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.util.Properties
object ArgumentsParser {
  import scala.annotation.tailrec

  /**
   * Parses command-line arguments of the form `-key value` / `--key value`
   * into a Properties object. A key followed by another key (or by nothing)
   * is treated as a boolean flag and set to "true". The special key `props`
   * loads additional properties from the given file path.
   */
  def parse(args: List[String]): Properties = parseRecur(new Properties, args)

  // Tail-recursive worker; @tailrec makes the compiler verify it stays so.
  @tailrec
  private def parseRecur(props: Properties, args: List[String]): Properties = args match {
    case ArgKey(key) :: next => next match {
      case ArgKey(_) :: _ => // -key1 -key2 ... => key1 is boolean value
        putTrue(props, key)
        parseRecur(props, next)
      case value :: tail =>
        key match {
          case "props" => props.load(jigg.util.IOUtil.openIn(value))
          case _ => props.put(key, value)
        }
        parseRecur(props, tail)
      case Nil =>
        // trailing key with no value => boolean flag
        putTrue(props, key)
        parseRecur(props, next)
    }
    case _ => props // leading non-key tokens (and the end of input) stop parsing
  }

  def putTrue(props: Properties, key: String) = props.put(key, "true")

  // Extractor recognising `-key` / `--key` tokens (but not negative numbers).
  object ArgKey {
    def unapply(key: String): Option[String] = key match {
      case x if x.size > 1 && x(0) == '-' && x.drop(1).forall(x=>x.isDigit || x=='.') => None // -10.0, -1, etc are not key
      case x if x.size > 1 && x(0) == '-' && x(1) == '-' => Some(x.substring(2))
      case x if x.size > 1 && x(0) == '-' => Some(x.substring(1)) // we don't catch if x.size == 1, ('-' is recognized as some value)
      case _ => None
    }
  }
}
| sakabar/jigg | src/main/scala/jigg/util/ArgumentsParser.scala | Scala | apache-2.0 | 1,836 |
import scala.quoted._
import scala.quoted.autolift
object Macros {

  // Expands at compile time to a `print` of the lines accumulated in `impl`.
  implicit inline def testMacro: Unit = ${impl}

  /**
   * Builds a compile-time report by matching several expressions against the
   * `Const` extractor (which succeeds only for compile-time constant values)
   * and splices a `print` of the accumulated text into the caller.
   */
  def impl(using QuoteContext): Expr[Unit] = {
    val buff = new StringBuilder
    def stagedPrintln(x: Any): Unit = buff append java.util.Objects.toString(x) append "\\n"
    Expr(3) match { case Const(n) => stagedPrintln(n) }
    '{4} match { case Const(n) => stagedPrintln(n) }
    '{"abc"} match { case Const(n) => stagedPrintln(n) }
    '{null} match { case Const(n) => stagedPrintln(n) }
    // `new Object` is not a constant, so the fallback branch is taken
    '{new Object} match { case Const(n) => println(n); case _ => stagedPrintln("OK") }
    '{print(${buff.result()})}
  }
}
| som-snytt/dotty | tests/run-macros/tasty-extractors-constants-1/quoted_1.scala | Scala | apache-2.0 | 642 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.cogmath.hypercircuit
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.scalatest.MustMatchers
/** Test code for the Hyperedge class.
*
* @author Greg Snider and Dick Carter
*/
@RunWith(classOf[JUnitRunner])
class HyperedgeSpec extends FunSuite with MustMatchers {
  test("all") {
    class Node(inputEdges: Array[Hyperedge[Node]]) extends Hypernode[Node](inputEdges)
    class Graph extends Hypercircuit[Node]
    // NOTE(review): `graph` is never referenced afterwards — presumably the
    // Hypercircuit constructor makes it the current circuit for node creation;
    // confirm before removing.
    val graph = new Graph
    // Topology: node0 and node1 are sources; edge1 fans out from node0 to
    // node2 and node3, edge2 connects node1 to node2 only.
    val node0 = new Node(Array[Hyperedge[Node]]())
    val node1 = new Node(Array[Hyperedge[Node]]())
    val edge1 = new Hyperedge(node0)
    val edge2 = new Hyperedge(node1)
    val node2 = new Node(Array(edge1, edge2))
    val node3 = new Node(Array(edge1))
    // sink bookkeeping on the source side
    require(node0.outputs(0).sinks.length == 2)
    require(node1.outputs(0).sinks.length == 1)
    require(node0.outputs(0).sinks contains node2)
    require(node0.outputs(0).sinks contains node3)
    require(node1.outputs(0).sinks contains node2)
    // input bookkeeping on the sink side
    require(node0.inputs.length == 0)
    require(node1.inputs.length == 0)
    require(node2.inputs.length == 2)
    require(node3.inputs.length == 1)
    require(node2.inputs.map(_.source) contains node0)
    require(node2.inputs.map(_.source) contains node1)
    require(node3.inputs.map(_.source) contains node0)
  }
}
package model.actuator.impl
import model.actuator.{Actuator, Command}
import model.location.Location
import mqtt.JsonSender
import play.api.Logger
/**
* Represents VV Blind controller
*/
case class VVBlindsActuator(
  location: Location,
  jsonSender: JsonSender
) extends Actuator {

  // display name reported for this actuator
  override val name: String = "Blinds"

  // blinds understand exactly two parameterless commands
  override def supportedCommands: Set[Command] = Set(
    Command("Up", Seq.empty),
    Command("Down", Seq.empty)
  )

  // Executes a supported command by publishing the lower-cased direction to
  // the node's blinds topic; anything else is silently ignored.
  override def execute(command: Command): Unit = command.name match {
    case "Up" | "Down" =>
      Logger.info(s"VVBlindsActuator called and goes ${command.name}")
      jsonSender.send(
        s"node/${location.address}/vv-display/-/blinds/set",
        s""""${command.name.toLowerCase()}""""
      )
    case _ => ()
  }
}
| vavravl1/home_center | app/model/actuator/impl/VVBlindsActuator.scala | Scala | mit | 843 |
package ee.cone.c4assemble
import ee.cone.c4assemble.Types._
import scala.annotation.tailrec
import scala.collection.immutable.{Map, Seq}
import scala.concurrent.{ExecutionContext, Future}
/**
 * Wraps a joiner (`main`) whose output feeds back into its own input via the
 * corresponding `was` key: re-applies `main` (plus the `continue` stages)
 * until the produced diff is empty, i.e. until a local fixpoint is reached.
 * Iteration is capped at 1000 rounds; exceeding the cap is reported as an
 * unstable local assemble.
 */
class LoopExpression[MapKey, Value](
  outputWorldKey: AssembledKey,
  wasOutputWorldKey: AssembledKey,
  main: WorldPartExpression, // with DataDependencyTo[Index[MapKey, Value]],
  continue: List[WorldPartExpression],
  updater: IndexUpdater
)(composes: IndexUtil,
  //val outputWorldKey: AssembledKey[Index[MapKey, Value]] = main.outputWorldKey,
  continueF: WorldTransition⇒WorldTransition = Function.chain(continue.map(h⇒h.transform(_)))
) extends WorldPartExpression {
  // One fixpoint step: if the current diff for the output key is empty we are
  // done and return the accumulated diff plus the final result index;
  // otherwise merge the diff into the accumulator and iterate again.
  private def inner(
    left: Int, transition: WorldTransition, resDiff: Index
  ): Future[IndexUpdate] = {
    implicit val executionContext: ExecutionContext = transition.executionContext
    for {
      diffPart ← outputWorldKey.of(transition.diff)
      res ← {
        if(composes.isEmpty(diffPart)) for {
          resVal ← outputWorldKey.of(transition.result)
        } yield new IndexUpdate(resDiff, resVal, Nil)
        else if(left > 0) inner(
          left - 1,
          main.transform(continueF(transition)),
          composes.mergeIndex(Seq(resDiff, diffPart))
        )
        else throw new Exception(s"unstable local assemble ${transition.diff}")
      }
    } yield res
  }
  // Applies `main` once; if it changed anything, runs the loop to fixpoint
  // (reference equality of the transition means "no change").
  def transform(transition: WorldTransition): WorldTransition = {
    val transitionA = main.transform(transition)
    if(transition eq transitionA) transition
    else finishTransform(transition, inner(1000, transitionA, emptyIndex))
  }
  // Publishes the looped result under the output key and mirrors the final
  // result (with an empty diff) under the `was` key.
  def finishTransform(transition: WorldTransition, next: Future[IndexUpdate]): WorldTransition = {
    implicit val executionContext: ExecutionContext = transition.executionContext
    Function.chain(Seq(
      updater.setPart(outputWorldKey,next,logTask = true),
      updater.setPart(wasOutputWorldKey,next.map(update⇒new IndexUpdate(emptyIndex,update.result,Nil)),logTask = false)
    ))(transition)
  }
}
/**
 * Replaces each self-feeding joiner — one whose output key is produced by a
 * single expression and whose `was` key is consumed only by that same
 * expression — with a LoopExpression, so its fixpoint iteration runs locally
 * instead of going through the global assemble loop.
 */
class ShortAssembleSeqOptimizer(
  composes: IndexUtil,
  backStageFactory: BackStageFactory,
  updater: IndexUpdater
) extends AssembleSeqOptimizer {
  // keys that occur exactly once in the given sequence
  private def getSingleKeys[K]: Seq[K] ⇒ Set[K] = _.groupBy(i⇒i).collect{ case (k,Seq(_)) ⇒ k }.toSet
  def optimize: List[Expr]⇒List[WorldPartExpression] = expressionsByPriority ⇒ {
    val singleOutputKeys: Set[AssembledKey] = getSingleKeys(expressionsByPriority.map(_.outputWorldKey))
    val singleInputKeys = getSingleKeys(expressionsByPriority.flatMap(_.inputWorldKeys))
    // NOTE(review): the match below has no fallback for non-JoinKey output
    // keys and would throw MatchError — presumably every expression output is
    // a JoinKey; confirm.
    expressionsByPriority.map{ e ⇒ e.outputWorldKey match {
      case key:JoinKey ⇒
        val wKey = key.withWas(was=true)
        if(
          singleOutputKeys(key) && singleInputKeys(wKey) &&
          e.inputWorldKeys.contains(wKey)
        ) new LoopExpression[Any,Any](
          key, wKey, e, backStageFactory.create(List(e)), updater
        )(composes)
        else e
    }}
  }
}
/** Pass-through optimizer: returns the expression list unchanged. */
class NoAssembleSeqOptimizer() extends AssembleSeqOptimizer {
  def optimize: List[Expr]⇒List[WorldPartExpression] = expressions ⇒ expressions
}
| wregs/c4proto | c4assemble-runtime/src/main/scala/ee/cone/c4assemble/AssembleSeqOptimizerImpl.scala | Scala | apache-2.0 | 3,089 |
package opencl.generator
import ir.ast._
import ir.{ArrayTypeWSWC, TupleType}
import opencl.executor.{Execute, TestWithExecutor}
import opencl.ir._
import opencl.ir.pattern._
import org.junit.Assert._
import org.junit._
object TestUnrollAndInlineValues extends TestWithExecutor
/**
 * Tests for unrolling/inlining private values: the suffix-parsing helper of
 * UnrollValues, plus end-to-end kernels copying arrays of tuples (and nested
 * arrays) through private memory.
 */
class TestUnrollAndInlineValues {

  val delta = 0.00001f

  val M = 6
  val N = 4
  val O = 8

  // identity user-functions over nested tuple types, used to force copies
  // of struct values through private memory
  val tftii_id = UserFun("nestedtuple_id", "x", "return x;", TupleType(Float, TupleType(Int, Int)), TupleType(Float, TupleType(Int, Int)))
  val tftitff_id = UserFun("nestednestedtuple_id", "x", "return x;", TupleType(Float, TupleType(Int, TupleType(Float, Float))), TupleType(Float, TupleType(Int, TupleType(Float, Float))))

  /**
   * Checks that UnrollValues.getIndexSuffix splits `inputString` into the
   * expected leading index and remaining suffix.
   */
  def runUnrolledIndexTest(inputString: String, returnIdx: Int, returnSuffix: String): Unit = {
    val unrolled = UnrollValues.getIndexSuffix(inputString)
    // JUnit convention: expected value first, actual value second
    assertEquals(returnIdx, unrolled._1)
    assertEquals(returnSuffix, unrolled._2)
  }

  // index tests
  @Test
  def testNoSuffix(): Unit = {
    runUnrolledIndexTest("", -1, "")
  }

  @Test
  def testSimpleSuffix(): Unit = {
    runUnrolledIndexTest("_7", 7, "")
  }

  @Test
  def testTuple(): Unit = {
    runUnrolledIndexTest("._1", 1, "")
  }

  @Test
  def testTupleSuffix(): Unit = {
    runUnrolledIndexTest("_2._1", 2, "._1")
  }

  @Test
  def testMultiDSuffix(): Unit = {
    runUnrolledIndexTest("_2_2_5", 2, "_2_5")
  }

  @Test
  def testMultiDSuffixWithTuple(): Unit = {
    runUnrolledIndexTest("_2_2._1", 2, "_2._1")
  }

  @Test
  def testUnrollPrivateArrayOfStructs(): Unit = {
    /* Arr[Tuple(float,float)] */
    val ISflag = InlineStructs()
    InlineStructs(true)
    try {
      val data = Array.tabulate(N) { (i) => (i + 1).toFloat }
      val input = (data zip data)
      val compare = input.toVector
      val lambda = fun(
        ArrayTypeWSWC(TupleType(Float, Float), N),
        (A) =>
          toGlobal(MapSeq(tf_id)) o toPrivate(MapSeq(tf_id)) $ A)
      val (output, _) = Execute(N, N)[Vector[(Float, Float)]](lambda, input)
      assertEquals(compare, output)
    } finally InlineStructs(ISflag) // restore the global flag even on failure
  }

  @Test
  def testUnrollPrivateArrayOfStructsOfStructs(): Unit = {
    /* Arr[Tuple(float,Tuple(int,int))] */
    val ISflag = InlineStructs()
    InlineStructs(true)
    try {
      val data = Array.tabulate(N) { (i) => (i + 1).toFloat }
      val input = (data zip (data.map(_.toInt) zip data.map(_.toInt)))
      val compare = input.toVector
      val lambda = fun(
        ArrayTypeWSWC(TupleType(Float, TupleType(Int, Int)), N),
        (A) =>
          toGlobal(MapSeq(tftii_id)) o toPrivate(MapSeq(tftii_id)) $ A)
      val (output, _) = Execute(N, N)[Vector[(Float, (Int, Int))]](lambda, input)
      assertEquals(compare, output)
    } finally InlineStructs(ISflag)
  }

  @Test
  def testUnrollPrivateArrayOfStructsOfStructsOfStructs(): Unit = {
    /* Arr[Tuple(float,Tuple(int,Tuple(float,float)))] */
    val ISflag = InlineStructs()
    InlineStructs(true)
    try {
      val data = Array.tabulate(N) { (i) => (i + 1).toFloat }
      val input = (data zip (data.map(_.toInt) zip (data zip data)))
      val compare = input.toVector
      val lambda = fun(
        ArrayTypeWSWC(TupleType(Float, TupleType(Int, TupleType(Float, Float))), N),
        (A) =>
          toGlobal(MapSeq(tftitff_id)) o toPrivate(MapSeq(tftitff_id)) $ A)
      val (output, _) = Execute(N, N)[Vector[(Float, (Int, (Float, Float)))]](lambda, input)
      assertEquals(compare, output)
    } finally InlineStructs(ISflag)
  }

  @Test
  def testUnrollPrivateArraysOfPrivateArrays(): Unit = {
    /* Arr[Arr[float]] */
    val data = Array.tabulate(M, N) { (i, j) => (i + j + 1).toFloat }
    val gold: Array[Float] = data.flatten
    val lambda = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(Float, N), M),
      (A) =>
        toGlobal(MapSeq(MapSeq(id))) o toPrivate(MapSeq(MapSeq(id))) $ A)
    val (output, _) = Execute(2, 2)[Array[Float]](lambda, data)
    assertArrayEquals(gold, output, delta)
  }

  @Test
  def testUnrollPrivateArraysOfPrivateArraysOfPrivateArrays(): Unit = {
    /* Arr[Arr[Arr[float]]] */
    val data = Array.tabulate(M, N, O) { (i, j, k) => (i + j + k + 1).toFloat }
    val gold: Array[Float] = data.flatten.flatten
    val lambda = fun(
      ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(Float, O), N), M),
      (A) =>
        toGlobal(MapSeq(MapSeq(MapSeq(id)))) o toPrivate(MapSeq(MapSeq(MapSeq(id)))) $ A)
    val (output, _) = Execute(2, 2)[Array[Float]](lambda, data)
    assertArrayEquals(gold, output, delta)
  }

  @Test
  def testUnrollPrivateArraysOfPrivateArraysOfPrivateArraysofStructs(): Unit = {
    /* Arr[Arr[Arr[Tuple(float, float)]]] */
    val ISflag = InlineStructs()
    InlineStructs(true)
    try {
      val data3D = Array.tabulate(M, N, O) { (i, j, k) => ((i + j + k + 1).toFloat, (i + j + k + 1).toFloat) }
      val gold3D: Vector[(Float, Float)] = data3D.flatten.flatten.toVector
      val lambda3D = fun(
        ArrayTypeWSWC(ArrayTypeWSWC(ArrayTypeWSWC(TupleType(Float, Float), O), N), M),
        (A) =>
          toGlobal(MapSeq(MapSeq(MapSeq(tf_id)))) o toPrivate(MapSeq(MapSeq(MapSeq(tf_id)))) $ A)
      val (output, _) = Execute(2, 2)[Vector[Vector[Vector[(Float, Float)]]]](lambda3D, data3D)
      assertEquals(gold3D, output.flatten.flatten)
    } finally InlineStructs(ISflag)
  }
}
| lift-project/lift | src/test/opencl/generator/TestUnrollAndInlineValues.scala | Scala | mit | 6,031 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 MineFormers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.mineformers.core.structure
import de.mineformers.core.util.world.BlockPos
import net.minecraft.block.Block
import net.minecraft.init.Blocks
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.util.EnumFacing
import net.minecraft.world.World
/**
* Layer
*
* @author PaleoCrafter
*/
/**
 * One horizontal slice of a structure: a width x length grid of
 * [[BlockInfo]] cells addressed by (x, z) coordinates.
 *
 * @param width  extent of the slice along the x axis
 * @param length extent of the slice along the z axis
 */
class Layer(val width: Int, val length: Int) {
  // Backing grid; cells are null until first written (or lazily filled with
  // an air entry on read, see get()).
  private[structure] val blocks = Array.ofDim[BlockInfo](width, length)

  /** Places a block without tile entity data; x/z are clamped into range. */
  def set(x: Int, z: Int, block: Block, metadata: Int) {
    set(x, z, block, metadata, null)
  }

  /** Places a block with optional tile entity NBT; x/z are clamped into range. */
  def set(x: Int, z: Int, block: Block, metadata: Int, tileEntity: NBTTagCompound) {
    set(new SimpleBlockInfo(block, clampX(x), clampZ(z), (metadata & 0xF).toByte, tileEntity))
  }

  /** Stores the given block info at its own (x, z) position. */
  def set(info: BlockInfo) {
    blocks(info.x)(info.z) = info
  }

  /** Clears the cell at the (clamped) coordinates. */
  def remove(x: Int, z: Int) {
    blocks(clampX(x))(clampZ(z)) = null
  }

  /**
   * Returns the info at (x, z), or null when out of bounds. An empty in-range
   * cell is lazily populated with an air entry before being returned, so this
   * call can mutate the layer.
   */
  def get(x: Int, z: Int): BlockInfo =
    if (x < 0 || z < 0 || x >= width || z >= length) null
    else blocks(x)(z) match {
      case null =>
        val filler = new SimpleBlockInfo(Blocks.air, x, z, 0.toByte)
        set(filler)
        filler
      case present => present
    }

  private def clampX(x: Int): Int = math.min(math.max(x, 0), width - 1)

  private def clampZ(z: Int): Int = math.min(math.max(z, 0), length - 1)

  /** Deep copy of this layer: every non-empty cell is duplicated via BlockInfo.copy. */
  def copy: Layer = {
    val duplicate = new Layer(width, length)
    for {
      x <- 0 until width
      z <- 0 until length
      info = get(x, z)
      if info != null
    } duplicate.set(info.copy)
    duplicate
  }

  override def toString: String =
    "Layer=[" + blocks.deep.mkString(",") + "]"
}
/**
 * A block placement inside a structure layer: carries its (x, z) position
 * within the layer and delegates block/metadata/tile access to the
 * [[BlockEntry]] supplied by the concrete subclass.
 */
abstract class BlockInfo(private var _x: Int, private var _z: Int) {
  /** X position within the layer. */
  def x = _x

  /** Z position within the layer. */
  def z = _z

  private[structure] def x_=(x: Int): Unit = _x = x

  private[structure] def z_=(z: Int): Unit = _z = z

  /** The entry holding the block, metadata and tile data of this placement. */
  def getEntry: BlockEntry

  def getBlock = getEntry.block

  def getMetadata = getEntry.metadata

  def getTileEntity = getEntry.tile

  def setBlock(block: Block): Unit = getEntry.block = block

  /** Stores the metadata, masked to its low nibble (0-15). */
  def setMetadata(metadata: Int): Unit = getEntry.metadata = (metadata & 0xF).toByte

  def setTile(tile: NBTTagCompound): Unit = getEntry.tile = tile

  /** Update hook invoked by the structure world; does nothing by default. */
  def update(world: StructureWorld, y: Int): Unit = ()

  /**
   * Copy of the tile entity NBT with its stored coordinates shifted by the
   * given offsets, or null when this placement has no tile entity.
   */
  def getTranslatedTileEntity(x: Int, y: Int, z: Int): NBTTagCompound = {
    val tile = getEntry.tile
    if (tile == null) null
    else {
      val shifted = tile.copy.asInstanceOf[NBTTagCompound]
      shifted.setInteger("x", shifted.getInteger("x") + x)
      shifted.setInteger("y", shifted.getInteger("y") + y)
      shifted.setInteger("z", shifted.getInteger("z") + z)
      shifted
    }
  }

  /** Rotates this placement's block in the given world around the axis. */
  def rotate(world: World, y: Int, axis: EnumFacing): Unit

  /** Copy of this info, suitable for Layer.copy. */
  def copy: BlockInfo

  override def toString: String =
    s"BlockInfo=[pos=($x,$z), block=${getEntry.block}, metadata=${getEntry.metadata}]"
}
/**
 * Straightforward [[BlockInfo]] backed by a single mutable [[BlockEntry]].
 *
 * @param block    initial block of the placement
 * @param _x       x position within the layer
 * @param _z       z position within the layer
 * @param metadata initial metadata nibble
 * @param tile     optional tile entity NBT (may be null)
 */
class SimpleBlockInfo(private var block: Block, _x: Int, _z: Int, metadata: Byte, tile: NBTTagCompound) extends BlockInfo(_x, _z) {
  // Stamp this placement's coordinates into the tile entity NBT, if present.
  if (tile != null) {
    tile.setInteger("x", x)
    tile.setInteger("z", z)
  }

  private var entry = BlockEntry(block, metadata, tile)

  def getEntry = entry

  /** Convenience constructor for placements without tile entity data. */
  def this(block: Block, x: Int, z: Int, metadata: Byte) = this(block, x, z, metadata, null)

  override def rotate(world: World, y: Int, axis: EnumFacing): Unit = {
    // Rotate the entry's current block rather than the constructor argument:
    // the block may have been replaced through setBlock() since construction.
    // (The previous implementation also rebuilt the entry from the stale
    // constructor arguments afterwards, silently discarding any setMetadata/
    // setTile changes - that reset has been removed.)
    entry.block.rotateBlock(world, BlockPos(x, y, z), axis)
  }

  // Copy from the live entry so block/metadata/tile changes made through the
  // BlockInfo setters survive the copy; the constructor arguments may be stale.
  def copy: BlockInfo = new SimpleBlockInfo(entry.block, x, z, entry.metadata, entry.tile)
}
/** Factory helpers for [[BlockEntry]]. */
object BlockEntry {
  /** Builds an entry without tile data, masking the metadata to its low nibble (0-15). */
  def apply(block: Block, metadata: Int): BlockEntry = apply(block, (metadata & 0xF).toByte, null)
}

/**
 * Mutable holder for one block placement: the block, its metadata nibble and
 * optional tile entity NBT (null when the block has no tile entity).
 */
case class BlockEntry(var block: Block, var metadata: Byte, var tile: NBTTagCompound) {
  /** Convenience constructor for entries without tile entity data. */
  def this(block: Block, metadata: Byte) = this(block, metadata, null)
}
package com.atomicscala.trivia
// Category marker classes for trivia questions; they carry no state or behavior.
class Movies
class Science
class Literature
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/14_ImportsAndPackages-2ndEdition/Trivia.scala | Scala | apache-2.0 | 78 |
package com.github.probe.backend
import com.typesafe.config.Config
import akka.actor.{ ExtendedActorSystem, ExtensionIdProvider, ExtensionId, Extension }
/**
 * Base class for a settings section: exposes the named sub-tree of the parent
 * configuration as `config`, so subclasses read keys relative to that sub-tree.
 */
abstract class SubSettings(parentConfig: Config, subTreeName: String) {
  val config = parentConfig.getConfig(subTreeName)
}
/** Typed view over the application's configuration tree (see [[Settings]]). */
class SettingsImpl(config: Config) extends Extension {
  // Settings for the REST API endpoint ("rest-api" sub-tree).
  val restApi = new SubSettings(config, "rest-api") {
    val port = config.getInt("port")
  }
  // Settings for the Android-facing endpoint ("android" sub-tree).
  val android = new SubSettings(config, "android") {
    val port = config.getInt("port")
    val zeroConfInstanceName = config.getString("zeroconf-name")
  }
}
/**
 * Akka extension entry point: `Settings(system)` yields a [[SettingsImpl]]
 * built from the actor system's `probe` configuration sub-tree.
 */
object Settings extends ExtensionId[SettingsImpl] with ExtensionIdProvider {
  override def lookup() = Settings
  override def createExtension(system: ExtendedActorSystem) = new SettingsImpl(system.settings.config.getConfig("probe"))
}
| khernyo/freezing-ninja | backend/src/main/scala/com/github/probe/backend/Settings.scala | Scala | apache-2.0 | 832 |
package examples
import misc.Members
/**
 * Sample trait exposing one of each member kind (parameterless defs, val,
 * var, protected val) so the reflection demo below has members to enumerate.
 */
trait ExampleTrait {
  def method1: Unit
  def method2: Int
  val val1: String
  var var1: String
  protected val privateVal: Int
}
/** Prints the public members of [[ExampleTrait]] discovered via [[Members]]. */
object Example1 extends App {
  Members.iteratePublicMembers[ExampleTrait] foreach println
}
| davidhoyt/scala-macro-experiments | misc/src/main/scala/examples/Example1.scala | Scala | mit | 265 |
package jsky.app.ot.gemini.editor.auxfile
import edu.gemini.spModel.core.SPProgramID
import edu.gemini.auxfile.client.AuxFileClient
import edu.gemini.auxfile.api.AuxFileException
import jsky.app.ot.ui.util.{ProgressDialog, ProgressModel}
import jsky.app.ot.util.Resources
import java.io.File
import scala.swing._
import scala.swing.Swing.onEDT
import AuxFileAction.transferListener
/**
 * Swing action that lets the user pick one or more local files and attach
 * (upload) them to the program's auxiliary file store.
 *
 * Flow (see apply): prompt for files, reject files over the size limit,
 * then upload each one while showing a progress dialog.
 */
class AddAction(c: Component, model: AuxFileModel) extends AuxFileAction("Add", c, model) {
  icon = Resources.getIcon("eclipse/add.gif")
  toolTip = "Add a new file attachment."

  // NOTE(review): message contains a typo ("the was"); it is a user-visible
  // runtime string, so it is left untouched here.
  override def interpret(ex: AuxFileException) =
    s"Sorry, the was a problem attaching files: '${ex.getMessage}'"

  /**
   * Shows a multi-select file chooser starting at the remembered directory.
   * Returns the chosen files (remembering their directory for next time),
   * or None when the dialog is cancelled or nothing is selected.
   */
  private def prompt: Option[List[File]] = {
    val fc = new FileChooser(dirPreference.orNull)
    fc.title = "Choose File to Attach (Upload)"
    fc.fileSelectionMode = FileChooser.SelectionMode.FilesOnly
    fc.multiSelectionEnabled = true
    fc.peer.setApproveButtonMnemonic('a')
    fc.peer.setApproveButtonToolTipText("Upload selected file.")
    fc.showDialog(c, "Attach") match {
      case FileChooser.Result.Approve =>
        fc.selectedFiles.toList match {
          case Nil => None
          case files =>
            dirPreference = Some(files.head.getParentFile)
            Some(files)
        }
      case _ => None
    }
  }

  /**
   * Rejects the selection when any chosen file exceeds MaxFileSize, showing
   * a message naming the offending file(s); returns the files unchanged
   * otherwise.
   * NOTE(review): File.length is in bytes while the message reports the limit
   * as MB - confirm MaxFileSize's units where it is declared.
   */
  private def validate(files: List[File]): Option[List[File]] =
    files.filter(_.length > MaxFileSize) match {
      case Nil => Some(files)
      case List(f) =>
        Dialog.showMessage(c, s"The file '${f.getName}' is larger than the limit ($MaxFileSize MB).")
        None
      case fs =>
        Dialog.showMessage(c, s"The files ${fs.map(_.getName).mkString("'","', '", "'")} are larger than the limit ($MaxFileSize MB)" )
        None
    }

  /**
   * Uploads a single file via the aux-file client while a (non-modal)
   * progress dialog is shown; the dialog is always closed and disposed on
   * the EDT, even if the transfer fails.
   */
  private def store(client: AuxFileClient, pid: SPProgramID, file: File) {
    val pm = new ProgressModel(s"Attaching file ${file.getName}", 100)
    val pd = new ProgressDialog(jFrame.orNull, s"Attach ${file.getName}", false, pm)
    onEDT(pd.setVisible(true))
    try {
      client.store(pid, file.getName, file, transferListener(pm))
    } finally {
      onEDT {
        pd.setVisible(false)
        pd.dispose()
      }
    }
  }

  // Prompt, validate, then upload each chosen file in turn.
  override def apply() {
    exec(prompt.flatMap(validate)) { (client, pid, files) =>
      files foreach { store(client, pid, _) }
    }
  }
}
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.feel.impl.interpreter
import org.camunda.feel.FeelEngineClock
import org.camunda.feel.context.FunctionProvider
import org.camunda.feel.impl.builtin.{
BooleanBuiltinFunctions,
ContextBuiltinFunctions,
ConversionBuiltinFunctions,
ListBuiltinFunctions,
NumericBuiltinFunctions,
StringBuiltinFunctions,
TemporalBuiltinFunctions,
RangeBuiltinFunction
}
import org.camunda.feel.syntaxtree.ValFunction
/**
 * Aggregates every FEEL built-in function group into a single
 * [[FunctionProvider]], keyed by function name. Because the groups are merged
 * with `++`, a name defined in a later group overrides an earlier one.
 *
 * @param clock engine clock, required by the temporal built-ins
 */
class BuiltinFunctions(clock: FeelEngineClock) extends FunctionProvider {

  // Merged lookup table over all built-in function groups.
  val functions: Map[String, List[ValFunction]] =
    ConversionBuiltinFunctions.functions ++
      BooleanBuiltinFunctions.functions ++
      StringBuiltinFunctions.functions ++
      ListBuiltinFunctions.functions ++
      NumericBuiltinFunctions.functions ++
      ContextBuiltinFunctions.functions ++
      RangeBuiltinFunction.functions ++
      new TemporalBuiltinFunctions(clock).functions

  override def getFunctions(name: String): List[ValFunction] =
    functions.get(name).getOrElse(List.empty)

  override def functionNames: Iterable[String] = functions.keys
}
| camunda/feel-scala | src/main/scala/org/camunda/feel/impl/interpreter/BuiltinFunctions.scala | Scala | apache-2.0 | 1,887 |
package com.rcirka.play.dynamodb.dao
import com.rcirka.play.dynamodb.exception.ResourceNotFoundException
import com.rcirka.play.dynamodb.models.enums.{QuerySelect, KeyType}
import com.rcirka.play.dynamodb.models.{KeyCondition, AttributeDefinition}
import com.rcirka.play.dynamodb.models.indexes.{TableIndex, AttributeIndex}
import com.rcirka.play.dynamodb.requests.{QueryRequest, CreateTableRequest}
import com.rcirka.play.dynamodb.results.DescribeTableResult
import com.rcirka.play.dynamodb.{DynamoDBClient, DynamoDbWebService}
import play.api.libs.json._
import scala.concurrent.{ExecutionContext, Await, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success}
import com.rcirka.play.dynamodb.utils.SeqUtils.SeqHelper
import com.rcirka.play.dynamodb.utils.JsonHelper._
import com.rcirka.play.dynamodb.Dsl._
/**
 * Base class for table-backed DynamoDB data access objects.
 *
 * On construction the DAO ensures the backing table exists, creating it from
 * the supplied schema when missing. With `blockOnTableCreation` set the
 * constructor blocks until the table is ready (primarily meant for unit
 * tests); otherwise creation is fire-and-forget.
 *
 * @param client                 low-level DynamoDB connection settings
 * @param tableName              name of the backing table
 * @param keySchema              key definition; must contain a Hash key
 * @param globalSecondaryIndexes optional global secondary indexes
 * @param localSecondaryIndexes  optional local secondary indexes
 * @param attributeDefinitions   attribute types referenced by keys and indexes
 * @param blockOnTableCreation   block the constructor until the table exists
 */
abstract class BaseDynamoDao[Model: Format](
  val client: DynamoDBClient,
  val tableName: String,
  val keySchema: Seq[AttributeIndex],
  val globalSecondaryIndexes: Seq[TableIndex] = Nil,
  val localSecondaryIndexes: Seq[TableIndex] = Nil,
  val attributeDefinitions: Seq[AttributeDefinition],
  val blockOnTableCreation : Boolean = false // Primarily meant for unit tests
)(implicit ec: ExecutionContext) {
  val webService = DynamoDbWebService(client)
  val tableNameJson = Json.obj("TableName" -> tableName)

  // Hash key attribute name; all single-item operations key on this.
  private val primaryKey: String =
    keySchema.find(_.keyType == KeyType.Hash).map(_.attributeName)
      .getOrElse(throw new Exception("Primary key must be defined"))

  // Ensure the table exists as a construction side effect.
  if (blockOnTableCreation) createTableIfMissingAndBlock() else createTableIfMissing()

  /** The request used to create this DAO's table when it is missing. */
  private def createTableRequest: CreateTableRequest =
    CreateTableRequest(
      tableName,
      keySchema = keySchema,
      globalSecondaryIndexes = globalSecondaryIndexes.toOption,
      localSecondaryIndexes = localSecondaryIndexes.toOption,
      attributeDefinitions = attributeDefinitions
    )

  // TODO: This will probably fail in AWS due to the default future timeout of
  // 10 seconds. Need to modify the future timeout to make this work.
  private def createTableIfMissingAndBlock(): Unit = {
    // Block the calling thread for the existence check and, if needed, the
    // table creation.
    val exists = Await.result(tableExists(), 30 seconds)
    if (!exists) {
      Await.result(new GlobalDynamoDao(client).createTableOnComplete(createTableRequest), 30 seconds)
    }
  }

  private def createTableIfMissing(): Unit = {
    // Fire-and-forget: existence-check and creation failures are intentionally
    // swallowed so DAO construction never fails on a transient error.
    tableExists().onComplete {
      case Success(exists) =>
        if (!exists) {
          new GlobalDynamoDao(client).createTable(createTableRequest).onComplete {
            case Success(_) => ()
            case Failure(_) => ()
          }
        }
      case Failure(_) => ()
    }
  }

  /** True if this DAO's table already exists. */
  def tableExists(): Future[Boolean] = {
    webService
      .post("DynamoDB_20120810.DescribeTable", tableNameJson)
      .map(_ => true)
      .recover {
        case e: ResourceNotFoundException => false
      }
  }

  /** Drops the backing table. */
  def deleteTable(): Future[Unit] = {
    webService.post("DynamoDB_20120810.DeleteTable", tableNameJson).map(x => ())
  }

  /** Scans the whole table. Expensive; intended for small tables and tests. */
  def getAll(): Future[Seq[Model]] = {
    webService.scan(tableNameJson).map { result =>
      result.map(_.as[Model])
    }
  }

  /**
   * Finds one item by primary (hash) key.
   *
   * NOTE(review): unlike delete(), the key value is not passed through
   * wrapItemVal here - confirm the webService layer performs the wrapping.
   *
   * @param value the hash key value
   * @return the matching model, if any
   */
  def get[T: Writes](value: T): Future[Option[Model]] = {
    val json = Json.obj(
      "Key" -> Json.obj(
        primaryKey -> value
      )
    ) ++ tableNameJson

    webService.getItem(json).map { _.map(_.as[Model]) }
  }

  /**
   * Finds one item by hash and range key. Not implemented yet: calling this
   * throws scala.NotImplementedError.
   */
  def findOne[A: Writes, B: Writes](hash: A, range: B): Future[Option[Model]] = ???

  /** Inserts or fully replaces the given item. */
  def put(model: Model): Future[Unit] = {
    val json = Json.obj(
      "Item" -> model
    ) ++ tableNameJson

    webService.putItem(json).map(x => ())
  }

  /** Deletes the item with the given primary (hash) key value. */
  def delete[A: Writes](value: A): Future[Unit] = {
    val json = Json.obj(
      "Key" -> Json.obj(
        primaryKey -> wrapItemVal(Json.toJson(value))
      )
    ) ++ tableNameJson

    webService.post("DynamoDB_20120810.DeleteItem", json).map(x => ())
  }

  /** Executes a query request and deserializes the returned `Items`. */
  private def runQuery(request: QueryRequest): Future[Seq[Model]] =
    webService.post("DynamoDB_20120810.Query", Json.toJson(request)).map { result =>
      (result.json \ "Items").asOpt[Seq[JsObject]]
        .map(_.map(unwrapItem(_).as[Model]))
        .getOrElse(Nil)
    }

  /** Executes a COUNT query for the given index and condition. */
  private def countQuery(index: String, keyCondition: KeyCondition): Future[Int] = {
    val request = QueryRequest(tableName, Some(index), Seq(keyCondition), Some(QuerySelect.Count))
    webService.post("DynamoDB_20120810.Query", Json.toJson(request)).map { response =>
      (response.json \ "Count").as[Int]
    }
  }

  /** Queries the table by primary key plus a range-key condition. */
  def query[A: Writes](keyValue: A, rangeCondition: KeyCondition): Future[Seq[Model]] =
    runQuery(QueryRequest(tableName, keyConditions = Seq(primaryKey $eq keyValue, rangeCondition)))

  /** Queries a secondary index by its hash-key condition. */
  def queryByIndex(index: String, hashCondition: KeyCondition): Future[Seq[Model]] =
    runQuery(QueryRequest(tableName, Some(index), Seq(hashCondition)))

  /** Queries a secondary index by hash- and range-key conditions. */
  def queryByIndex(index: String, hashCondition: KeyCondition, rangeCondition: KeyCondition): Future[Seq[Model]] =
    runQuery(QueryRequest(tableName, Some(index), Seq(hashCondition, rangeCondition)))

  /** True if at least one item matches the condition on the given index. */
  def exists(index: String, keyCondition: KeyCondition): Future[Boolean] =
    countQuery(index, keyCondition).map(_ > 0)

  /** Number of items matching the condition on the given index. */
  def count(index: String, keyCondition: KeyCondition): Future[Int] =
    countQuery(index, keyCondition)
}
| rcirka/Play-DynamoDB | src/main/scala/com/rcirka/play/dynamodb/dao/BaseDynamoDao.scala | Scala | mit | 6,283 |
/**
* Copyright (C) 2015-2016 Philipp Haller
*/
package lacasa.run
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
@RunWith(classOf[JUnit4])
class ControlSpec {
  import scala.util.control.ControlThrowable
  import lacasa.Box._

  /**
   * Documents the LaCasa-safe exception-handling pattern: a catch clause must
   * rethrow ControlThrowable (used internally for control flow) and may only
   * handle ordinary Throwables. `try { 5 }` cannot throw here, so `res` is 5.
   */
  @Test
  def test1(): Unit = {
    println("run.ControlSpec.test1")
    val res = try { 5 } catch {
      case c: ControlThrowable =>
        throw c
      case t: Throwable =>
        println("hello")
    }
    assert(res == 5, "this should not fail")
  }
}
| phaller/lacasa | plugin/src/test/scala/lacasa/run/Control.scala | Scala | bsd-3-clause | 532 |
package com.jeffharwell.commoncrawl.warcparser
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
/*
* Call the implements a WARC Record Filter
*
* At present this class applies some basic rules to the content of the WARC Record
* and returns true if it matches false otherwise.
*/
/*
 * Implements a WARC Record Filter.
 *
 * Applies some basic keyword rules to the content of a WARC Record and
 * returns true if the record matches, false otherwise.
 */
class MyWARCFilter() extends java.io.Serializable {

  /*
   * MyWARCFilter
   *
   * This is not a general class; it implements one specific filter. The basic
   * idea is to look for keywords that, one, occur a certain number of times
   * and, two, occur in sentences of a certain size.
   *
   * This filter is just the first cut ahead of sentence parsing and relational
   * tuple extraction, so it is more important that it be fast than accurate:
   * it is expected to filter around 1 billion documents and extract the ~.1%
   * that are actually interesting.
   */

  // Keywords searched for in record content (matched case-insensitively).
  val keywords = List[String]("trump","clinton","presidential","asthma")

  // Controls how many times a keyword must be mentioned in the
  // content before it is considered a match. Defaults to 7.
  var minimummentions = 7
  var debug = false
  // Records shorter than this produce no debug output even when debug is on.
  var debug_message_length = 500000

  // This controls the maximum number of chunks that the detailed test will
  // scan for the presence of keywords (bounds the per-record cost).
  var max_chunks_to_check = 5000

  /** Setter for the minimum number of keyword mentions. */
  def setMinMentions(i: Int): Unit = {
    minimummentions = i
  }

  /** Getter for the minimum number of keyword mentions. */
  def getMinMentions(): Int = minimummentions

  /*
   * Sets the debug flag. The code then writes timing messages to standard
   * output, designed to help with performance profiling.
   */
  def setDebug() {
    debug = true
    println("Debugging is on for MyWARCFilter")
    println(s" Debugging output for records where length > ${debug_message_length}")
    println(s" Will only check a maximum of ${max_chunks_to_check} chunks when doing a detailed check.")
  }

  /*
   * Sets the content length at which debugging messages will be written
   * when setDebug has been called.
   */
  def setDebugMessageLength(l: Int): Unit = {
    debug_message_length = l
  }

  /*
   * The filter only scans a bounded number of chunks when doing a detailed
   * check: once you get over ~100,000 chunks the detailed check can take
   * minutes, which would stall a network stream. This also gates some of the
   * debugging output.
   */
  def setMaxChunksToCheck(m: Int): Unit = {
    max_chunks_to_check = m
  }

  /*
   * Scala syntactic sugar: apply() lets the filter be used as a function:
   *
   *   val myfilter = new MyWARCFilter()
   *   myfilter(mywarcrecord)
   *
   * containsKeywords is the cheap first pass (fast pattern match for each
   * keyword over the whole content); detailCheck is the more expensive
   * sentence-level pass, only run when the first pass matched.
   *
   * [A <: WARCRecord] means this method requires a subtype of WARCRecord.
   *
   * @param w a WARCRecord to filter
   * @return true if the record passes the filter
   */
  def apply[A <: WARCRecord](w: A): Boolean = {
    if (debug && w.fields("Content").length > debug_message_length) { println(s"Processing Record ${w.get("WARC-Record-ID")} of length ${w.fields("Content").length}") }
    val start_time = System.currentTimeMillis()
    if (containsKeywords(w.fields("Content"), minimummentions)) {
      val end_time = System.currentTimeMillis()
      if (debug && w.fields("Content").length > debug_message_length) { println(s"Running containsKeywords took ${end_time - start_time} ms") }
      detailCheck(w)
    } else {
      false
    }
  }

  /*
   * The detailed check: split the content into newline- then sentence-sized
   * chunks, keep only chunks that look like real sentences (more than 7 but
   * fewer than 50 words - the upper bound filters spam keyword stuffing),
   * and scan at most max_chunks_to_check of them for a keyword mention.
   */
  def detailCheck[A <: WARCRecord](wrecord: A): Boolean = {
    var start_time = System.currentTimeMillis()

    // Imperative chunk build: split on newlines, then on ". ". A functional
    // foldLeft version measured dramatically slower (~33s vs ~39ms to build
    // 46,000 chunks), so the ListBuffer version is kept deliberately.
    val newline_chunks: Array[String] = wrecord.fields("Content").split("\\\\n")
    val chunks: ListBuffer[String] = new ListBuffer()
    newline_chunks.foreach { x =>
      chunks.appendAll(x.split("\\\\. "))
    }
    var end_time = System.currentTimeMillis()
    if (debug && chunks.length > max_chunks_to_check) { println(s"Generated ${chunks.length} chunks in ${end_time - start_time} ms") }

    // Only chunks that might actually be sentences (7 < words < 50) are worth
    // the expensive scan.
    def chunkFilter(chunk: String): Boolean = {
      // count(_ != "") ignores the empty tokens produced by repeated spaces
      val chunksize = chunk.split(" ").count(_ != "")
      chunksize > 7 && chunksize < 50
    }
    val valid_chunks = chunks.filter(chunkFilter(_))
    if (debug && chunks.length > max_chunks_to_check) { println(s"Found ${valid_chunks.length} chunks of appropriate size to check") }

    // Depth cutoff: leave the last `min` chunks unscanned so at most
    // max_chunks_to_check chunks are examined per record.
    val min = {
      if (valid_chunks.length > max_chunks_to_check) {
        valid_chunks.length - max_chunks_to_check
      } else {
        0
      }
    }
    if (debug && valid_chunks.length > max_chunks_to_check) { println(s"Only searching to depth ${min}") }

    // Scan chunks front to back until a keyword is found or only `min` chunks
    // remain. BUG FIX: the previous version tested the tail size before
    // looking at the head, so the last chunk (and any single-chunk list) was
    // never examined; the head is now always checked first.
    @tailrec
    def checkChunks(chunks: ListBuffer[String], min: Int): Boolean = {
      if (chunks.isEmpty) false
      else if (containsKeywords(chunks.head, 1)) true
      else {
        val t = chunks.tail
        if (t.size <= min) false
        else checkChunks(t, min)
      }
    }

    start_time = System.currentTimeMillis()
    val has_mentions: Boolean = checkChunks(valid_chunks, min)
    end_time = System.currentTimeMillis()
    if (debug && chunks.length > max_chunks_to_check) { println(s"checkChunks ran in ${end_time - start_time} ms") }
    has_mentions
  }

  /*
   * True if any keyword occurs at least `mentions` times in the string.
   * Assumes the keywords list is non-empty.
   */
  def containsKeywords(tocheck: String, mentions: Int): Boolean = {
    @tailrec
    def checkKeyword(tocheck: String, keywordlist: List[String]): Boolean = {
      val keyword = keywordlist.head
      val t = keywordlist.tail
      if (ciMatch(tocheck, keyword) >= mentions) { true }
      else if (t.size == 0) { false }
      else { checkKeyword(tocheck, t) }
    }
    checkKeyword(tocheck, keywords)
  }

  /* ciMatch
   *
   * Pseudo-optimized case-insensitive substring counter, converted from some
   * plain old nasty Java derived from @icza's answer to
   * http://stackoverflow.com/questions/86780/how-to-check-if-a-string-contains-another-string-in-a-case-insensitive-manner-in
   *
   * String.regionMatches is used instead of lower-casing a slice; it measured
   * one to two seconds faster over a full WARC file.
   *
   * @param src the string to search for matches
   * @param what the string to match
   * @return the number of matches; when `what` is empty, src.size is returned
   *         (an arbitrary but deliberate quirk, kept for compatibility)
   */
  def ciMatch(src: String, what: String): Int = {
    val length: Int = what.size
    if (length == 0) {
      src.size // a bit arbitrary, but kinder than a runtime error
    } else {
      var matches = 0
      val firstLo = Character.toLowerCase(what.charAt(0))
      val firstUp = Character.toUpperCase(what.charAt(0))

      for (i <- 0 to src.size - length) {
        val ch = src.charAt(i)
        // Cheap first-character screen before the full region comparison.
        if ((ch == firstLo || ch == firstUp) && src.regionMatches(true, i, what, 0, length)) {
          matches += 1
        }
      }
      matches
    }
  }
}
| jeffharwell/CommonCrawlScalaTools | warcparser/src/main/scala/com/jeffharwell/commoncrawl/warcparser/MyWARCFilter.scala | Scala | mit | 10,221 |
package org.jetbrains.plugins.scala
package codeInsight
package intention
package controlFlow
import com.intellij.codeInsight.intention.PsiElementBaseIntentionAction
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiDocumentManager, PsiElement}
import org.jetbrains.plugins.scala.codeInsight.ScalaCodeInsightBundle
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText
/**
* @author Ksenia.Sautina
* @since 6/6/12
*/
final class MergeElseIfIntention extends PsiElementBaseIntentionAction {
  // Available only when the caret sits between the then-branch and the
  // else-branch of an `if`, and the else-branch is a block containing exactly
  // one nested `if` (i.e. `else { if ... }` that can become `else if ...`).
  override def isAvailable(project: Project, editor: Editor, element: PsiElement): Boolean = {
    val ifStmt: ScIf = PsiTreeUtil.getParentOfType(element, classOf[ScIf], false)
    if (ifStmt == null) return false
    val offset = editor.getCaretModel.getOffset
    val thenBranch = ifStmt.thenExpression.orNull
    val elseBranch = ifStmt.elseExpression.orNull
    if (thenBranch == null || elseBranch == null) return false
    if (!(thenBranch.getTextRange.getEndOffset <= offset && offset <= elseBranch.getTextRange.getStartOffset))
      return false
    val blockExpr = ifStmt.elseExpression.orNull
    if (blockExpr != null && blockExpr.isInstanceOf[ScBlockExpr]) {
      val exprs = blockExpr.asInstanceOf[ScBlockExpr].exprs
      if (exprs.size == 1 && exprs.head.isInstanceOf[ScIf]) {
        return true
      }
    }
    false
  }

  // Rebuilds the `if` with the else-block's braces stripped (`else { if ... }`
  // -> `else if ...`) and restores the caret to its position relative to the
  // `else` keyword.
  // NOTE(review): the .get calls on condition/thenExpression/elseExpression are
  // safe only because isAvailable() has validated the statement shape.
  override def invoke(project: Project, editor: Editor, element: PsiElement): Unit = {
    val ifStmt: ScIf = PsiTreeUtil.getParentOfType(element, classOf[ScIf], false)
    if (ifStmt == null || !ifStmt.isValid) return

    val start = ifStmt.getTextRange.getStartOffset
    // Locate the "else" keyword between the branches to compute the caret's
    // offset relative to it; that relative offset is re-applied after the
    // replacement text is built.
    val startIndex = ifStmt.thenExpression.get.getTextRange.getEndOffset - ifStmt.getTextRange.getStartOffset
    val endIndex = ifStmt.elseExpression.get.getTextRange.getStartOffset - ifStmt.getTextRange.getStartOffset
    val elseIndex = ifStmt.getText.substring(startIndex, endIndex).indexOf("else") - 1
    val diff = editor.getCaretModel.getOffset - ifStmt.thenExpression.get.getTextRange.getEndOffset - elseIndex
    // Preserve whether "else" was on its own line in the original source.
    val newlineBeforeElse = ifStmt.children.find(_.getNode.getElementType == ScalaTokenTypes.kELSE).
      exists(_.getPrevSibling.getText.contains("\\n"))

    // drop(1)/dropRight(1) strips the braces of the else block.
    val expr = new StringBuilder
    expr.append("if (").append(ifStmt.condition.get.getText).append(") ").
      append(ifStmt.thenExpression.get.getText).append(if (newlineBeforeElse) "\\n" else " ").append("else ").
      append(ifStmt.elseExpression.get.getText.trim.drop(1).dropRight(1))

    val newIfStmt = createExpressionFromText(expr.toString())(element.getManager)
    val size = newIfStmt.asInstanceOf[ScIf].thenExpression.get.getTextRange.getEndOffset -
      newIfStmt.asInstanceOf[ScIf].getTextRange.getStartOffset

    inWriteAction {
      ifStmt.replaceExpression(newIfStmt, removeParenthesis = true)
      editor.getCaretModel.moveToOffset(start + diff + size)
      PsiDocumentManager.getInstance(project).commitDocument(editor.getDocument)
    }
  }

  override def getFamilyName: String = ScalaCodeInsightBundle.message("family.name.merge.else.if")

  override def getText: String = ScalaCodeInsightBundle.message("merge.elseif")
}
| JetBrains/intellij-scala | scala/codeInsight/src/org/jetbrains/plugins/scala/codeInsight/intention/controlFlow/MergeElseIfIntention.scala | Scala | apache-2.0 | 3,448 |
package org.sameersingh.scalaplot.gnuplot
/**
* @author sameer
* @date 10/9/12
*/
/**
 * Helpers for adding annotations to an existing gnuplot plot: Greek-letter
 * strings in gnuplot's "{/Symbol x}" enhanced-text notation, plus builders
 * for `replot`, `set label` and `set arrow` commands.
 *
 * @author sameer
 * @date 10/9/12
 */
object Add {
  // Lower- and upper-case Greek letters in gnuplot enhanced-text notation.
  val alpha = "{/Symbol a}"
  val Alpha = "{/Symbol A}"
  val beta = "{/Symbol b}"
  val Beta = "{/Symbol B}"
  val delta = "{/Symbol d}"
  val Delta = "{/Symbol D}"
  val phi = "{/Symbol f}"
  val Phi = "{/Symbol F}"
  val gamma = "{/Symbol g}"
  val Gamma = "{/Symbol G}"
  val lambda = "{/Symbol l}"
  val Lambda = "{/Symbol L}"
  val pi = "{/Symbol p}"
  val Pi = "{/Symbol P}"
  val theta = "{/Symbol q}"
  val Theta = "{/Symbol Q}"
  val tau = "{/Symbol t}"
  val Tau = "{/Symbol T}"

  /** Command that re-plots the current plot with an extra function expression. */
  def func(function: String): Seq[String] =
    Seq(s"replot $function")

  /** Command placing a text label at the given plot coordinates. */
  def label(labelStr: String, xpos: Double, ypos: Double): Seq[String] =
    Seq(f"""set label "$labelStr%s" at $xpos%f,$ypos%f""")

  /** Command drawing an arrow from (x1, y1) to (x2, y2). */
  def arrow(x1: Double, y1: Double, x2: Double, y2: Double): Seq[String] =
    Seq(f"set arrow from $x1%f,$y1%f to $x2%f,$y2%f")
}
| nightscape/scalaplot | src/main/scala/org/sameersingh/scalaplot/gnuplot/Add.scala | Scala | bsd-2-clause | 975 |
import com.github.mrmechko.swordnet.structures.{SRelationType, SPos, SKey}
import org.scalatest.{Matchers, FlatSpec}
/**
* Created by mechko on 6/19/15.
*/
/**
 * Round-trip and rendering tests for the SWordNet value types: [[SKey]]
 * (WordNet sense keys) and [[SPos]] (parts of speech).
 */
class TypeTests extends FlatSpec with Matchers {
  // Both the raw sense-key form and the lemma#pos#senseNumber form should
  // normalize to the same canonical key.
  "An SKey" should "look like an SKey" in {
    (SKey.from("cat%1:05:00::").toString) shouldBe "SKey(cat%1:05:00::)"
    (SKey.from("cat#n#1").toString) shouldBe "SKey(cat%1:05:00::)"
  }

  // SPos should be constructible from the WordNet pos index, the single-letter
  // tag, or the full name - all yielding the same value.
  "An SPos" should "look like an SPos" in {
    (SPos(1).toString) shouldBe "SPos(noun)"
    (SPos(2).toString) shouldBe "SPos(verb)"
    (SPos(3).toString) shouldBe "SPos(adjective)"
    (SPos(4).toString) shouldBe "SPos(adverb)"
    (SPos(5).toString) shouldBe "SPos(satellite)"
    (SPos(6).toString) shouldBe "SPos(other)"

    SPos('n') shouldBe SPos("noun")
    SPos(1) shouldBe SPos("noun")

    SPos('v') shouldBe SPos("verb")
    SPos(2) shouldBe SPos("verb")

    SPos('r') shouldBe SPos("adverb")
    SPos(4) shouldBe SPos("adverb")

    SPos('a') shouldBe SPos("adjective")
    SPos(3) shouldBe SPos("adjective")

    SPos('s') shouldBe SPos("satellite")
    SPos(5) shouldBe SPos("satellite")
  }

  "An SPos" should "take abbreviations of standard pos types" in {
    SPos("adj").toString shouldBe "SPos(adjective)"
    SPos("adv").toString shouldBe "SPos(adverb)"
    SPos("prep").toString shouldBe "SPos(preposition)"
  }

  // Exercises the derived accessors of a concrete key, including synset
  // membership and hypernym traversal (the latter only printed, not asserted).
  "cat%1:05:00::" should "be render SKeys" in {
    val cat = SKey.from("cat%1:05:00::")
    cat.key shouldBe "cat%1:05:00::"
    cat.lemma shouldBe "cat"
    cat.pos shouldBe SPos('n')
    cat.definition shouldBe "feline mammal usually having thick soft fur and no ability to roar: domestic cats; wildcats"
    cat.senseNumber shouldBe 1
    cat.id shouldBe "cat#n#1"
    cat.synset.keys.sortBy(_.key) shouldBe Seq(cat, SKey("true_cat%1:05:00::")).sortBy(_.key)
    cat.hasSemantic(SRelationType.hypernym).map(_.head).foreach(println)
  }
}
| mrmechko/SWordNet | src/test/scala/SimpleTest.scala | Scala | mit | 1,882 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
class LoneElementSpec extends Spec with SharedHelpers {
  // Exercises Collection.loneElement through the Matchers DSL: it must yield
  // the single element for size-1 collections and fail with a descriptive
  // TestFailedException (pointing at this file) otherwise.
  object `when used with Matchers` extends Matchers {

    def `should work with xs.loneElement and passed when xs only contains one element and the one element passed the check` {
      List(10).loneElement should be > 9
    }

    def `should throw TestFailedException with correct stack depth and message when xs.loneElement contains one element but it failed the check` {
      val e = intercept[exceptions.TestFailedException] {
        List(8).loneElement should be > 9
      }
      // thisLineNumber - 3 points back at the failing `should` line above.
      e.failedCodeFileName should be (Some("LoneElementSpec.scala"))
      e.failedCodeLineNumber should be (Some(thisLineNumber - 3))
      e.message should be (Some("8 was not greater than 9"))
    }

    def `should throw TestFailedException with correct stack depth and message when xs contains 0 element and xs.loneElement is called` {
      val xs = List.empty[Int]
      val e = intercept[exceptions.TestFailedException] {
        xs.loneElement should be > 9
      }
      e.failedCodeFileName should be (Some("LoneElementSpec.scala"))
      e.failedCodeLineNumber should be (Some(thisLineNumber - 3))
      e.message should be (Some("Expected " + xs + " to contain exactly 1 element, but it has size 0"))
    }

    def `should throw TestFailedException with correct stack depth and message when xs contains > 1 elements and xs.loneElement is called` {
      val xs = List(8, 12)
      val e = intercept[exceptions.TestFailedException] {
        xs.loneElement should be > 9
      }
      e.failedCodeFileName should be (Some("LoneElementSpec.scala"))
      e.failedCodeLineNumber should be (Some(thisLineNumber - 3))
      e.message should be (Some("Expected " + xs + " to contain exactly 1 element, but it has size 2"))
    }
  }
object `when used with LoneElement trait` extends LoneElement {
def `should work with xs.loneElement and passed when xs only contains one element and the one element passed the check` {
assert(List(10).loneElement > 9)
}
def `should throw TestFailedException with correct stack depth and message when xs.loneElement contains one element but it failed the check` {
val e = intercept[exceptions.TestFailedException] {
assert(List(8).loneElement > 9)
}
assert(e.failedCodeFileName === Some("LoneElementSpec.scala"))
assert(e.failedCodeLineNumber === Some(thisLineNumber - 3))
assert(e.message === None)
}
def `should throw TestFailedException with correct stack depth and message when xs contains 0 element and xs.loneElement is called` {
val xs = List.empty[Int]
val e = intercept[exceptions.TestFailedException] {
assert(xs.loneElement > 9)
}
assert(e.failedCodeFileName == Some("LoneElementSpec.scala"))
assert(e.failedCodeLineNumber == Some(thisLineNumber - 3))
assert(e.message === Some("Expected List() to contain exactly 1 element, but it has size 0"))
}
def `should throw TestFailedException with correct stack depth and message when xs contains > 1 elements and xs.loneElement is called` {
val xs = List(8, 12)
val e = intercept[exceptions.TestFailedException] {
assert(xs.loneElement > 9)
}
assert(e.failedCodeFileName === Some("LoneElementSpec.scala"))
assert(e.failedCodeLineNumber === Some(thisLineNumber - 3))
assert(e.message === Some("Expected List(8, 12) to contain exactly 1 element, but it has size 2"))
}
}
object `when used with both Matchers and LoneElement together` extends Matchers with LoneElement {
def `should work with xs.loneElement and passed when should syntax is used and xs only contains one element and the one element passed the check` {
List(10).loneElement should be > 9
}
def `should work with xs.loneElement and passed when assert syntax is used and xs only contains one element and the one element passed the check` {
assert(List(10).loneElement > 9)
}
def `should throw TestFailedException with correct stack depth and message when should syntax is used and xs.loneElement contains one element but it failed the check` {
val e = intercept[exceptions.TestFailedException] {
List(8).loneElement should be > 9
}
e.failedCodeFileName should be (Some("LoneElementSpec.scala"))
e.failedCodeLineNumber should be (Some(thisLineNumber - 3))
e.message should be (Some("8 was not greater than 9"))
}
def `should throw TestFailedException with correct stack depth and message when assert syntax is used and xs.loneElement contains one element but it failed the check` {
val e = intercept[exceptions.TestFailedException] {
assert(List(8).loneElement > 9)
}
assert(e.failedCodeFileName === Some("LoneElementSpec.scala"))
assert(e.failedCodeLineNumber === Some(thisLineNumber - 3))
assert(e.message === None)
}
def `should throw TestFailedException with correct stack depth and message when should syntax is used and xs contains 0 element and xs.loneElement is called` {
val xs = List.empty[Int]
val e = intercept[exceptions.TestFailedException] {
xs.loneElement should be > 9
}
e.failedCodeFileName should be (Some("LoneElementSpec.scala"))
e.failedCodeLineNumber should be (Some(thisLineNumber - 3))
e.message should be (Some("Expected " + xs + " to contain exactly 1 element, but it has size 0"))
}
def `should throw TestFailedException with correct stack depth and message when assert syntax is used and xs contains 0 element and xs.loneElement is called` {
val xs = List.empty[Int]
val e = intercept[exceptions.TestFailedException] {
assert(xs.loneElement > 9)
}
assert(e.failedCodeFileName == Some("LoneElementSpec.scala"))
assert(e.failedCodeLineNumber == Some(thisLineNumber - 3))
assert(e.message === Some("Expected List() to contain exactly 1 element, but it has size 0"))
}
def `should throw TestFailedException with correct stack depth and message when should syntax is used and xs contains > 1 elements and xs.loneElement is called` {
val xs = List(8, 12)
val e = intercept[exceptions.TestFailedException] {
xs.loneElement should be > 9
}
e.failedCodeFileName should be (Some("LoneElementSpec.scala"))
e.failedCodeLineNumber should be (Some(thisLineNumber - 3))
e.message should be (Some("Expected " + xs + " to contain exactly 1 element, but it has size 2"))
}
def `should throw TestFailedException with correct stack depth and message when assert syntax is used and xs contains > 1 elements and xs.loneElement is called` {
val xs = List(8, 12)
val e = intercept[exceptions.TestFailedException] {
assert(xs.loneElement > 9)
}
assert(e.failedCodeFileName === Some("LoneElementSpec.scala"))
assert(e.failedCodeLineNumber === Some(thisLineNumber - 3))
assert(e.message === Some("Expected List(8, 12) to contain exactly 1 element, but it has size 2"))
}
}
}
| svn2github/scalatest | src/test/scala/org/scalatest/LoneElementSpec.scala | Scala | apache-2.0 | 7,775 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql.collection
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.locks.{Lock, ReentrantReadWriteLock}
import scala.collection.{GenTraversableOnce, mutable}
import scala.reflect.ClassTag
import scala.util.Random
/**
 * A concurrent hash map partitioned into multiple independent segments
 * (each an `M <: SegmentMap[K, V]`) to reduce lock contention. Every segment
 * carries its own read/write lock; the segment for a key is selected from
 * the highest bits of the key's hash (see `_segmentShift`/`_segmentMask`).
 * The total entry count is maintained separately in an [[AtomicLong]].
 *
 * @param initialSize    initial capacity, divided across the segments
 * @param loadFactor     load factor for the segments, in (0.0, 1.0)
 * @param concurrency    requested number of segments (capped at 2^16)
 * @param segmentCreator factory invoked as (initialCapacity, loadFactor,
 *                       segmentIndex, numSegments) to build each segment
 * @param hasher         hash function for keys; when null, `##` is used
 */
private[sql] class ConcurrentSegmentedHashMap[K, V, M <: SegmentMap[K, V] : ClassTag](
    private val initialSize: Int,
    val loadFactor: Double,
    val concurrency: Int,
    val segmentCreator: (Int, Double, Int, Int) => M,
    val hasher: K => Int) extends Serializable {
  /** maximum size of batches in bulk insert API */
  private[this] final val MAX_BULK_INSERT_SIZE = 256
  /**
   * An auxiliary constructor that creates a concurrent hash map with initial
   * size `32` and the default load factor, using the given concurrency.
   */
  def this(concurrency: Int, segmentCreator: (Int, Double, Int, Int) => M,
      hasher: K => Int) =
    this(32, SegmentMap.DEFAULT_LOAD_FACTOR, concurrency,
      segmentCreator, hasher)
  require(initialSize > 0,
    s"ConcurrentSegmentedHashMap: unexpected initialSize=$initialSize")
  require(loadFactor > 0.0 && loadFactor < 1.0,
    s"ConcurrentSegmentedHashMap: unexpected loadFactor=$loadFactor")
  require(concurrency > 0,
    s"ConcurrentSegmentedHashMap: unexpected concurrency=$concurrency")
  require(segmentCreator != null,
    "ConcurrentSegmentedHashMap: null segmentCreator")
  // Initial capacity of a single segment: at least 2, rounded to a power of 2.
  private def initSegmentCapacity(nsegs: Int) =
    math.max(2, SegmentMap.nextPowerOf2(initialSize / nsegs))
  private val _segments: Array[M] = {
    val nsegs = math.min(concurrency, 1 << 16)
    val segs = new Array[M](nsegs)
    // calculate the initial capacity of each segment
    segs.indices.foreach(i => {
      segs(i) = segmentCreator(initSegmentCapacity(nsegs),
        loadFactor, i, nsegs)
    })
    segs
  }
  // Total number of entries across all segments.
  private val _size = new AtomicLong(0)
  // Shift and mask mapping a 32-bit hash to a segment index: the number of
  // segments is rounded up to a power of two and the top bits select one.
  private val (_segmentShift, _segmentMask) = {
    var sshift = 0
    var ssize = 1
    val concurrency = _segments.length
    if (concurrency > 1) {
      while (ssize < concurrency) {
        sshift += 1
        ssize <<= 1
      }
    }
    (32 - sshift, ssize - 1)
  }
  /** Select the segment responsible for the given hash. */
  private final def segmentFor(hash: Int): M = {
    _segments((hash >>> _segmentShift) & _segmentMask)
  }
  /** Returns true if `k` is present. Takes the segment's read lock. */
  final def contains(k: K): Boolean = {
    val hasher = this.hasher
    val hash = if (hasher != null) hasher(k) else k.##
    val seg = segmentFor(hash)
    val lock = seg.readLock
    lock.lock()
    try {
      seg.contains(k, hash)
    } finally {
      lock.unlock()
    }
  }
  /** Look up `k` under the segment's read lock, delegating to the segment. */
  final def apply(k: K): V = {
    val hasher = this.hasher
    val hash = if (hasher != null) hasher(k) else k.##
    val seg = segmentFor(hash)
    val lock = seg.readLock
    lock.lock()
    try {
      seg(k, hash)
    } finally {
      lock.unlock()
    }
  }
  /** Like `apply` but wraps the (possibly null) result in an Option. */
  final def get(k: K): Option[V] = {
    val hasher = this.hasher
    val hash = if (hasher != null) hasher(k) else k.##
    val seg = segmentFor(hash)
    val lock = seg.readLock
    lock.lock()
    try {
      Option(seg(k, hash))
    } finally {
      lock.unlock()
    }
  }
  /**
   * Insert or overwrite the mapping for `k` under the segment write lock.
   * Returns true when a new entry was added (size is incremented), false
   * when an existing entry was replaced.
   */
  final def update(k: K, v: V): Boolean = {
    val hasher = this.hasher
    val hash = if (hasher != null) hasher(k) else k.##
    val seg = segmentFor(hash)
    val lock = seg.writeLock
    var added = false
    lock.lock()
    try {
      added = seg.update(k, hash, v)
    } finally {
      lock.unlock()
    }
    if (added) {
      _size.incrementAndGet()
      true
    } else false
  }
  /**
   * Apply `change` to the entry for `k` under the segment write lock.
   * Returns TRUE when a new entry was added (size incremented), FALSE when
   * an existing entry was changed, or null (which `bulkChangeValues` treats
   * as an abort signal from the segment).
   */
  final def changeValue(k: K, change: ChangeValue[K, V]): java.lang.Boolean = {
    val hasher = this.hasher
    val hash = if (hasher != null) hasher(k) else k.##
    val seg = segmentFor(hash)
    val lock = seg.writeLock
    var added: java.lang.Boolean = null
    lock.lock()
    try {
      added = seg.changeValue(k, hash, change, isLocal = true)
    } finally {
      lock.unlock()
    }
    if (added != null && added.booleanValue()) _size.incrementAndGet()
    added
  }
  /**
   * Apply `change` to many keys at once, grouping the keys per segment so
   * that each segment's write lock is taken only once per batch of at most
   * MAX_BULK_INSERT_SIZE keys. Returns the number of keys consumed.
   */
  final def bulkChangeValues(ks: Iterator[K], change: ChangeValue[K, V], bucketId: (Int) => Int,
      isLocal: Boolean) : Long = {
    val segs = this._segments
    val segShift = _segmentShift
    val segMask = _segmentMask
    val hasher = this.hasher
    // first group keys by segments
    val nsegs = segs.length
    val nsegsRange = 0 until nsegs
    val groupedKeys = new Array[mutable.ArrayBuffer[K]](nsegs)
    val groupedHashes = new Array[mutable.ArrayBuilder.ofInt](nsegs)
    var numAdded = 0
    // Lock segment i for writing, re-reading the segment reference until a
    // valid (not concurrently cleared) segment is held under its lock.
    def getLockedValidSegmentAndLock(i: Int): (M, ReentrantReadWriteLock.WriteLock) = {
      var seg = segs(i)
      var lock = seg.writeLock
      lock.lock()
      while (!seg.valid) {
        lock.unlock()
        seg = segs(i)
        lock = seg.writeLock
        lock.lock()
      }
      (seg, lock)
    }
    // Flush the locally accumulated insert count into the shared size.
    def addNumToSize(): Unit = {
      if (numAdded > 0) {
        _size.addAndGet(numAdded)
        numAdded = 0
      }
    }
    var rowCount = 0
    // split into max batch sizes to avoid buffering up too much
    val iter = new SlicedIterator[K](ks, 0, MAX_BULK_INSERT_SIZE)
    while (iter.hasNext) {
      iter.foreach { k =>
        val hash = if (hasher != null) hasher(k) else k.##
        val segIndex = (hash >>> segShift) & segMask
        val buffer = groupedKeys(segIndex)
        if (buffer != null) {
          buffer += change.keyCopy(k)
          groupedHashes(segIndex) += hash
        } else {
          val newBuffer = new mutable.ArrayBuffer[K](4)
          val newHashBuffer = new mutable.ArrayBuilder.ofInt()
          newHashBuffer.sizeHint(4)
          newBuffer += change.keyCopy(k)
          newHashBuffer += hash
          groupedKeys(segIndex) = newBuffer
          groupedHashes(segIndex) = newHashBuffer
        }
        rowCount += 1
      }
      var lockedState = false
      // now lock segments one by one and then apply changes for all keys
      // of the locked segment
      // shuffle the indexes to minimize segment thread contention
      Random.shuffle[Int, IndexedSeq](nsegsRange).foreach { i =>
        val keys = groupedKeys(i)
        if (keys != null) {
          val hashes = groupedHashes(i).result()
          val nhashes = hashes.length
          var (seg, lock) = getLockedValidSegmentAndLock(i)
          lockedState = true
          try {
            var added: java.lang.Boolean = null
            var idx = 0
            while (idx < nhashes) {
              added = seg.changeValue(keys(idx), bucketId(hashes(idx)), change, isLocal)
              if (added != null) {
                if (added.booleanValue()) {
                  numAdded += 1
                }
                idx += 1
              } else {
                // A null result signals that this segment must be aborted
                // immediately; the latest segment reference has to be
                // re-read after segmentAbort has completed.
                addNumToSize()
                lock.unlock()
                lockedState = false
                // Because two threads can concurrently call segmentAbort
                // and the locks are released here, there is no guarantee
                // that one thread correctly sees whether the other has
                // cleared the segments. So after segmentAbort, the segment
                // reference is unconditionally refreshed under a new lock.
                change.segmentAbort(seg)
                val segmentAndLock = getLockedValidSegmentAndLock(i)
                lockedState = true
                seg = segmentAndLock._1
                lock = segmentAndLock._2
                idx += 1
              }
            }
          } finally {
            if (lockedState) {
              addNumToSize()
              lock.unlock()
            }
          }
          // invoke the segmentEnd method outside of the segment lock
          change.segmentEnd(seg)
        }
      }
      // pick up another set of keys+values
      iter.setSlice(0, MAX_BULK_INSERT_SIZE)
      for (b <- groupedKeys) if (b != null) b.clear()
      for (b <- groupedHashes) if (b != null) b.clear()
    }
    rowCount
  }
  /** Left-fold `f` over all segments (no locking). */
  def foldSegments[U](init: U)(f: (U, M) => U): U = _segments.foldLeft(init)(f)
  /** Left-fold `f` over the segments in index range [start, end) (no locking). */
  def foldSegments[U](start: Int, end: Int, init: U)(f: (U, M) => U): U = {
    val segments = _segments
    (start until end).foldLeft(init)((itr, i) => f(itr, segments(i)))
  }
  /**
   * No synchronization in this method so use with care.
   * Use it only if you know what you are doing.
   */
  def flatMap[U](f: M => GenTraversableOnce[U]): Iterator[U] =
    _segments.iterator.flatMap(f)
  /** Fold over all values, holding each segment's read lock while folding it. */
  def foldValuesRead[U](init: U, f: (Int, V, U) => U): U = {
    _segments.foldLeft(init) { (v, seg) =>
      SegmentMap.lock(seg.readLock()) {
        seg.foldValues(v, f)
      }
    }
  }
  /** Fold over all entries, holding each segment's read lock while folding it. */
  def foldEntriesRead[U](init: U, copyIfRequired: Boolean,
      f: (K, V, U) => U): U = {
    _segments.foldLeft(init) { (v, seg) =>
      SegmentMap.lock(seg.readLock()) {
        seg.foldEntries(v, copyIfRequired, f)
      }
    }
  }
  /**
   * Run `f` with the write locks of all segments held; every successfully
   * acquired lock is released afterwards even if `f` throws.
   */
  def writeLockAllSegments[U](f: Array[M] => U): U = {
    val segments = _segments
    val locksObtained = new mutable.ArrayBuffer[Lock](segments.length)
    try {
      for (seg <- segments) {
        val lock = seg.writeLock()
        lock.lock()
        locksObtained += lock
      }
      f(segments)
    } finally {
      for (lock <- locksObtained) {
        lock.unlock()
      }
    }
  }
  /** Invalidate and replace every segment, resetting the size to zero. */
  def clear(): Unit = writeLockAllSegments { segments =>
    val nsegments = segments.length
    segments.indices.foreach(i => {
      segments(i).valid_=(false)
      segments(i).clearBucket()
      segments(i) = segmentCreator(initSegmentCapacity(segments.length), loadFactor, i, nsegments)
    })
    _size.set(0)
  }
  final def size: Long = _size.get
  final def isEmpty: Boolean = _size.get == 0
  /** Snapshot of all entries; fails when the size exceeds Int.MaxValue. */
  def toSeq: Seq[(K, V)] = {
    val size = this.size
    if (size <= Int.MaxValue) {
      val buffer = new mutable.ArrayBuffer[(K, V)](size.toInt)
      foldEntriesRead[Unit]((), true, { (k, v, _) => buffer += ((k, v)) })
      buffer
    } else {
      throw new IllegalStateException(s"ConcurrentSegmentedHashMap: size=$size" +
        " is greater than maximum integer so cannot be converted to a flat Seq")
    }
  }
  /** Snapshot of all values; fails when the size exceeds Int.MaxValue. */
  def toValues: Seq[V] = {
    val size = this.size
    if (size <= Int.MaxValue) {
      val buffer = new mutable.ArrayBuffer[V](size.toInt)
      foldValuesRead[Unit]((), { (_, v, _) => buffer += v })
      buffer
    } else {
      throw new IllegalStateException(s"ConcurrentSegmentedHashMap: size=$size" +
        " is greater than maximum integer so cannot be converted to a flat Seq")
    }
  }
  /** Snapshot of all keys; fails when the size exceeds Int.MaxValue. */
  def toKeys: Seq[K] = {
    val size = this.size
    if (size <= Int.MaxValue) {
      val buffer = new mutable.ArrayBuffer[K](size.toInt)
      foldEntriesRead[Unit]((), true, { (k, _, _) => buffer += k })
      buffer
    } else {
      throw new IllegalStateException(s"ConcurrentSegmentedHashMap: size=$size" +
        " is greater than maximum integer so cannot be converted to a flat Seq")
    }
  }
}
| vjr/snappydata | core/src/main/scala/org/apache/spark/sql/collection/ConcurrentSegmentedHashMap.scala | Scala | apache-2.0 | 11,635 |
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.utils
import scala.util.Try
import eu.cdevreeze.yaidom.core.EName
import eu.cdevreeze.yaidom.core.QName
import eu.cdevreeze.yaidom.core.Scope
/**
 * A [[TextENameExtractor]] that interprets the entire text as a QName and
 * resolves it against the passed Scope, failing with a RuntimeException when
 * the text cannot be parsed or resolved.
 */
object SimpleTextENameExtractor extends TextENameExtractor {

  def extractENames(scope: Scope, text: String): Set[EName] = {
    // Any parse or resolution failure becomes a RuntimeException carrying
    // the offending string and the scope it was tried against.
    Try(scope.resolveQNameOption(QName(text)).get)
      .map(resolvedEName => Set(resolvedEName))
      .getOrElse(sys.error(s"String '${text}' could not be resolved in scope $scope"))
  }
}
| dvreeze/yaidom | shared/src/main/scala/eu/cdevreeze/yaidom/utils/SimpleTextENameExtractor.scala | Scala | apache-2.0 | 1,182 |
package com.twitter.finatra.kafkastreams.flushing
import com.twitter.finatra.kafkastreams.internal.utils.ProcessorContextLogging
import com.twitter.finatra.kafkastreams.transformer.lifecycle.{OnClose, OnFlush, OnInit}
import com.twitter.util.Duration
import org.apache.kafka.streams.StreamsConfig
import org.apache.kafka.streams.processor.{Cancellable, PunctuationType, Punctuator}
/**
 * Mixin that replaces Kafka Streams' automatic commits with an explicit,
 * timer-driven flush-then-commit cycle: on init it verifies that the
 * built-in commit interval has been disabled (set to Duration.Top by
 * FlushingAwareServer) and schedules a wall-clock punctuator that calls
 * [[onFlush]] followed by `processorContext.commit()` every
 * [[commitInterval]].
 */
trait Flushing extends OnInit with OnClose with OnFlush with ProcessorContextLogging {
  // Handle to the scheduled commit punctuator; null until scheduled and
  // after close. Volatile since init and close may run on different threads.
  @volatile private var commitPunctuatorCancellable: Cancellable = _
  /** How often to flush and commit; Duration.Top disables the timer. */
  protected def commitInterval: Duration
  //TODO: Create and use frameworkOnInit for framework use
  override def onInit(): Unit = {
    super.onInit()
    // Kafka Streams' own commit interval must be "infinite" (Duration.Top),
    // otherwise its automatic commits would race with the explicit commits
    // performed by the punctuator below.
    val streamsCommitIntervalMillis = processorContext
      .appConfigs().get(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG).asInstanceOf[java.lang.Long]
    assert(
      streamsCommitIntervalMillis == Duration.Top.inMillis,
      s"You're using an operator that requires 'Flushing' functionality (e.g. FlushingProcessor/Transformer or AsyncProcessor/Transformer). As such, your server must mixin FlushingAwareServer so that automatic Kafka Streams commit will be disabled."
    )
    if (commitInterval != Duration.Top) {
      info(s"Scheduling timer to call commit every $commitInterval")
      commitPunctuatorCancellable = processorContext
        .schedule(
          commitInterval.inMillis,
          PunctuationType.WALL_CLOCK_TIME,
          new Punctuator {
            override def punctuate(timestamp: Long): Unit = {
              // Flush any buffered state first so the commit covers it.
              onFlush()
              processorContext.commit()
            }
          })
    }
  }
  //TODO: Create and use frameworkOnClose
  override def onClose(): Unit = {
    super.onClose()
    // Cancel the punctuator so no further commits fire after shutdown.
    if (commitPunctuatorCancellable != null) {
      commitPunctuatorCancellable.cancel()
      commitPunctuatorCancellable = null
    }
  }
}
| twitter/finatra | kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/flushing/Flushing.scala | Scala | apache-2.0 | 1,867 |
package scala.slick.jdbc.meta
import java.sql._
import scala.slick.jdbc.{ResultSetInvoker, UnitInvoker}
/**
 * A wrapper for a row in the ResultSet returned by DatabaseMetaData.getFunctions().
 *
 * @param name         fully qualified name (catalog, schema, function name)
 * @param remarks      explanatory comment on the function
 * @param returnsTable Some(false) when the function does not return a table,
 *                     Some(true) when it does, None when unknown
 * @param specificName name uniquely identifying this function — TODO confirm
 *                     against the JDBC getFunctions column documentation
 */
case class MFunction(name: MQName, remarks: String, returnsTable: Option[Boolean], specificName: String) {
  /** Invoker for this function's parameters/columns (getFunctionColumns). */
  def getFunctionColumns(columnNamePattern: String = "%") =
    MFunctionColumn.getFunctionColumns(name, columnNamePattern)
}
object MFunction {
  // DatabaseMetaData.getFunctions only exists on Java 1.6+; it is looked up
  // reflectively so this class still loads on older JVMs (null when absent).
  private[this] val m = try { classOf[DatabaseMetaData].getMethod("getFunctions", classOf[String], classOf[String], classOf[String]) }
  catch { case _:NoSuchMethodException => null }
  /**
   * Invoker for DatabaseMetaData.getFunctions, yielding one MFunction per
   * row, or an empty invoker when the JVM does not provide the method.
   */
  def getFunctions(namePattern: MQName) = {
    /* Regular version, requires Java 1.6:
    ResultSetInvoker[MFunction](
      _.metaData.getFunctions(namePattern.catalog_?, namePattern.schema_?, namePattern.name)) { r =>
      MFunction(MQName.from(r), r<<, r.nextShort match {
        case DatabaseMetaData.functionNoTable => Some(false)
        case DatabaseMetaData.functionReturnsTable => Some(true)
        case _ => None
      }, r<<)
    }*/
    if(m == null) UnitInvoker.empty
    else ResultSetInvoker[MFunction]( s =>
      DatabaseMeta.invokeForRS(m, s.metaData, namePattern.catalog_?, namePattern.schema_?, namePattern.name)) { r =>
      // The literals 1 and 2 stand in for the Java 1.6 constants named in
      // the comments, which cannot be referenced when compiling against 1.5.
      MFunction(MQName.from(r), r<<, r.nextShort match {
        case 1 /*DatabaseMetaData.functionNoTable*/ => Some(false)
        case 2 /*DatabaseMetaData.functionReturnsTable*/ => Some(true)
        case _ => None
      }, r<<)
    }
  }
}
| szeiger/scala-query | src/main/scala/scala/slick/jdbc/meta/MFunction.scala | Scala | bsd-2-clause | 1,545 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree.impl
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.impl.TreeTests
import org.apache.spark.ml.tree.{ContinuousSplit, DecisionTreeModel, LeafNode, Node}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.tree.impurity.GiniCalculator
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.util.collection.OpenHashMap
/**
 * Random forests combine many decision trees, which suppresses noise and
 * helps avoid overfitting: each tree predicts a value for unseen data and
 * the predictions are combined, taking tree weights into account — majority
 * vote for classification, averaging for regression.
 * Test suite for [[RandomForest]].
 */
class RandomForestSuite extends SparkFunSuite with MLlibTestSparkContext {
  import RandomForestSuite.mapToVec
  test("computeFeatureImportance, featureImportances") { // per-node and per-forest feature importance
    /* Build tree for testing, with this structure:
          grandParent
      left2      parent
               left  right
     */
    // Gini impurity calculators for the leaves.
    val leftImp = new GiniCalculator(Array(3.0, 2.0, 1.0))
    val left = new LeafNode(0.0, leftImp.calculate(), leftImp)
    val rightImp = new GiniCalculator(Array(1.0, 2.0, 5.0))
    val right = new LeafNode(2.0, rightImp.calculate(), rightImp)
    val parent = TreeTests.buildParentNode(left, right, new ContinuousSplit(0, 0.5))
    val parentImp = parent.impurityStats
    val left2Imp = new GiniCalculator(Array(1.0, 6.0, 1.0))
    val left2 = new LeafNode(0.0, left2Imp.calculate(), left2Imp)
    // Grandparent (root) splits on feature 1.
    val grandParent = TreeTests.buildParentNode(left2, parent, new ContinuousSplit(1, 1.0))
    val grandImp = grandParent.impurityStats
    // Test feature importance computed at different subtrees.
    def testNode(node: Node, expected: Map[Int, Double]): Unit = {
      val map = new OpenHashMap[Int, Double]()
      RandomForest.computeFeatureImportance(node, map)
      assert(mapToVec(map.toMap) ~== mapToVec(expected) relTol 0.01)
    }
    // Leaf node: contributes no feature importance.
    testNode(left, Map.empty[Int, Double])
    // Internal node with 2 leaf children: importance of its split feature is
    // the weighted impurity decrease at the node.
    val feature0importance = parentImp.calculate() * parentImp.count -
      (leftImp.calculate() * leftImp.count + rightImp.calculate() * rightImp.count)
    testNode(parent, Map(0 -> feature0importance))
    // Full tree: adds the root's impurity decrease for feature 1.
    val feature1importance = grandImp.calculate() * grandImp.count -
      (left2Imp.calculate() * left2Imp.count + parentImp.calculate() * parentImp.count)
    testNode(grandParent, Map(0 -> feature0importance, 1 -> feature1importance))
    // Forest consisting of (full tree) + (internal node with 2 leafs)
    val trees = Array(parent, grandParent).map { root => // numClasses: number of label classes
      new DecisionTreeClassificationModel(root, numClasses = 3).asInstanceOf[DecisionTreeModel]
    }
    // Aggregate importances across the forest (2 features).
    val importances: Vector = RandomForest.featureImportances(trees, 2)
    val tree2norm = feature0importance + feature1importance
    val expected = Vectors.dense((1.0 + feature0importance / tree2norm) / 2.0,
      (feature1importance / tree2norm) / 2.0)
    assert(importances ~== expected relTol 0.01)
  }
  test("normalizeMapValues") { // values should be scaled to sum to 1
    val map = new OpenHashMap[Int, Double]()
    map(0) = 1.0
    map(2) = 2.0
    RandomForest.normalizeMapValues(map)
    val expected = Map(0 -> 1.0 / 3.0, 2 -> 2.0 / 3.0)
    assert(mapToVec(map.toMap) ~== mapToVec(expected) relTol 0.01)
  }
}
/**
 * Companion helpers for [[RandomForestSuite]].
 */
private object RandomForestSuite {
  /**
   * Converts a feature-index -> importance map into a sparse vector whose
   * length is one more than the largest index (at least 1 for an empty map).
   */
  def mapToVec(map: Map[Int, Double]): Vector = {
    val size = map.keys.foldLeft(0)(math.max) + 1
    val sortedEntries = map.toSeq.sortBy { case (index, _) => index }
    val indices = sortedEntries.map(_._1)
    val values = sortedEntries.map(_._2)
    Vectors.sparse(size, indices.toArray, values.toArray)
  }
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/ml/tree/impl/RandomForestSuite.scala | Scala | apache-2.0 | 5,094 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package backend
import io.AbstractFile
import scala.tools.nsc.util.ClassPath
/** The platform dependent pieces of Global: classpath handling, extra
 *  compiler phases, and a few symbols/predicates the backend needs.
 */
trait Platform {
  val symbolTable: symtab.SymbolTable
  import symbolTable._

  /** The new implementation of compiler classpath. */
  private[nsc] def classPath: ClassPath

  /** Update classpath with a substitution that maps entries to entries */
  def updateClassPath(subst: Map[ClassPath, ClassPath]): Unit

  /** Any platform-specific phases. */
  def platformPhases: List[SubComponent]

  /** Symbol for a method which compares two objects. */
  def externalEquals: Symbol

  /** The various ways a boxed primitive might materialize at runtime. */
  def isMaybeBoxed(sym: Symbol): Boolean

  /**
   * Tells whether a class with both a binary and a source representation
   * (found in classpath and in sourcepath) should be re-compiled. Behaves
   * on the JVM similar to javac, i.e. if the source file is newer than the classfile,
   * a re-compile is triggered. On .NET by contrast classfiles always take precedence.
   */
  def needCompile(bin: AbstractFile, src: AbstractFile): Boolean
}
| scala/scala | src/compiler/scala/tools/nsc/backend/Platform.scala | Scala | apache-2.0 | 1,442 |
package at.logic.gapt.provers.atp.commands.refinements.simple
import at.logic.gapt.proofs.lk.base.Sequent
import at.logic.gapt.proofs.resolution.ResolutionProof
import at.logic.gapt.provers.atp.commands.base.InitialCommand
import at.logic.gapt.provers.atp.commands.refinements.base.{ Refinement, RefinementID }
import at.logic.gapt.provers.atp.Definitions._
import at.logic.gapt.utils.ds.PublishingBuffer
import at.logic.gapt.utils.logging.Logger
import collection.mutable.ListBuffer
// the command
case class SimpleRefinementGetCommand[V <: Sequent]() extends InitialCommand[V] with Logger {
  /**
   * Fetches the next pair of clauses from the refinement stored in the
   * state, lazily creating and registering a [[SimpleRefinement]] over the
   * "clauses" buffer on first use. Returns an empty list when the
   * refinement is exhausted.
   */
  def apply( state: State ) = {
    val refinement =
      if ( state.isDefinedAt( RefinementID() ) ) state( RefinementID() ).asInstanceOf[SimpleRefinement[V]]
      else {
        val ref = new SimpleRefinement( state( "clauses" ).asInstanceOf[PublishingBuffer[ResolutionProof[V]]] )
        // Plain key -> value pair instead of the non-idiomatic `new Tuple2`.
        state += ( RefinementID() -> ref )
        ref
      }
    refinement.getNext match {
      case None => List()
      // `p.toString` instead of the discouraged postfix `p toString`.
      case Some( p ) => debug( p.toString ); List( ( state, p ) )
    }
  }
  override def toString = "SimpleRefinementGetCommand()"
}
// Enumerates candidate clause pairs for resolution over a publishing buffer
// of clauses. NOTE(review): addClause/removeClause are presumably invoked by
// the Refinement base class when the buffer changes — confirm in Refinement.
private[refinements] class SimpleRefinement[V <: Sequent]( clauses: PublishingBuffer[ResolutionProof[V]] ) extends Refinement[V]( clauses ) {
  val pairs = new ListBuffer[Tuple2[ResolutionProof[V], ResolutionProof[V]]] // all pairs of possible two clauses
  insertClauses
  // Removes and returns the next candidate pair, or None when exhausted.
  def getNext: Option[Tuple2[ResolutionProof[V], ResolutionProof[V]]] = if ( isEmpty ) None else Some( pairs.remove( 0 ) )
  // Seeds `pairs` with every unordered pair (i < j) of the initial clauses.
  private def insertClauses = {
    val tmp = clauses.toList
    pairs ++= ( for {
      ( a, i ) <- tmp.zip( tmp.indices )
      j <- tmp.indices
      if ( j > i )
    } yield ( a, clauses( j ) ) )
  }
  // Pairs the new clause with every clause currently in the buffer.
  protected def addClause( s: ResolutionProof[V] ) = {
    pairs ++= clauses.map( a => ( s, a ) )
  }
  // Drops every pair that mentions the removed clause; roots are compared
  // as syntactic multisets. `filter` builds a copy, so removing from
  // `pairs` while iterating the filtered result is safe.
  protected def removeClause( s: ResolutionProof[V] ) = {
    pairs.filter( x => ( x._1.root syntacticMultisetEquals s.root ) || ( x._2.root syntacticMultisetEquals s.root ) ).foreach( x => pairs -= x )
  }
  def isEmpty: Boolean = pairs.isEmpty
  override def toString = "SimpleRefinement(" + clauses + ")"
}
| gisellemnr/gapt | src/main/scala/at/logic/gapt/provers/atp/commands/refinements/simple.scala | Scala | gpl-3.0 | 2,167 |
/*
* Copyright (c) 2013-2014, ARM Limited
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.arm.carp.apps.optimizer.passes
import java.io.File;
import java.io.PrintWriter;
import com.arm.carp.pencil._
/**
 * This class puts each non-static function definition into a new file.
 * Based on the call graph, each new file will contain the definitions of all
 * directly or indirectly called functions that were defined in the input
 * program.
 */
object SplitFile extends Pass("splitfile") {

  val config = WalkerConfig.expressions

  // Output file name option; its directory part decides where files go.
  private val outfile = registerOption("fname", "splitfile-out")

  /* Contains current function when building call graph */
  private var current_function: Option[Function] = None
  private val callgraph = new CallGraph
  private var progConsts: Seq[Variable] = Seq()
  private var progTypes: Seq[StructType] = Seq()

  // Directory of the configured output file including the trailing '/',
  // or "" when writing to stdout ("-") or when there is no directory part.
  private def dirname = (outfile() match {
    case "-" => ""
    case filename =>
      val dirsep = filename.lastIndexOf("/")
      if (dirsep > 0) {
        filename.substring(0, dirsep) + "/"
      }
      else {
        ""
      }
  })

  /**
   * Write given function to its own file, including all functions it calls.
   * @param function Function to write.
   */
  private def printToFile(function: Function): Unit = {
    val filename = dirname + function.getName() + ".c"
    System.err.println("Writing " + filename)
    val functions = callgraph.getAllCallees(function)
    val program = new Program(functions + function, progTypes, progConsts)
    val printer = new Printer
    val writer = new PrintWriter(new File(filename))
    // Release the file handle even when pretty-printing throws; the
    // previous version skipped close() on exception and leaked the writer.
    try {
      writer.append(printer.toPencil(program, true, true, false))
    } finally {
      writer.close()
    }
  }

  /**
   * Print given function to a new file if it is defined and is not a local
   * function.
   */
  private def processFunction(function: Function): Unit = {
    function.ops match {
      case Some(_) => if (!function.local) printToFile(function)
      case None =>
    }
  }

  /** Build the call graph: record an edge from the current function. */
  override def walkCallExpression(call: CallExpression) = {
    callgraph.addCall(current_function.get, call.func)
    super.walkCallExpression(call)
  }

  /** Build the call graph: remember which function is being walked. */
  override def walkFunction(function: Function) = {
    current_function = Some(function)
    super.walkFunction(function)
  }

  /**
   * Execute SplitFile pass on a program.
   * @param program Program to act on.
   * @return Original program.
   */
  override def walkProgram(program: Program) = {
    progConsts = program.consts
    progTypes = program.types
    callgraph.clear
    val res = super.walkProgram(program)
    program.functions.foreach(processFunction(_))
    res
  }
}
| Meinersbur/pencil | src/scala/com/arm/carp/apps/optimizer/passes/SplitFile.scala | Scala | mit | 3,722 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.Collections
import java.util.concurrent.locks.ReentrantReadWriteLock
import scala.collection.{Seq, Set, mutable}
import scala.jdk.CollectionConverters._
import kafka.cluster.{Broker, EndPoint}
import kafka.api._
import kafka.controller.StateChangeLogger
import kafka.server.metadata.{MetadataBroker, RaftMetadataCache}
import kafka.utils.CoreUtils._
import kafka.utils.Logging
import kafka.utils.Implicits._
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
import org.apache.kafka.common.{Cluster, Node, PartitionInfo, TopicPartition, Uuid}
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition
import org.apache.kafka.common.message.{MetadataResponseData, UpdateMetadataRequestData}
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.{MetadataResponse, UpdateMetadataRequest}
import org.apache.kafka.common.security.auth.SecurityProtocol
trait MetadataCache {
  /**
   * Return topic metadata for a given set of topics and listener. See KafkaApis#handleTopicMetadataRequest for details
   * on the use of the two boolean flags.
   *
   * @param topics                    The set of topics.
   * @param listenerName              The listener name.
   * @param errorUnavailableEndpoints If true, we return an error on unavailable brokers. This is used to support
   *                                  MetadataResponse version 0.
   * @param errorUnavailableListeners If true, return LEADER_NOT_AVAILABLE if the listener is not found on the leader.
   *                                  This is used for MetadataResponse versions 0-5.
   * @return A collection of topic metadata.
   */
  def getTopicMetadata(
    topics: collection.Set[String],
    listenerName: ListenerName,
    errorUnavailableEndpoints: Boolean = false,
    errorUnavailableListeners: Boolean = false): collection.Seq[MetadataResponseData.MetadataResponseTopic]
  /** All topic names currently present in the cache. */
  def getAllTopics(): collection.Set[String]
  /** Every partition of every topic currently present in the cache. */
  def getAllPartitions(): collection.Set[TopicPartition]
  /** The subset of `topics` that have no entry in the cache. */
  def getNonExistingTopics(topics: collection.Set[String]): collection.Set[String]
  /** Metadata for the given broker, if it is currently alive. */
  def getAliveBroker(brokerId: Int): Option[MetadataBroker]
  /** Metadata for all currently alive brokers. */
  def getAliveBrokers: collection.Seq[MetadataBroker]
  /** The cached state of a single partition, if known. */
  def getPartitionInfo(topic: String, partitionId: Int): Option[UpdateMetadataRequestData.UpdateMetadataPartitionState]
  /** Number of partitions for `topic`, if the topic is known. */
  def numPartitions(topic: String): Option[Int]
  /**
   * Get a partition leader's endpoint
   *
   * @return If the leader is known, and the listener name is available, return Some(node). If the leader is known,
   *         but the listener is unavailable, return Some(Node.NO_NODE). Otherwise, if the leader is not known,
   *         return None
   */
  def getPartitionLeaderEndpoint(topic: String, partitionId: Int, listenerName: ListenerName): Option[Node]
  /** Map each replica id of the partition to its Node on the given listener. */
  def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): Map[Int, Node]
  /** Id of the current controller, if known. */
  def getControllerId: Option[Int]
  /** Build a client-style `Cluster` view of the cached metadata for the given listener. */
  def getClusterMetadata(clusterId: String, listenerName: ListenerName): Cluster
  /**
   * Update the metadata cache with a given UpdateMetadataRequest.
   *
   * @return The deleted topics from the given UpdateMetadataRequest.
   */
  def updateMetadata(correlationId: Int, request: UpdateMetadataRequest): collection.Seq[TopicPartition]
  /** True if the topic is present in the cache. */
  def contains(topic: String): Boolean
  /** True if the topic-partition is present in the cache. */
  def contains(tp: TopicPartition): Boolean
}
object MetadataCache {
  /** Factory for the ZooKeeper-backed cache implementation. */
  def zkMetadataCache(brokerId: Int): ZkMetadataCache =
    new ZkMetadataCache(brokerId)
  /** Factory for the Raft-backed cache implementation. */
  def raftMetadataCache(brokerId: Int): RaftMetadataCache =
    new RaftMetadataCache(brokerId)
}
/**
* A cache for the state (e.g., current leader) of each partition. This cache is updated through
* UpdateMetadataRequest from the controller. Every broker maintains the same cache, asynchronously.
*/
class ZkMetadataCache(brokerId: Int) extends MetadataCache with Logging {
  // Guards writes to `metadataSnapshot`; reads are intentionally lock-free (see note below).
  private val partitionMetadataLock = new ReentrantReadWriteLock()
  //this is the cache state. every MetadataSnapshot instance is immutable, and updates (performed under a lock)
  //replace the value with a completely new one. this means reads (which are not under any lock) need to grab
  //the value of this var (into a val) ONCE and retain that read copy for the duration of their operation.
  //multiple reads of this value risk getting different snapshots.
  @volatile private var metadataSnapshot: MetadataSnapshot = MetadataSnapshot(partitionStates = mutable.AnyRefMap.empty,
    topicIds = Map.empty, controllerId = None, aliveBrokers = mutable.LongMap.empty, aliveNodes = mutable.LongMap.empty)
  this.logIdent = s"[MetadataCache brokerId=$brokerId] "
  private val stateChangeLogger = new StateChangeLogger(brokerId, inControllerContext = false, None)
  // This method is the main hotspot when it comes to the performance of metadata requests,
  // we should be careful about adding additional logic here. Relatedly, `brokers` is
  // `List[Integer]` instead of `List[Int]` to avoid a collection copy.
  // filterUnavailableEndpoints exists to support v0 MetadataResponses
  private def maybeFilterAliveReplicas(snapshot: MetadataSnapshot,
                                       brokers: java.util.List[Integer],
                                       listenerName: ListenerName,
                                       filterUnavailableEndpoints: Boolean): java.util.List[Integer] = {
    if (!filterUnavailableEndpoints) {
      brokers
    } else {
      val res = new util.ArrayList[Integer](math.min(snapshot.aliveBrokers.size, brokers.size))
      for (brokerId <- brokers.asScala) {
        if (hasAliveEndpoint(snapshot, brokerId, listenerName))
          res.add(brokerId)
      }
      res
    }
  }
  // errorUnavailableEndpoints exists to support v0 MetadataResponses
  // If errorUnavailableListeners=true, return LISTENER_NOT_FOUND if listener is missing on the broker.
  // Otherwise, return LEADER_NOT_AVAILABLE for broker unavailable and missing listener (Metadata response v5 and below).
  private def getPartitionMetadata(snapshot: MetadataSnapshot, topic: String, listenerName: ListenerName, errorUnavailableEndpoints: Boolean,
                                   errorUnavailableListeners: Boolean): Option[Iterable[MetadataResponsePartition]] = {
    snapshot.partitionStates.get(topic).map { partitions =>
      partitions.map { case (partitionId, partitionState) =>
        val topicPartition = new TopicPartition(topic, partitionId.toInt)
        val leaderBrokerId = partitionState.leader
        val leaderEpoch = partitionState.leaderEpoch
        val maybeLeader = getAliveEndpoint(snapshot, leaderBrokerId, listenerName)
        val replicas = partitionState.replicas
        val filteredReplicas = maybeFilterAliveReplicas(snapshot, replicas, listenerName, errorUnavailableEndpoints)
        val isr = partitionState.isr
        val filteredIsr = maybeFilterAliveReplicas(snapshot, isr, listenerName, errorUnavailableEndpoints)
        val offlineReplicas = partitionState.offlineReplicas
        maybeLeader match {
          case None =>
            // Leader endpoint not resolvable: distinguish "broker dead" from "listener missing".
            val error = if (!snapshot.aliveBrokers.contains(leaderBrokerId)) { // we are already holding the read lock
              debug(s"Error while fetching metadata for $topicPartition: leader not available")
              Errors.LEADER_NOT_AVAILABLE
            } else {
              debug(s"Error while fetching metadata for $topicPartition: listener $listenerName " +
                s"not found on leader $leaderBrokerId")
              if (errorUnavailableListeners) Errors.LISTENER_NOT_FOUND else Errors.LEADER_NOT_AVAILABLE
            }
            new MetadataResponsePartition()
              .setErrorCode(error.code)
              .setPartitionIndex(partitionId.toInt)
              .setLeaderId(MetadataResponse.NO_LEADER_ID)
              .setLeaderEpoch(leaderEpoch)
              .setReplicaNodes(filteredReplicas)
              .setIsrNodes(filteredIsr)
              .setOfflineReplicas(offlineReplicas)
          case Some(_) =>
            // Leader is reachable; surface REPLICA_NOT_AVAILABLE when filtering dropped brokers.
            val error = if (filteredReplicas.size < replicas.size) {
              debug(s"Error while fetching metadata for $topicPartition: replica information not available for " +
                s"following brokers ${replicas.asScala.filterNot(filteredReplicas.contains).mkString(",")}")
              Errors.REPLICA_NOT_AVAILABLE
            } else if (filteredIsr.size < isr.size) {
              debug(s"Error while fetching metadata for $topicPartition: in sync replica information not available for " +
                s"following brokers ${isr.asScala.filterNot(filteredIsr.contains).mkString(",")}")
              Errors.REPLICA_NOT_AVAILABLE
            } else {
              Errors.NONE
            }
            new MetadataResponsePartition()
              .setErrorCode(error.code)
              .setPartitionIndex(partitionId.toInt)
              .setLeaderId(maybeLeader.map(_.id()).getOrElse(MetadataResponse.NO_LEADER_ID))
              .setLeaderEpoch(leaderEpoch)
              .setReplicaNodes(filteredReplicas)
              .setIsrNodes(filteredIsr)
              .setOfflineReplicas(offlineReplicas)
        }
      }
    }
  }
  /**
   * Check whether a broker is alive and has a registered listener matching the provided name.
   * This method was added to avoid unnecessary allocations in [[maybeFilterAliveReplicas]], which is
   * a hotspot in metadata handling.
   */
  private def hasAliveEndpoint(snapshot: MetadataSnapshot, brokerId: Int, listenerName: ListenerName): Boolean = {
    snapshot.aliveNodes.get(brokerId).exists(_.contains(listenerName))
  }
  /**
   * Get the endpoint matching the provided listener if the broker is alive. Note that listeners can
   * be added dynamically, so a broker with a missing listener could be a transient error.
   *
   * @return None if broker is not alive or if the broker does not have a listener named `listenerName`.
   */
  private def getAliveEndpoint(snapshot: MetadataSnapshot, brokerId: Int, listenerName: ListenerName): Option[Node] = {
    snapshot.aliveNodes.get(brokerId).flatMap(_.get(listenerName))
  }
  // errorUnavailableEndpoints exists to support v0 MetadataResponses
  def getTopicMetadata(topics: Set[String],
                       listenerName: ListenerName,
                       errorUnavailableEndpoints: Boolean = false,
                       errorUnavailableListeners: Boolean = false): Seq[MetadataResponseTopic] = {
    // Capture the snapshot once so the whole response is internally consistent.
    val snapshot = metadataSnapshot
    topics.toSeq.flatMap { topic =>
      getPartitionMetadata(snapshot, topic, listenerName, errorUnavailableEndpoints, errorUnavailableListeners).map { partitionMetadata =>
        new MetadataResponseTopic()
          .setErrorCode(Errors.NONE.code)
          .setName(topic)
          .setTopicId(snapshot.topicIds.getOrElse(topic, Uuid.ZERO_UUID))
          .setIsInternal(Topic.isInternal(topic))
          .setPartitions(partitionMetadata.toBuffer.asJava)
      }
    }
  }
  /** All topic names in the current snapshot. */
  def getAllTopics(): Set[String] = {
    getAllTopics(metadataSnapshot)
  }
  /** Every topic-partition in the current snapshot. */
  def getAllPartitions(): Set[TopicPartition] = {
    metadataSnapshot.partitionStates.flatMap { case (topicName, partitionsAndStates) =>
      partitionsAndStates.keys.map(partitionId => new TopicPartition(topicName, partitionId.toInt))
    }.toSet
  }
  private def getAllTopics(snapshot: MetadataSnapshot): Set[String] = {
    snapshot.partitionStates.keySet
  }
  private def getAllPartitions(snapshot: MetadataSnapshot): Map[TopicPartition, UpdateMetadataPartitionState] = {
    snapshot.partitionStates.flatMap { case (topic, partitionStates) =>
      partitionStates.map { case (partition, state ) => (new TopicPartition(topic, partition.toInt), state) }
    }.toMap
  }
  /** The subset of `topics` not present in the cache. */
  def getNonExistingTopics(topics: Set[String]): Set[String] = {
    topics.diff(metadataSnapshot.partitionStates.keySet)
  }
  /** Metadata for the broker, if it is alive in the current snapshot. */
  def getAliveBroker(brokerId: Int): Option[MetadataBroker] = {
    metadataSnapshot.aliveBrokers.get(brokerId).map(MetadataBroker.apply)
  }
  /** Metadata for all alive brokers in the current snapshot. */
  def getAliveBrokers: Seq[MetadataBroker] = {
    metadataSnapshot.aliveBrokers.values.map(MetadataBroker.apply).toBuffer
  }
  // Mutates `partitionStates` in place; only called from updateMetadata while holding the write lock.
  private def addOrUpdatePartitionInfo(partitionStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]],
                                       topic: String,
                                       partitionId: Int,
                                       stateInfo: UpdateMetadataPartitionState): Unit = {
    val infos = partitionStates.getOrElseUpdate(topic, mutable.LongMap.empty)
    infos(partitionId) = stateInfo
  }
  /** Cached state of a single partition, if known. */
  def getPartitionInfo(topic: String, partitionId: Int): Option[UpdateMetadataPartitionState] = {
    metadataSnapshot.partitionStates.get(topic).flatMap(_.get(partitionId))
  }
  /** Number of partitions for `topic`, if the topic is known. */
  def numPartitions(topic: String): Option[Int] = {
    metadataSnapshot.partitionStates.get(topic).map(_.size)
  }
  // if the leader is not known, return None;
  // if the leader is known and corresponding node is available, return Some(node)
  // if the leader is known but corresponding node with the listener name is not available, return Some(NO_NODE)
  def getPartitionLeaderEndpoint(topic: String, partitionId: Int, listenerName: ListenerName): Option[Node] = {
    val snapshot = metadataSnapshot
    snapshot.partitionStates.get(topic).flatMap(_.get(partitionId)) map { partitionInfo =>
      val leaderId = partitionInfo.leader
      snapshot.aliveNodes.get(leaderId) match {
        case Some(nodeMap) =>
          nodeMap.getOrElse(listenerName, Node.noNode)
        case None =>
          Node.noNode
      }
    }
  }
  // Maps each replica id of the partition to its Node on `listenerName`.
  // Replicas whose node is unknown (or resolves to the empty node) are dropped from the result.
  def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): Map[Int, Node] = {
    val snapshot = metadataSnapshot
    snapshot.partitionStates.get(tp.topic).flatMap(_.get(tp.partition)).map { partitionInfo =>
      val replicaIds = partitionInfo.replicas
      replicaIds.asScala
        .map(replicaId => replicaId.intValue() -> {
          snapshot.aliveBrokers.get(replicaId.longValue()) match {
            case Some(broker) =>
              broker.getNode(listenerName).getOrElse(Node.noNode())
            case None =>
              Node.noNode()
          }}).toMap
        .filter(pair => pair match {
          case (_, node) => !node.isEmpty
        })
    }.getOrElse(Map.empty[Int, Node])
  }
  def getControllerId: Option[Int] = metadataSnapshot.controllerId
  // Builds a client-style Cluster view of the snapshot. Partitions whose leader is
  // LeaderDuringDelete (i.e. being deleted) are excluded.
  def getClusterMetadata(clusterId: String, listenerName: ListenerName): Cluster = {
    val snapshot = metadataSnapshot
    val nodes = snapshot.aliveNodes.flatMap { case (id, nodesByListener) =>
      nodesByListener.get(listenerName).map { node =>
        id -> node
      }
    }
    // Unknown broker ids are represented as a placeholder Node with empty host and port -1.
    def node(id: Integer): Node = {
      nodes.getOrElse(id.toLong, new Node(id, "", -1))
    }
    val partitions = getAllPartitions(snapshot)
      .filter { case (_, state) => state.leader != LeaderAndIsr.LeaderDuringDelete }
      .map { case (tp, state) =>
        new PartitionInfo(tp.topic, tp.partition, node(state.leader),
          state.replicas.asScala.map(node).toArray,
          state.isr.asScala.map(node).toArray,
          state.offlineReplicas.asScala.map(node).toArray)
      }
    val unauthorizedTopics = Collections.emptySet[String]
    val internalTopics = getAllTopics(snapshot).filter(Topic.isInternal).asJava
    new Cluster(clusterId, nodes.values.toBuffer.asJava,
      partitions.toBuffer.asJava,
      unauthorizedTopics, internalTopics,
      snapshot.controllerId.map(id => node(id)).orNull)
  }
  // This method returns the deleted TopicPartitions received from UpdateMetadataRequest
  def updateMetadata(correlationId: Int, updateMetadataRequest: UpdateMetadataRequest): Seq[TopicPartition] = {
    inWriteLock(partitionMetadataLock) {
      val aliveBrokers = new mutable.LongMap[Broker](metadataSnapshot.aliveBrokers.size)
      val aliveNodes = new mutable.LongMap[collection.Map[ListenerName, Node]](metadataSnapshot.aliveNodes.size)
      // A negative controller id in the request means "no controller".
      val controllerIdOpt = updateMetadataRequest.controllerId match {
        case id if id < 0 => None
        case id => Some(id)
      }
      updateMetadataRequest.liveBrokers.forEach { broker =>
        // `aliveNodes` is a hot path for metadata requests for large clusters, so we use java.util.HashMap which
        // is a bit faster than scala.collection.mutable.HashMap. When we drop support for Scala 2.10, we could
        // move to `AnyRefMap`, which has comparable performance.
        val nodes = new java.util.HashMap[ListenerName, Node]
        val endPoints = new mutable.ArrayBuffer[EndPoint]
        broker.endpoints.forEach { ep =>
          val listenerName = new ListenerName(ep.listener)
          endPoints += new EndPoint(ep.host, ep.port, listenerName, SecurityProtocol.forId(ep.securityProtocol))
          nodes.put(listenerName, new Node(broker.id, ep.host, ep.port))
        }
        aliveBrokers(broker.id) = Broker(broker.id, endPoints, Option(broker.rack))
        aliveNodes(broker.id) = nodes.asScala
      }
      // Sanity check: all brokers are expected to expose the same set of listeners.
      aliveNodes.get(brokerId).foreach { listenerMap =>
        val listeners = listenerMap.keySet
        if (!aliveNodes.values.forall(_.keySet == listeners))
          error(s"Listeners are not identical across brokers: $aliveNodes")
      }
      val newTopicIds = updateMetadataRequest.topicStates().asScala
        .map(topicState => (topicState.topicName(), topicState.topicId()))
        .filter(_._2 != Uuid.ZERO_UUID).toMap
      val topicIds = mutable.Map.empty[String, Uuid]
      topicIds ++= metadataSnapshot.topicIds
      topicIds ++= newTopicIds
      val deletedPartitions = new mutable.ArrayBuffer[TopicPartition]
      if (!updateMetadataRequest.partitionStates.iterator.hasNext) {
        // No partition changes: keep the existing partition states, only refresh brokers/controller/ids.
        metadataSnapshot = MetadataSnapshot(metadataSnapshot.partitionStates, topicIds.toMap, controllerIdOpt, aliveBrokers, aliveNodes)
      } else {
        //since kafka may do partial metadata updates, we start by copying the previous state
        val partitionStates = new mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]](metadataSnapshot.partitionStates.size)
        metadataSnapshot.partitionStates.forKeyValue { (topic, oldPartitionStates) =>
          val copy = new mutable.LongMap[UpdateMetadataPartitionState](oldPartitionStates.size)
          copy ++= oldPartitionStates
          partitionStates(topic) = copy
        }
        val traceEnabled = stateChangeLogger.isTraceEnabled
        val controllerId = updateMetadataRequest.controllerId
        val controllerEpoch = updateMetadataRequest.controllerEpoch
        val newStates = updateMetadataRequest.partitionStates.asScala
        newStates.foreach { state =>
          // per-partition logging here can be very expensive due going through all partitions in the cluster
          val tp = new TopicPartition(state.topicName, state.partitionIndex)
          if (state.leader == LeaderAndIsr.LeaderDuringDelete) {
            removePartitionInfo(partitionStates, topicIds, tp.topic, tp.partition)
            if (traceEnabled)
              stateChangeLogger.trace(s"Deleted partition $tp from metadata cache in response to UpdateMetadata " +
                s"request sent by controller $controllerId epoch $controllerEpoch with correlation id $correlationId")
            deletedPartitions += tp
          } else {
            addOrUpdatePartitionInfo(partitionStates, tp.topic, tp.partition, state)
            if (traceEnabled)
              stateChangeLogger.trace(s"Cached leader info $state for partition $tp in response to " +
                s"UpdateMetadata request sent by controller $controllerId epoch $controllerEpoch with correlation id $correlationId")
          }
        }
        val cachedPartitionsCount = newStates.size - deletedPartitions.size
        stateChangeLogger.info(s"Add $cachedPartitionsCount partitions and deleted ${deletedPartitions.size} partitions from metadata cache " +
          s"in response to UpdateMetadata request sent by controller $controllerId epoch $controllerEpoch with correlation id $correlationId")
        metadataSnapshot = MetadataSnapshot(partitionStates, topicIds.toMap, controllerIdOpt, aliveBrokers, aliveNodes)
      }
      deletedPartitions
    }
  }
  def contains(topic: String): Boolean = {
    metadataSnapshot.partitionStates.contains(topic)
  }
  def contains(tp: TopicPartition): Boolean = getPartitionInfo(tp.topic, tp.partition).isDefined
  // Removes one partition; when its last partition is removed, drops the topic (and its id) entirely.
  // Returns true if the topic existed in `partitionStates`.
  private def removePartitionInfo(partitionStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]],
                                  topicIds: mutable.Map[String, Uuid], topic: String, partitionId: Int): Boolean = {
    partitionStates.get(topic).exists { infos =>
      infos.remove(partitionId)
      if (infos.isEmpty) {
        partitionStates.remove(topic)
        topicIds.remove(topic)
      }
      true
    }
  }
  // Immutable snapshot of the full cache state; replaced wholesale under the write lock.
  case class MetadataSnapshot(partitionStates: mutable.AnyRefMap[String, mutable.LongMap[UpdateMetadataPartitionState]],
                              topicIds: Map[String, Uuid],
                              controllerId: Option[Int],
                              aliveBrokers: mutable.LongMap[Broker],
                              aliveNodes: mutable.LongMap[collection.Map[ListenerName, Node]])
}
| Chasego/kafka | core/src/main/scala/kafka/server/MetadataCache.scala | Scala | apache-2.0 | 22,392 |
/* File: GettingStarted.scala (Ch 2)
* Authors: Paul Chiusano and Runar Bjarnason
* Url: https://github.com/fpinscala/fpinscala
*
* Description: This is a modified version of the file GettingStarted.scala
* that accompanies the book "Functional Programming in Scala" by
* Chiusano and Bjarnason. This version of the file includes
* solutions to some of the exercises in
*
* CHAPTER 2: Getting Started
*
* The solutions herein are by William DeMeo <williamdemeo@gmail.com>.
* They are at best imperfect, and possibly wrong. Official solutions by
* Chiusano and Bjarnason are available in the github repo mentioned above.
*/
package fpinscala.gettingstarted
import scala.annotation.tailrec
// A comment!
/* Another comment */
/** A documentation comment */
object MyModule {
  /** Absolute value of `n`. */
  def abs(n: Int): Int =
    if (n < 0) -n
    else n
  /** One-line report of `abs(x)`. */
  private def formatAbs(x: Int) = {
    val msg = "The absolute value of %d is %d"
    msg.format(x, abs(x))
  }
  def main(args: Array[String]): Unit =
    println(formatAbs(-42))
  // A definition of factorial, using a local, tail recursive function.
  // Returns 1 for n <= 0.
  def factorial(n: Int): Int = {
    @annotation.tailrec
    def go(n: Int, acc: Int): Int =
      if (n <= 0) acc
      else go(n-1, n*acc)
    go(n, 1)
  }
  // Another implementation of `factorial`, this time with a `while` loop
  def factorial2(n: Int): Int = {
    var acc = 1
    var i = n
    while (i > 0) { acc *= i; i -= 1 }
    acc
  }
  // Exercise 1: Write a function to compute the nth fibonacci number.
  //
  // Bug fix: the 0 case previously returned 1, which disagreed with both
  // `fib_tr` below (fib_tr(0) == 0) and the expected sequence printed by
  // `TestFib` ("0, 1, 1, 2, 3, 5, 8"). Negative n is unsupported and
  // raises a MatchError, as before.
  def fib(n: Int): Int = n match {
    case 0 => 0
    case 1 => 1
    case k if k > 1 => fib(k-1) + fib(k-2)
  }
  /** Tail-recursive Fibonacci; agrees with `fib` (fib_tr(0) == 0). */
  def fib_tr(n: Int): Int = {
    @annotation.tailrec
    def fib_aux(n: Int, prev: Int, curr: Int): Int =
      if (n == 0) prev
      else fib_aux(n-1, curr, prev + curr)
    fib_aux(n, 0, 1)
  }
  // This definition and `formatAbs` are very similar..
  private def formatFactorial(n: Int) = {
    val msg = "The factorial of %d is %d."
    msg.format(n, factorial(n))
  }
  // We can generalize `formatAbs` and `formatFactorial` to
  // accept a _function_ as a parameter
  def formatResult(name: String, n: Int, f: Int => Int) = {
    val msg = "The %s of %d is %d."
    msg.format(name, n, f(n))
  }
}
object FormatAbsAndFactorial {
  import MyModule._
  /** Demonstrates `formatResult` applied to two different Int => Int functions. */
  def main(args: Array[String]): Unit = {
    val demos: List[(String, Int, Int => Int)] =
      List(("absolute value", -42, abs), ("factorial", 7, factorial))
    demos.foreach { case (label, n, f) => println(formatResult(label, n, f)) }
  }
}
object TestFib {
  import MyModule._
  /** Prints fib(0)..fib(6) next to the expected Fibonacci sequence. */
  def main(args: Array[String]): Unit = {
    println("Expected: 0, 1, 1, 2, 3, 5, 8")
    val actual = (0 to 6).map(fib)
    println("Actual: %d, %d, %d, %d, %d, %d, %d".format(actual: _*))
  }
}
// Functions get passed around so often in FP that it's
// convenient to have syntax for constructing a function
// *without* having to give it a name
object AnonymousFunctions {
  import MyModule._
  /** Shows the progressively shorter syntaxes Scala offers for anonymous functions. */
  def main(args: Array[String]): Unit = {
    println(formatResult("absolute value", -42, abs))
    println(formatResult("factorial", 7, factorial))
    val explicit: Int => Int = (n: Int) => n + 1
    println(formatResult("increment", 7, explicit))
    println(formatResult("increment2", 7, (n) => n + 1))
    println(formatResult("increment3", 7, n => n + 1))
    println(formatResult("increment4", 7, _ + 1))
    println(formatResult("increment5", 7, n => { val result = n + 1; result }))
  }
}
object MonomorphicBinarySearch {
  /**
   * Binary search specialised to a sorted `Array[Double]`.
   * Returns the index of `key` if present; otherwise returns `-lastMid - 1`,
   * where `lastMid` is the last midpoint probed before the range emptied.
   */
  def binarySearch(ds: Array[Double], key: Double): Int = {
    @annotation.tailrec
    def search(low: Int, lastMid: Int, high: Int): Int =
      if (low > high) -lastMid - 1
      else {
        val mid = (low + high) / 2
        val probed = ds(mid) // array indexing uses function-application syntax
        if (probed == key) mid
        else if (probed > key) search(low, mid, mid - 1)
        else search(mid + 1, mid, high)
      }
    search(0, 0, ds.length - 1)
  }
}
object PolymorphicFunctions {
  /**
   * Polymorphic binary search, parameterized on a comparison function `gt`
   * that returns true when its first argument is greater than its second.
   * Returns the index of `key` if found, otherwise `-lastMid - 1`.
   */
  def binarySearch[A](as: Array[A], key: A, gt: (A,A) => Boolean): Int = {
    @annotation.tailrec
    def go(low: Int, mid: Int, high: Int): Int = {
      if (low > high) -mid - 1
      else {
        val mid2 = (low + high) / 2
        val a = as(mid2)
        val greater = gt(a, key)
        if (!greater && !gt(key,a)) mid2
        else if (greater) go(low, mid2, mid2-1)
        else go(mid2 + 1, mid2, high)
      }
    }
    go(0, 0, as.length - 1)
  }
  // Exercise 2: check whether an `Array[A]` is sorted in ascending order,
  // where `gt(a, b)` means "a is greater than b".
  //
  // Bug fix: the original recursed when gt(as(n), as(n+1)) was TRUE and
  // returned false otherwise — i.e. it accepted strictly *descending*
  // arrays — while its comment claimed ascending order. The branches are
  // now as in the official solution: any out-of-order adjacent pair makes
  // the whole array unsorted. Empty and single-element arrays are sorted.
  def isSorted[A](as: Array[A], gt: (A,A) => Boolean): Boolean = {
    @annotation.tailrec
    def isSorted_aux(n: Int): Boolean = {
      if (n >= as.length-1) true           // checked every adjacent pair
      else if (gt(as(n), as(n+1))) false   // found an out-of-order pair
      else isSorted_aux(n+1)
    }
    isSorted_aux(0)
  }
  // Polymorphic functions are often so constrained by their type
  // that they only have one implementation! Here's an example:
  /** Partially applies the first argument of a two-argument function. */
  def partial1[A,B,C](a: A, f: (A,B) => C): B => C =
    (b: B) => f(a, b)
  // Exercise 3: convert a function of two arguments into a function of one
  // argument that partially applies f. Only one implementation compiles.
  def curry[A,B,C](f: (A, B) => C): A => (B => C) = (a:A) => (b:B) => f(a,b)
  // NB: The `Function2` trait has a `curried` method already
  // Exercise 4: reverse the transformation of curry. Since `=>` associates
  // to the right, A => (B => C) can be written as A => B => C.
  def uncurry[A,B,C](f: A => B => C): (A, B) => C = (a, b) => f(a)(b)
  // NB: `Function.uncurried` in the standard library does the same thing.
  // `curry` and `uncurry` are mutually inverse: the two forms are isomorphic.
  // Exercise 5: function composition — feed the output of `g` into `f`.
  def compose[A,B,C](f: B => C, g: A => B): A => C = a => f(g(a))
}
| williamdemeo/fpinscala_wjd | exercises/src/main/scala/fpinscala/gettingstarted/GettingStarted.scala | Scala | mit | 7,297 |
package vexriscv.ip.fpu
// Scratchpad: Newton-Raphson iteration converging to the reciprocal 1/input,
// starting from the seed 1/(input*0.95). Each step prints the new estimate;
// the final println(1/input) prints the exact value for comparison.
object MiaouDiv extends App{
  val input = 2.5
  var output = 1/(input*0.95)
  def y = output
  def x = input
  for(i <- 0 until 10) {
    // Newton step for f(y) = 1/y - x:  y' = 2y - x*y^2
    output = 2 * y - x * y * y
    println(output)
  }
  //output = x*output
  println(1/input)
}
// Scratchpad: Newton-Raphson iteration for the inverse square root 1/sqrt(input),
// seeded at 1/sqrt(input*0.95); the final multiplication by x turns the result
// into sqrt(input), printed alongside Math.sqrt(input) as the reference.
object MiaouSqrt extends App{
  val input = 2.0
  var output = 1/Math.sqrt(input*0.95)
  // def x = output
  // def y = input
  def y = output
  def x = input
  for(i <- 0 until 10) {
    // Newton step for f(y) = 1/y^2 - x:  y' = y * (1.5 - x*y^2/2)
    output = y * (1.5 - x * y * y / 2)
    println(output)
  }
  output = x*output
  println(output)
  println(s"ref ${Math.sqrt(input)}")
}
// Scratchpad exploring IEEE-754 special-value behaviour of Float arithmetic:
// NaN propagation through +, NaN from 0*Infinity, Infinity from division by
// zero, and saturation of Float.MaxValue under subtraction.
object MiaouNan extends App{
  println(Float.NaN + 3.0f)
  println(3.0f + Float.NaN )
  println(0.0f*Float.PositiveInfinity )
  println(1.0f/0.0f )
  println(Float.MaxValue -1 )
  println(Float.PositiveInfinity - Float.PositiveInfinity)
}
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.nio
import slamdata.Predef.{Array, SuppressWarnings, Unit}
import quasar.contrib.fs2.convert
import java.nio.file.{Files, Path}
import cats.effect.Sync
import fs2.Stream
object file {
  /** Deletes the path, including all descendants if it refers to a directory. */
  def deleteRecursively[F[_]](path: Path)(implicit F: Sync[F]): F[Unit] = {
    @SuppressWarnings(Array("org.wartremover.warts.Recursion"))
    def go(p: Path): Stream[F, Unit] =
      Stream.eval(F.delay(Files.isDirectory(p)))
        .flatMap(isDir =>
          if (isDir)
            // Recurse into each directory entry before touching `p` itself.
            convert.fromJavaStream(F.delay(Files.list(p))).flatMap(go)
          else
            Stream.empty)
        // Post-order: the delete of `p` is appended after all child deletes,
        // so directories are removed only once they are empty.
        .append(Stream.eval(F.delay(Files.delete(p))))
    go(path).compile.drain
  }
}
| slamdata/quasar | foundation/src/main/scala/quasar/contrib/nio/file.scala | Scala | apache-2.0 | 1,374 |
package net.khonda.mt2tumblr
/** A single blog post record: identifier, title, category, date, and body text. */
case class Blog(id: String, title: String, category: String, date: String, body: String)
| keisukehonda/mt2tumblr | src/main/scala/net/khonda/mt2tumblr/Blog.scala | Scala | mit | 119 |
package org.jetbrains.plugins.scala
package editor.backspaceHandler
import com.intellij.codeInsight.editorActions.BackspaceHandlerDelegate
import com.intellij.openapi.editor.Editor
import lang.psi.api.ScalaFile
import lang.scaladoc.lexer.docsyntax.ScaladocSyntaxElementType
import org.jetbrains.plugins.scala.extensions
import com.intellij.psi.{PsiElement, PsiDocumentManager, PsiFile}
import com.intellij.psi.xml.XmlTokenType
import lang.psi.api.expr.xml.ScXmlStartTag
import lang.scaladoc.lexer.ScalaDocTokenType
import lang.lexer.ScalaTokenTypes
/**
* User: Dmitry Naydanov
* Date: 2/24/12
*/
// Smart backspace handling for Scala files: when the user deletes one half of a
// paired construct (scaladoc wiki syntax, XML tag name characters, triple-quoted
// or interpolated multiline string quotes, XML attribute quotes), the matching
// half is deleted as well, before the character itself is removed.
class ScalaBackspaceHandler extends BackspaceHandlerDelegate {
  def beforeCharDeleted(c: Char, file: PsiFile, editor: Editor) {
    if (!file.isInstanceOf[ScalaFile]) return
    val offset = editor.getCaretModel.getOffset
    // Element immediately before the caret — the one the backspace will affect.
    val element = file.findElementAt(offset - 1)
    if (element == null) return
    if (needCorrecrWiki(element)) {
      // Scaladoc wiki syntax (e.g. ''' or {{{): remove the matching closing tag too.
      extensions.inWriteAction {
        val document = editor.getDocument
        if (element.getParent.getLastChild != element) {
          val tagToDelete = element.getParent.getLastChild
          // Bold tags (''') are trimmed one character at a time; other tags are removed whole.
          val textLength =
            if (tagToDelete.getNode.getElementType != ScalaDocTokenType.DOC_BOLD_TAG) tagToDelete.getTextLength else 1
          document.deleteString(tagToDelete.getTextOffset, tagToDelete.getTextOffset + textLength)
        } else {
          document.deleteString(element.getTextOffset, element.getTextOffset + 2)
          editor.getCaretModel.moveCaretRelatively(1, 0, false, false, false)
        }
        PsiDocumentManager.getInstance(file.getProject).commitDocument(editor.getDocument)
      }
    } else if (element.getNode.getElementType == XmlTokenType.XML_NAME && element.getParent != null && element.getParent.isInstanceOf[ScXmlStartTag]) {
      // Caret inside an opening XML tag name: mirror the deletion inside the matching closing tag.
      val openingTag = element.getParent.asInstanceOf[ScXmlStartTag]
      val closingTag = openingTag.getClosingTag
      // Only act when the closing tag's name (between "</" and ">") still matches the opening tag.
      if (closingTag != null && closingTag.getTextLength > 3 && closingTag.getText.substring(2, closingTag.getTextLength - 1) == openingTag.getTagName) {
        extensions.inWriteAction {
          val offsetInName = editor.getCaretModel.getOffset - element.getTextOffset + 1
          editor.getDocument.deleteString(closingTag.getTextOffset + offsetInName, closingTag.getTextOffset + offsetInName + 1)
          PsiDocumentManager.getInstance(file.getProject).commitDocument(editor.getDocument)
        }
      }
    } else if (element.getNode.getElementType == ScalaTokenTypes.tMULTILINE_STRING && offset - element.getTextOffset == 3) {
      // Deleting the third opening quote of """...""": remove the closing triple quote too.
      correctMultilineString(element.getTextOffset + element.getTextLength - 3)
    } else if (element.getNode.getElementType == XmlTokenType.XML_ATTRIBUTE_VALUE_START_DELIMITER && element.getNextSibling != null &&
      element.getNextSibling.getNode.getElementType == XmlTokenType.XML_ATTRIBUTE_VALUE_END_DELIMITER) {
      // Empty XML attribute value (""): deleting the opening quote deletes the closing one as well.
      extensions.inWriteAction {
        editor.getDocument.deleteString(element.getTextOffset + 1, element.getTextOffset + 2)
        PsiDocumentManager.getInstance(file.getProject).commitDocument(editor.getDocument)
      }
    } else if (offset - element.getTextOffset == 3 &&
      element.getNode.getElementType == ScalaTokenTypes.tINTERPOLATED_MULTILINE_STRING &&
      element.getParent.getLastChild.getNode.getElementType == ScalaTokenTypes.tINTERPOLATED_STRING_END &&
      element.getPrevSibling != null &&
      element.getPrevSibling.getNode.getElementType == ScalaTokenTypes.tINTERPOLATED_STRING_ID) {
      // Same as above, but for interpolated multiline strings (e.g. s"""...""").
      correctMultilineString(element.getParent.getLastChild.getTextOffset)
    }
    // Deletes the three closing quotes of a multiline string and shifts the caret
    // left so the subsequent backspace lands on the expected character.
    def correctMultilineString(closingQuotesOffset: Int) {
      extensions.inWriteAction {
        editor.getDocument.deleteString(closingQuotesOffset, closingQuotesOffset + 3)
        editor.getCaretModel.moveCaretRelatively(-1, 0, false, false, false)
        PsiDocumentManager.getInstance(file.getProject).commitDocument(editor.getDocument)
      }
    }
    // True when `element` is scaladoc wiki syntax (or "{{{") whose pair should be fixed up.
    // NOTE(review): method name has a typo ("Correcr") — rename candidate.
    def needCorrecrWiki(element: PsiElement) = (element.getNode.getElementType.isInstanceOf[ScaladocSyntaxElementType]
      || element.getText == "{{{") && (element.getParent.getLastChild != element ||
      element.getText == "'''" && element.getPrevSibling != null && element.getPrevSibling.getText == "'")
  }
  // No post-deletion processing needed; returning false lets default handling proceed.
  def charDeleted(c: Char, file: PsiFile, editor: Editor): Boolean = false
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/editor/backspaceHandler/ScalaBackspaceHandler.scala | Scala | apache-2.0 | 4,393 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.api.datasource
import slamdata.Predef.{Exception, Option}
import quasar.Condition
import monocle.macros.Lenses
import scalaz.Show
import scalaz.syntax.show._
@Lenses
final case class DatasourceMeta(
kind: DatasourceType,
name: DatasourceName,
status: Condition[Exception])
object DatasourceMeta extends DatasourceMetaInstances {
def fromOption(
kind: DatasourceType,
name: DatasourceName,
optErr: Option[Exception])
: DatasourceMeta =
DatasourceMeta(kind, name, Condition.optionIso.reverseGet(optErr))
}
sealed abstract class DatasourceMetaInstances {
implicit val show: Show[DatasourceMeta] = {
implicit val exShow: Show[Exception] =
Show.shows(_.getMessage)
Show.shows {
case DatasourceMeta(n, k, s) =>
"DatasourceMeta(" + k.shows + ", " + n.shows + ", " + s.shows + ")"
}
}
}
| slamdata/quasar | api/src/main/scala/quasar/api/datasource/DatasourceMeta.scala | Scala | apache-2.0 | 1,483 |
package com.github.jeroenr.bson.reader
import java.nio.ByteBuffer
import com.github.jeroenr.bson.element.BsonDouble
object BsonDoubleReader extends Reader[BsonDouble] {
def read(buffer: ByteBuffer): Option[BsonDouble] = {
val name = readCString(buffer)
val value = buffer.getDouble()
Some(BsonDouble(name, value))
}
}
| jeroenr/tepkin | bson/src/main/scala/com/github/jeroenr/bson/reader/BsonDoubleReader.scala | Scala | apache-2.0 | 338 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.examples.similarproduct
import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
lambda: Double,
seed: Option[Long]) extends Params
class ALSModel(
val productFeatures: Map[Int, Array[Double]],
val itemStringIntMap: BiMap[String, Int],
val items: Map[Int, Item]
) extends Serializable {
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
override def toString = {
s" productFeatures: [${productFeatures.size}]" +
s"(${productFeatures.take(2).toList}...)" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]" +
s" items: [${items.size}]" +
s"(${items.take(2).toString}...)]"
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends P2LAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
override
def train(sc: SparkContext, data: PreparedData): ALSModel = {
require(!data.viewEvents.take(1).isEmpty,
s"viewEvents in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.users.take(1).isEmpty,
s"users in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
require(!data.items.take(1).isEmpty,
s"items in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preprator generates PreparedData correctly.")
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.users.keys)
val itemStringIntMap = BiMap.stringInt(data.items.keys)
// collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val mllibRatings = data.viewEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), 1)
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}.reduceByKey(_ + _) // aggregate all view events of same user-item pair
.map { case ((u, i), v) =>
// MLlibRating requires integer index for user and item
MLlibRating(u, i, v)
}
.cache()
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.trainImplicit(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
alpha = 1.0,
seed = seed)
new ALSModel(
productFeatures = m.productFeatures.collectAsMap.toMap,
itemStringIntMap = itemStringIntMap,
items = items
)
}
override
def predict(model: ALSModel, query: Query): PredictedResult = {
val productFeatures = model.productFeatures
// convert items to Int index
val queryList: Set[Int] = query.items.map(model.itemStringIntMap.get(_))
.flatten.toSet
val queryFeatures: Vector[Array[Double]] = queryList.toVector
// productFeatures may not contain the requested item
.map { item => productFeatures.get(item) }
.flatten
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Option[Set[Int]] = query.blackList.map ( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val indexScores: Array[(Int, Double)] = if (queryFeatures.isEmpty) {
logger.info(s"No productFeatures vector for query items ${query.items}.")
Array[(Int, Double)]()
} else {
productFeatures.par // convert to parallel collection
.mapValues { f =>
queryFeatures.map{ qf =>
cosine(qf, f)
}.reduce(_ + _)
}
.filter(_._2 > 0) // keep items with score > 0
.seq // convert back to sequential collection
.toArray
}
val filteredScore = indexScores.view.filter { case (i, v) =>
isCandidateItem(
i = i,
items = model.items,
categories = query.categories,
categoryBlackList = query.categoryBlackList,
queryList = queryList,
whiteList = whiteList,
blackList = blackList
)
}
val topScores = getTopN(filteredScore, query.num)(ord).toArray
val itemScores = topScores.map { case (i, s) =>
// MODIFIED
val it = model.items(i)
ItemScore(
item = model.itemIntStringMap(i),
title = it.title,
date = it.date,
imdbUrl = it.imdbUrl,
score = s
)
}
PredictedResult(itemScores)
}
private
def getTopN[T](s: Seq[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
private
def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
val n1n2 = (math.sqrt(n1) * math.sqrt(n2))
if (n1n2 == 0) 0 else (d / n1n2)
}
private
def isCandidateItem(
i: Int,
items: Map[Int, Item],
categories: Option[Set[String]],
categoryBlackList: Option[Set[String]],
queryList: Set[Int],
whiteList: Option[Set[Int]],
blackList: Option[Set[Int]]
): Boolean = {
whiteList.map(_.contains(i)).getOrElse(true) &&
blackList.map(!_.contains(i)).getOrElse(true) &&
// discard items in query as well
(!queryList.contains(i)) &&
// filter categories
categories.map { cat =>
items(i).categories.map { itemCat =>
// keep this item if has ovelap categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true) &&
categoryBlackList.map { cat =>
items(i).categories.map { itemCat =>
// discard this item if has ovelap categories with the query
(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(true) // keep this item if it has no categories
}.getOrElse(true)
}
}
| PredictionIO/PredictionIO | examples/scala-parallel-similarproduct/return-item-properties/src/main/scala/ALSAlgorithm.scala | Scala | apache-2.0 | 8,548 |
package intellij.haskell.cabal.highlighting
import com.intellij.lexer.FlexAdapter
class CabalSyntaxHighlightingLexer() extends FlexAdapter(new _CabalSyntaxHighlightingLexer)
| rikvdkleij/intellij-haskell | src/main/scala/intellij/haskell/cabal/highlighting/CabalSyntaxHighlightingLexer.scala | Scala | apache-2.0 | 176 |
package se.marcuslonnberg.scaladocker.remote.models.json
import org.joda.time.DateTime
import play.api.libs.functional.syntax._
import play.api.libs.json._
import se.marcuslonnberg.scaladocker.remote.models.json.JsonUtils._
import se.marcuslonnberg.scaladocker.remote.models.{Image, ImageId, ImageName}
trait ImageFormats extends CommonFormats {
implicit val imageFormat = {
((JsPath \\ "Created").format[DateTime](dateTimeSecondsFormat) and
(JsPath \\ "Id").format[ImageId] and
(JsPath \\ "ParentId").format[ImageId] and
(JsPath \\ "RepoTags").formatWithDefault[Seq[ImageName]](Seq.empty) and
(JsPath \\ "Labels").formatWithDefault[Map[String, String]](Map.empty) and
(JsPath \\ "Size").format[Long] and
(JsPath \\ "VirtualSize").format[Long]
)(Image.apply, unlift(Image.unapply))
}
}
| marcuslonnberg/scala-docker | src/main/scala/se/marcuslonnberg/scaladocker/remote/models/json/ImageFormats.scala | Scala | mit | 831 |
package com.arcusys.learn.scorm.tracking.states.impl.liferay
import com.arcusys.learn.persistence.liferay.model.LFObjectiveState
import com.arcusys.learn.persistence.liferay.service.LFObjectiveStateLocalService
import com.arcusys.learn.storage.impl.liferay.MockEntityContainer
import scala.collection.JavaConverters._
object ObjectiveStateEntityContainer extends MockEntityContainer[LFObjectiveStateLocalService, LFObjectiveState] {
lazy val mockLocalService = mock[LFObjectiveStateLocalService]
lazy val mockServiceBeanName = classOf[LFObjectiveStateLocalService].getName
// service related mocks
def createFunction = _.createLFObjectiveState()
def addFunction = _.addLFObjectiveState(_)
def deleteFunction = _.deleteLFObjectiveState(_)
def updateFunction = _.updateLFObjectiveState(_)
def orNull = _.orNull
def getAllFunction = _.getLFObjectiveStates(_, _)
def removeAllFunction = _.removeAll()
// entity related mocks
def createMockEntity() = mock[LFObjectiveState]
def mockEntityProperties(mockEntity: LFObjectiveState) {
mockIntegerProperty(mockEntity.setObjectiveID(_), _.getObjectiveID)
mockIntegerProperty(mockEntity.setActivityStateID(_), _.getActivityStateID)
mockStringProperty(mockEntity.setMapKey(_), _.getMapKey)
mockDecimalProperty(mockEntity.setNormalizedMeasure(_), _.getNormalizedMeasure)
mockBooleanProperty(mockEntity.setSatisfied(_), _.getSatisfied)
}
def getIdFunction = _.getId
mockLocalService.findByMapKeyAndActivityStateID(any, any) answers {
(paramsRaw, mockService) =>
val paramsTuple: (Any, Any) = paramsRaw match {
case Array(a, b) => (a, b)
}
val mapKey = unwrapString(paramsTuple._1)
val stateID = unwrapNullableInteger(paramsTuple._2)
internalStorage.values.find(entity => {
entity.getMapKey == mapKey && entity.getActivityStateID == stateID
}).getOrElse(null)
}
mockLocalService.findByActivityStateID(any, any, any) answers {
(paramsRaw, mockService) =>
val paramsTuple: (Any, Any, Any) = paramsRaw match {
case Array(a, b, c) if a.isInstanceOf[Int] && b.isInstanceOf[Int] && c.isInstanceOf[Int] => (a, b, c)
}
val mapKey = unwrapNullableInteger(paramsTuple._1)
internalStorage.values.filter(entity => {
entity.getActivityStateID == mapKey
}).toList.asJava
}
}
| ViLPy/Valamis | learn-persistence-liferay-wrapper/src/test/scala/com/arcusys/learn/scorm/tracking/states/impl/liferay/ObjectiveStateEntityContainer.scala | Scala | lgpl-3.0 | 2,377 |
package com.github.mdr.graphospasm.grapheditor.figure
import com.github.mdr.graphospasm.grapheditor.model.NodeContentsLayouter
import com.github.mdr.graphospasm.grapheditor.utils.Utils
import com.github.mdr.graphospasm.grapheditor.Plugin
import com.github.mdr.graphospasm.grapheditor.ScaledGraphics
import org.eclipse.swt.graphics.Color
import org.eclipse.draw2d.geometry.Rectangle
import org.eclipse.swt.widgets.Display
import org.eclipse.swt.graphics.Pattern
import org.eclipse.draw2d.geometry.Dimension
import org.eclipse.draw2d.geometry.Point
import org.eclipse.draw2d.{ ScaledGraphics ⇒ _, _ }
import scala.math.{ max, min }
object NodeFigure {
private val shadowSize = 6
private val shadowColour = new Color(null, 192, 192, 192)
private val TITLE_OFFSET = new Dimension(3, 3)
private val gradientColour = new Color(null, 192, 192, 255)
private val gradientHeight = 30
}
class NodeFigure extends Figure {
import NodeFigure._
setLayoutManager(new XYLayout)
private var targetFeedback_ = false
def targetFeedback = targetFeedback_
def targetFeedback_=(b: Boolean) {
targetFeedback_ = b
repaint()
}
private var name_ : String = ""
def name_=(s: String) {
name_ = s
nameLabel.setText(s)
repaint()
}
def name = name_
private val toolTipLabel = new Label
setToolTip(toolTipLabel)
private var toolTipText_ : String = ""
def toolTipText_=(toolTipText: String) {
toolTipText_ = toolTipText
toolTipLabel.setText(toolTipText)
repaint()
}
def toolTipText = toolTipText_
private var nameBounds_ : Rectangle = new Rectangle(0, 0, 0, 0)
def nameBounds = nameBounds_
def nameBounds_=(bounds: Rectangle) {
nameBounds_ = bounds
setConstraint(nameLabel, bounds)
invalidate()
repaint()
}
val nameLabel = new Label(name) {
override def paintFigure(g: Graphics) {
g.setForegroundColor(ColorConstants.black)
super.paintFigure(g)
}
}
add(nameLabel, bounds)
private final var hasAttributes_ = false
def hasAttributes_=(b: Boolean) {
hasAttributes_ = b
}
def hasAttributes = hasAttributes_
def getContentArea(bounds: Rectangle) = bounds.getCopy.resize(-shadowSize, -shadowSize)
override def paintFigure(g: Graphics) {
super.paintFigure(g)
g.pushState()
val contentArea = getContentArea(getBounds)
val display = Display.getDefault
// Shadow
g.setBackgroundColor(shadowColour)
g.fillRoundRectangle(new Rectangle(contentArea.x + shadowSize, contentArea.y + shadowSize, contentArea.width - 1, contentArea.height - 1), 10, 10)
// Background
val backgroundColour = if (targetFeedback) Plugin.backgroundBlue else ColorConstants.white
g.setBackgroundColor(backgroundColour)
g.fillRoundRectangle(new Rectangle(contentArea.x, contentArea.y, contentArea.width - 1, contentArea.height - 1), 10, 10)
// Gradient
g.pushState()
val pattern = new Pattern(display, contentArea.x, contentArea.y, contentArea.x, contentArea.y + gradientHeight, gradientColour, backgroundColour)
if (g.isInstanceOf[ScaledGraphics])
g.asInstanceOf[ScaledGraphics].setBackgroundPattern(display, contentArea.x, contentArea.y, contentArea.x, contentArea.y + gradientHeight, gradientColour, backgroundColour)
else
g.setBackgroundPattern(pattern)
g.fillRoundRectangle(new Rectangle(contentArea.x, contentArea.y, contentArea.width - 1, gradientHeight - 3 /* <= adjusted for a couple of glitches */ ), 10, 10)
pattern.dispose()
g.setBackgroundPattern(null)
g.popState()
// Border
g.setForegroundColor(ColorConstants.black)
g.drawRoundRectangle(new Rectangle(contentArea.x, contentArea.y, contentArea.width - 1, contentArea.height - 1), 10, 10)
// Name - attribute divider line
val titleTextPos = contentArea.getTopLeft.getTranslated(new Dimension((contentArea.width - nameBounds.width) / 2, 3))
val lineY = titleTextPos.y + nameBounds.height + 2
if (hasAttributes_)
g.drawLine(contentArea.x, lineY, contentArea.getRight.x - 1, lineY)
g.popState()
}
val connectionAnchor = new ChopboxAnchor(this)
}
| mdr/graphospasm | com.github.mdr.graphospasm.grapheditor/src/main/scala/com/github/mdr/graphospasm/grapheditor/figure/NodeFigure.scala | Scala | mit | 4,141 |
package com.twitter.finagle.zipkin
import com.twitter.app.GlobalFlag
import com.twitter.finagle.zipkin.thrift.Sampler
import java.net.InetSocketAddress
object host extends GlobalFlag(
new InetSocketAddress("localhost", 1463),
"Host to scribe traces to")
object initialSampleRate extends GlobalFlag(
Sampler.DefaultSampleRate,
"Initial sample rate")
| firebase/finagle | finagle-zipkin/src/main/scala/com/twitter/finagle/zipkin/Flags.scala | Scala | apache-2.0 | 360 |
package org.dsa.iot.rx.core
import org.dsa.iot.rx.RxTransformer
import org.dsa.iot.scala.Having
/**
* Tests whether a predicate holds for some of the elements of the source.
*/
class Exists[T] extends RxTransformer[T, Boolean] {
def predicate(func: T => Boolean): Exists[T] = this having (predicate <~ func)
val predicate = Port[T => Boolean]("predicate")
protected def compute = predicate.in flatMap source.in.exists
}
/**
* Factory for [[Exists]] instances.
*/
object Exists {
/**
* Creates a new Exists instance.
*/
def apply[T]: Exists[T] = new Exists[T]
/**
* Creates a new Exists instance for a given predicate.
*/
def apply[T](predicate: T => Boolean): Exists[T] = {
val block = new Exists[T]
block.predicate <~ predicate
block
}
} | IOT-DSA/dslink-scala-ignition | src/main/scala/org/dsa/iot/rx/core/Exists.scala | Scala | apache-2.0 | 796 |
/** Copyright 2014 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.data.storage.hbase
import io.prediction.data.storage.Event
import io.prediction.data.storage.DataMap
import io.prediction.data.storage.LEvents
import io.prediction.data.storage.LEventAggregator
import io.prediction.data.storage.StorageError
import io.prediction.data.storage.hbase.HBEventsUtil.RowKey
import io.prediction.data.storage.hbase.HBEventsUtil.RowKeyException
import grizzled.slf4j.Logging
import org.joda.time.DateTime
import org.apache.hadoop.hbase.NamespaceDescriptor
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.client.Scan
import scala.collection.JavaConversions._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
class HBLEvents(val client: HBClient, val namespace: String)
extends LEvents with Logging {
//implicit val formats = DefaultFormats + new EventJson4sSupport.DBSerializer
def resultToEvent(result: Result, appId: Int): Event =
HBEventsUtil.resultToEvent(result, appId)
def getTable(appId: Int) = client.connection.getTable(
HBEventsUtil.tableName(namespace, appId))
override
def init(appId: Int): Boolean = {
// check namespace exist
val existingNamespace = client.admin.listNamespaceDescriptors()
.map(_.getName)
if (!existingNamespace.contains(namespace)) {
val nameDesc = NamespaceDescriptor.create(namespace).build()
info(s"The namespace ${namespace} doesn't exist yet. Creating now...")
client.admin.createNamespace(nameDesc)
}
val tableName = TableName.valueOf(HBEventsUtil.tableName(namespace, appId))
if (!client.admin.tableExists(tableName)) {
info(s"The table ${tableName.getNameAsString()} doesn't exist yet." +
" Creating now...")
val tableDesc = new HTableDescriptor(tableName)
tableDesc.addFamily(new HColumnDescriptor("e"))
tableDesc.addFamily(new HColumnDescriptor("r")) // reserved
client.admin.createTable(tableDesc)
}
true
}
override
def remove(appId: Int): Boolean = {
val tableName = TableName.valueOf(HBEventsUtil.tableName(namespace, appId))
try {
if (client.admin.tableExists(tableName)) {
info(s"Removing table ${tableName.getNameAsString()}...")
client.admin.disableTable(tableName)
client.admin.deleteTable(tableName)
} else {
info(s"Table ${tableName.getNameAsString()} doesn't exist." +
s" Nothing is deleted.")
}
true
} catch {
case e: Exception => {
error(s"Fail to remove table for appId ${appId}. Exception: ${e}")
false
}
}
}
override
def close() = {
client.admin.close()
client.connection.close()
}
override
def futureInsert(event: Event, appId: Int)(implicit ec: ExecutionContext):
Future[Either[StorageError, String]] = {
Future {
val table = getTable(appId)
val (put, rowKey) = HBEventsUtil.eventToPut(event, appId)
table.put(put)
table.flushCommits()
table.close()
Right(rowKey.toString)
}/*.recover {
case e: Exception => Left(StorageError(e.toString))
}*/
}
override
def futureGet(eventId: String, appId: Int)(implicit ec: ExecutionContext):
Future[Either[StorageError, Option[Event]]] = {
Future {
val table = getTable(appId)
val rowKey = RowKey(eventId)
val get = new Get(rowKey.toBytes)
val result = table.get(get)
table.close()
if (!result.isEmpty()) {
val event = resultToEvent(result, appId)
Right(Some(event))
} else {
Right(None)
}
}.recover {
case e: RowKeyException => Left(StorageError(e.toString))
case e: Exception => throw e
}
}
override
def futureDelete(eventId: String, appId: Int)(implicit ec: ExecutionContext):
Future[Either[StorageError, Boolean]] = {
Future {
val table = getTable(appId)
val rowKey = RowKey(eventId)
val exists = table.exists(new Get(rowKey.toBytes))
table.delete(new Delete(rowKey.toBytes))
table.close()
Right(exists)
}
}
override
def futureGetByAppId(appId: Int)(implicit ec: ExecutionContext):
Future[Either[StorageError, Iterator[Event]]] = {
futureFind(
appId = appId,
startTime = None,
untilTime = None,
entityType = None,
entityId = None,
eventNames = None,
limit = None,
reversed = None)
}
override
def futureGetByAppIdAndTime(appId: Int, startTime: Option[DateTime],
untilTime: Option[DateTime])(implicit ec: ExecutionContext):
Future[Either[StorageError, Iterator[Event]]] = {
futureFind(
appId = appId,
startTime = startTime,
untilTime = untilTime,
entityType = None,
entityId = None,
eventNames = None,
limit = None,
reversed = None)
}
override
def futureGetByAppIdAndTimeAndEntity(appId: Int,
startTime: Option[DateTime],
untilTime: Option[DateTime],
entityType: Option[String],
entityId: Option[String])(implicit ec: ExecutionContext):
Future[Either[StorageError, Iterator[Event]]] = {
futureFind(
appId = appId,
startTime = startTime,
untilTime = untilTime,
entityType = entityType,
entityId = entityId,
eventNames = None,
limit = None,
reversed = None)
}
override
def futureFind(
appId: Int,
startTime: Option[DateTime] = None,
untilTime: Option[DateTime] = None,
entityType: Option[String] = None,
entityId: Option[String] = None,
eventNames: Option[Seq[String]] = None,
targetEntityType: Option[Option[String]] = None,
targetEntityId: Option[Option[String]] = None,
limit: Option[Int] = None,
reversed: Option[Boolean] = None)(implicit ec: ExecutionContext):
Future[Either[StorageError, Iterator[Event]]] = {
Future {
val table = getTable(appId)
val scan = HBEventsUtil.createScan(
startTime = startTime,
untilTime = untilTime,
entityType = entityType,
entityId = entityId,
eventNames = eventNames,
targetEntityType = targetEntityType,
targetEntityId = targetEntityId,
reversed = reversed)
val scanner = table.getScanner(scan)
table.close()
val eventsIter = scanner.iterator()
// Get all events if None or Some(-1)
val results: Iterator[Result] = limit match {
case Some(-1) => eventsIter
case None => eventsIter
case Some(x) => eventsIter.take(x)
}
val eventsIt = results.map { resultToEvent(_, appId) }
Right(eventsIt)
}
}
override
def futureAggregateProperties(
appId: Int,
entityType: String,
startTime: Option[DateTime] = None,
untilTime: Option[DateTime] = None,
required: Option[Seq[String]] = None)(implicit ec: ExecutionContext):
Future[Either[StorageError, Map[String, DataMap]]] = {
futureFind(
appId = appId,
startTime = startTime,
untilTime = untilTime,
entityType = Some(entityType),
eventNames = Some(LEventAggregator.eventNames)
).map{ either =>
either.right.map{ eventIt =>
val dm = LEventAggregator.aggregateProperties(eventIt)
if (required.isDefined) {
dm.filter { case (k, v) =>
required.get.map(v.contains(_)).reduce(_ && _)
}
} else dm
}
}
}
override
def futureDeleteByAppId(appId: Int)(implicit ec: ExecutionContext):
Future[Either[StorageError, Unit]] = {
Future {
// TODO: better way to handle range delete
val table = getTable(appId)
val scan = new Scan()
val scanner = table.getScanner(scan)
val it = scanner.iterator()
while (it.hasNext()) {
val result = it.next()
table.delete(new Delete(result.getRow()))
}
scanner.close()
table.close()
Right(())
}
}
}
| TheDataShed/PredictionIO | data/src/main/scala/storage/hbase/HBLEvents.scala | Scala | apache-2.0 | 9,029 |
package fi.pyppe.subtitler
import java.io._
import java.util.zip.GZIPInputStream
import com.ning.http.util.Base64
import org.joda.time.DateTime
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.xml.{Elem, XML}
/** Enumerates the ways the OpenSubtitles API can match a subtitle to a query. */
object MatchedBy extends Enumeration {
  type MatchedBy = Value

  /** Matched via the file's OpenSubtitles content hash. */
  val MovieHash = Value
  /** Matched via IMDB identifier. */
  val Imdb = Value
  /** Matched via file-name tag. */
  val Tag = Value
  /** Matched via full-text search. */
  val FullText = Value
}
import MatchedBy._
/** One subtitle search hit returned by the OpenSubtitles API. */
case class Subtitle(matchedBy: MatchedBy, idSubMovieFile: String, hash: String, movieByteSize: Long,
                    idSubtitleFile: String, subFileName: String, idSubtitle: String, languageCode: String,
                    format: String, cdCount: Int, downloadCount: Int, rating: Double, badCount: Int, idMovie: String,
                    imdbId: String, movieName: String, movieNameEng: String, movieYear: Int) {

  /** Identifier used when requesting the actual subtitle file for download. */
  def downloadId: String = idSubtitleFile

  /** File-name extension for this subtitle: the format with dots stripped and
    * lower-cased, falling back to "sub" when the result is blank. */
  def formatSafe: String = {
    val normalized = format.replace(".", "").toLowerCase
    if (normalized.isEmpty) "sub" else normalized
  }
}
object Subtitle {
  /** A placeholder [[Subtitle]] with blank/neutral field values. */
  def empty: Subtitle =
    Subtitle(
      matchedBy = MatchedBy.Tag,
      idSubMovieFile = "",
      hash = "",
      movieByteSize = 0L,
      idSubtitleFile = "",
      subFileName = "",
      idSubtitle = "",
      languageCode = "eng",
      format = "srt",
      cdCount = 1,
      downloadCount = 0,
      rating = 0.0d,
      badCount = 0,
      idMovie = "",
      imdbId = "",
      movieName = "",
      movieNameEng = "",
      movieYear = 2014
    )
}
/** Raw subtitle payload returned by the API: the content arrives encoded
  * (decoded/decompressed by OpenSubtitlesDecoder on first access). */
case class SubtitleData(id: String, encodedData: String) {

  /** Decoded subtitle text as UTF-8; computed lazily on first access. */
  lazy val content: String = {
    val bytes = OpenSubtitlesDecoder.decodeAndDecompress(encodedData)
    new String(bytes, "utf-8")
  }

  // Avoid dumping the (potentially large) encoded payload; show its length instead.
  override def toString() = "SubtitleData(" + id + ", encodedLength = " + encodedData.length + ")"
}
/** Client for the OpenSubtitles XML-RPC API: login/session handling, subtitle
  * search (by tag and by file hash) and subtitle download. */
object OpenSubtitlesAPI extends Logging {
  import HttpUtils._
  import OpenSubtitlesHasher.computeHash

  // Cached session token paired with the time it was last used successfully.
  // withValidToken treats the token as stale once ~14 minutes have passed since
  // that timestamp, and logs in again.
  @volatile
  private var expiringToken: Option[(String, DateTime)] = None

  // XML-RPC endpoint of the public OpenSubtitles API.
  private val EndPoint = "http://api.opensubtitles.org/xml-rpc"
  //private val EndPoint = "http://localhost:8080/xml-rpc"
def serverInfo(): Future[Elem] = {
postXML(EndPoint, <methodCall><methodName>ServerInfo</methodName></methodCall>)
}
private def parseRootMembers(xml: Elem): List[(String, String)] = {
(xml \\\\ "member").map { member =>
(member \\ "name").text.trim -> (member \\ "value").text.trim
}.toList
}
  /** Parses an OpenSubtitles `SearchSubtitles` XML-RPC response into [[Subtitle]] records.
    *
    * The response's "data" member carries a flat sequence of struct members; a new
    * subtitle record is assumed to begin at every "MatchedBy" key. The result is
    * de-duplicated by download id, keeping the first occurrence of each id
    * (note: groupBy returns an unordered Map, so the final list order is not guaranteed).
    */
  def parseSearchResponse(xml: Elem): List[Subtitle] = {
    // Maps the server's "MatchedBy" value onto our enumeration.
    // NOTE(review): an unexpected value throws a MatchError — confirm that is acceptable.
    def matchedBy(value: String) = value match {
      case "moviehash" => MovieHash
      case "imdbid" => Imdb
      case "tag" => Tag
      case "fulltext" => FullText
    }
    // Builds one Subtitle out of a record's accumulated key -> value pairs.
    // Missing keys throw NoSuchElementException (Map.apply), as do malformed numbers.
    def subtitle(fields: Map[String, String]) = Subtitle(
      matchedBy(fields("MatchedBy")),
      fields("IDSubMovieFile"),
      fields("MovieHash"),
      fields("MovieByteSize").toLong,
      fields("IDSubtitleFile"),
      fields("SubFileName"),
      fields("IDSubtitle"),
      fields("ISO639"),
      fields("SubFormat"),
      fields("SubSumCD").toInt,
      fields("SubDownloadsCnt").toInt,
      fields("SubRating").toDouble,
      fields("SubBad").toInt,
      fields("IDMovie"),
      fields("IDMovieImdb"),
      fields("MovieName"),
      fields("MovieNameEng"),
      fields("MovieYear").toInt
    )
    // All (name, value) pairs found under the "data" member, in document order.
    val dataMembers = (for {
      member <- xml \\\\ "member" if (member \\ "name").text == "data"
      dataMember <- member \\\\ "struct" \\\\ "member"
    } yield {
      (dataMember \\ "name").text.trim -> (dataMember \\ "value").text.trim
    }).toList
    // Fold the flat pair list into records: each "MatchedBy" key closes the
    // record accumulated so far (if any) and opens a new one. `acc` collects
    // finished records in reverse order; `fields` holds the in-progress record.
    val (acc, fields) = dataMembers.foldLeft((List.empty[Subtitle], Map.empty[String, String])) {
      case ((acc, fields), (key, value)) =>
        key match {
          case "MatchedBy" if fields.nonEmpty =>
            (subtitle(fields) :: acc) -> Map(key -> value)
          case _ =>
            acc -> (fields + (key -> value))
        }
    }
    // Close the trailing record (if one is still open) and restore document order.
    val results =
      if (fields.nonEmpty)
        (subtitle(fields) :: acc).reverse
      else
        acc.reverse
    // De-duplicate by download id, keeping one subtitle per id.
    results.groupBy(_.downloadId).values.map(_.head).toList
  }
  /** Runs `action` with a valid API session token, logging in first when needed.
    *
    * The cached token is reused when it was last used less than 14 minutes ago
    * (presumably because OpenSubtitles sessions expire after ~15 minutes of
    * inactivity — TODO confirm); otherwise a fresh login is performed. When the
    * action succeeds, the cached token's last-used timestamp is refreshed.
    *
    * NOTE(review): `Future.onSuccess` is deprecated in newer Scala versions
    * (use `foreach`), and the read-then-write of `expiringToken` is not atomic,
    * so concurrent callers may race — confirm whether that matters here.
    */
  private def withValidToken[T](action: String => Future[T])
                               (implicit settings: Settings): Future[T] = {
    val future = expiringToken.filter( _._2.plusMinutes(14).isAfterNow ).map {
      case (token, _) =>
        action(token)
    }.getOrElse {
      // No usable token: log in (which caches a fresh token) and then run the action.
      logIn flatMap action
    }
    future.onSuccess {
      case _ =>
        // Refresh the last-used timestamp so the token remains considered valid.
        expiringToken = expiringToken.map {
          case (token, _) => (token, DateTime.now)
        }
    }
    future
  }
  /** Logs in to the OpenSubtitles API with the configured credentials.
    *
    * On success the returned session token is cached in `expiringToken`
    * (with the current time as its last-used timestamp) and yielded.
    * Fails the future with IllegalArgumentException (via `require`) when the
    * server does not answer "200 OK".
    */
  def logIn()(implicit settings: Settings): Future[String] = {
    val os = settings.openSubtitlesConf
    val language = "en"
    // XML-RPC LogIn request; parameter order is login, password, language, useragent.
    val req =
      <methodCall>
        <methodName>LogIn</methodName>
        <params>
          <param><value><string>{os.login}</string></value></param>
          <param><value><string>{os.password}</string></value></param>
          <param><value><string>{language}</string></value></param>
          <param><value><string>{os.useragent}</string></value></param>
        </params>
      </methodCall>
    postXML(EndPoint, req).map { xml =>
      val members = parseRootMembers(xml).toMap
      val status = members("status")
      require(status == "200 OK", s"Invalid status: $status")
      val responseToken = members("token")
      // Cache the fresh token for withValidToken to reuse.
      expiringToken = Some((responseToken, DateTime.now))
      logger.info(s"Successfully logged-in with token $responseToken")
      responseToken
    }
  }
def searchSubtitles(f: File)(implicit s: Settings): Future[List[(Subtitle, Double)]] = withValidToken { _ =>
logger.debug(s"Finding subtitles for ${f.getName}")
searchSubtitlesByTag(f.getName) zip searchSubtitlesByFileHash(f) map {
case (byTag, byHash) =>
val tagIds = byTag.map(_.downloadId).toSet
val subtitles = byTag ++ byHash.filter(s => tagIds(s.downloadId))
SubtitleScorer.scoreAndSortCandidates(f.getName, subtitles)
}
}
/** Picks the single best-scoring subtitle for `f`, if any candidate exists. */
def searchSubtitle(f: File)(implicit s: Settings): Future[Option[Subtitle]] = {
  searchSubtitles(f).map { scored =>
    val total = scored.length
    scored.headOption match {
      case Some((top, _)) =>
        logger.debug(s"Found subtitle ${top.subFileName} for ${f.getName} out of $total candidates")
        Some(top)
      case None =>
        logger.debug(s"Could not find subtitle for ${f.getName} (out of $total candidates)")
        None
    }
  }
}
/** Downloads the payloads for the given subtitles and pairs each subtitle with
  * its downloaded data; subtitles whose id is missing from the response are
  * silently dropped from the result.
  */
def downloadSubtitles(subtitles: Subtitle*)
                     (implicit s: Settings): Future[List[(Subtitle, SubtitleData)]] = withValidToken { token =>
  logger.debug(s"Downloading subtitles: ${subtitles.map(_.subFileName).mkString(" ")}")
  val ids = subtitles.map(_.downloadId)
  downloadSubtitleIds(ids: _*).map { fetched =>
    fetched.flatMap { data =>
      subtitles.find(_.downloadId == data.id).map(sub => sub -> data)
    }
  }
}
/** XML-RPC `DownloadSubtitles`: fetches the (base64 + gzip encoded) payloads
  * for the given download ids using the current session token.
  *
  * The response contains an array of structs with id/data members; the flat
  * member values are consumed in consecutive pairs.
  */
def downloadSubtitleIds(ids: String*)(implicit s: Settings): Future[List[SubtitleData]] = withValidToken { token =>
  val idValues = ids.map { id =>
    <value><string>{id}</string></value>
  }
  val req =
    <methodCall>
      <methodName>DownloadSubtitles</methodName>
      <params>
        <param><value><string>{token}</string></value></param>
        <param>
          <value>
            <array>
              <data>
                {idValues}
              </data>
            </array>
          </value>
        </param>
      </params>
    </methodCall>
  postXML(EndPoint, req).map { xml =>
    // Flatten every struct member value under the response arrays.
    val values = (xml \\\\ "array" \\\\ "member").map { member =>
      (member \\ "value").text.trim
    }
    // Members arrive as [id, data, id, data, ...]; pair them up.
    // NOTE(review): assumes each struct contributes exactly those two members in
    // that order — confirm against the OpenSubtitles response format.
    values.grouped(2).collect {
      case Seq(a, b) => SubtitleData(a, b)
    }.toList
  }
}
/** Searches subtitles whose "tag" (release/file name) matches `tag`. */
def searchSubtitlesByTag(tag: String)(implicit s: Settings) =
  search("tag" -> tag)
/** Searches subtitles by the OpenSubtitles content hash plus the file size. */
def searchSubtitlesByFileHash(file: File)(implicit s: Settings) = {
  search(
    "moviehash" -> computeHash(file),
    "moviebytesize" -> file.length
  )
}
/** Runs an XML-RPC `SearchSubtitles` call for the given criteria, always
  * restricted to the languages configured in `Settings`.
  */
private def search(values: (String, Any)*)
                  (implicit s: Settings): Future[List[Subtitle]] = withValidToken { token =>
  val languageIds = s.languages.map(_.id).mkString(",")
  val allCriteria = values :+ ("sublanguageid" -> languageIds)
  postXML(EndPoint, searchSubtitlesQuery(token, allCriteria: _*)).map(parseSearchResponse)
}
/** Builds the `SearchSubtitles` XML-RPC request body.
  *
  * Each (key, value) pair becomes a member of a single search struct; Int/Long
  * values are encoded as {"<int>"}, Strings as {"<string>"} (any other runtime
  * type is a MatchError). A second struct caps the result set at 500 entries.
  */
private def searchSubtitlesQuery(token: String, values: (String, Any)*): Elem = {
  val members = values.map {
    case (key, value) =>
      // XML-RPC is typed: pick the element matching the runtime type.
      val typedValue = value match {
        case i: Int => <int>{i}</int>
        case l: Long => <int>{l}</int>
        case s: String => <string>{s}</string>
      }
      <member>
        <name>{key}</name>
        <value>{typedValue}</value>
      </member>
  }.toList
  <methodCall>
    <methodName>SearchSubtitles</methodName>
    <params>
      <param><value><string>{token}</string></value></param>
      <param>
        <value>
          <array>
            <data>
              <value><struct>{members}</struct></value>
            </data>
          </array>
        </value>
      </param>
      <param>
        <value>
          <struct>
            <member>
              <name>limit</name>
              <value><int>500</int></value>
            </member>
          </struct>
        </value>
      </param>
    </params>
  </methodCall>
}
}
/** Decodes OpenSubtitles payloads, which arrive base64-encoded over gzip. */
object OpenSubtitlesDecoder {

  /** Base64-decodes `encodedData`, gunzips the result and returns the raw bytes.
    *
    * The GZIPInputStream is closed in a finally block (it was previously leaked,
    * which also leaks its native Inflater); the ByteArrayOutputStream needs no
    * close and the former BufferedOutputStream wrapper added nothing.
    */
  def decodeAndDecompress(encodedData: String): Array[Byte] = {
    val decodedBytes = Base64.decode(encodedData)
    val gis = new GZIPInputStream(new ByteArrayInputStream(decodedBytes))
    val baos = new ByteArrayOutputStream()
    try {
      // Copy the inflated stream in 1 KiB chunks until EOF.
      val buf = new Array[Byte](1024)
      var n = gis.read(buf)
      while (n >= 0) {
        baos.write(buf, 0, n)
        n = gis.read(buf)
      }
    } finally {
      gis.close()
    }
    baos.toByteArray
  }
}
// See http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes#Scala
/** Implements the OpenSubtitles 64-bit file hash.
  *
  * The hash is: file size + (wrap-around) sum of the little-endian 64-bit words
  * of the first 64 KiB and of the last 64 KiB of the file, rendered as a
  * 16-digit lower-case hex string. Trailing bytes that do not fill a whole
  * 64-bit word are ignored (asLongBuffer only exposes complete longs).
  */
object OpenSubtitlesHasher {
  import java.nio.{LongBuffer, ByteOrder, ByteBuffer}
  import java.nio.channels.FileChannel.MapMode
  import scala.math._

  // Both sampled windows are 64 KiB long.
  private val hashChunkSize = 64L * 1024L

  def computeHash(file: File): String = {
    val length = file.length
    val chunkSize = min(length, hashChunkSize)
    // Closing the channel also closes the FileInputStream it came from.
    val channel = new FileInputStream(file).getChannel
    try {
      val headSum = computeHashForChunk(channel.map(MapMode.READ_ONLY, 0, chunkSize))
      val tailStart = max(length - hashChunkSize, 0)
      val tailSum = computeHashForChunk(channel.map(MapMode.READ_ONLY, tailStart, chunkSize))
      "%016x".format(length + headSum + tailSum)
    } finally {
      channel.close()
    }
  }

  // Sums the chunk's little-endian longs, wrapping on overflow.
  private def computeHashForChunk(buffer: ByteBuffer): Long = {
    val longs = buffer.order(ByteOrder.LITTLE_ENDIAN).asLongBuffer()
    var sum = 0L
    while (longs.hasRemaining) {
      sum += longs.get
    }
    sum
  }
}
| Pyppe/subtitler | src/main/scala/fi/pyppe/subtitler/OpenSubtitlesAPI.scala | Scala | mit | 10,766 |
/**
* CPNetSolver
* Copyright (C) 2013 Francesco Burato, Simone Carriero
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*
* File: POGCreator.scala
* Package: solver
* Autore: Francesco Burato
* Creazione: 31/lug/2013
*/
package solver
import edu.uci.ics.jung.graph.{ Graph, DirectedSparseGraph }
import constraintobjs.{Domain,Ordini,Comparator}
import scala.collection.mutable.HashMap
/**
* @author Francesco Burato
* Generatore del grafo degli ordini parziali delle soluzioni.
*/
class POGCreator {
  // Variable names and their preference comparators, aligned by index.
  val variables : Array[String] = new Array[String](Domain.domains.size)
  val comparators : Array[Comparator] = new Array[Comparator](Domain.domains.size)
  // Monotonically increasing id used to give every graph edge a unique label.
  var edge = 0
  // For each variable, its accepted values linearized into an indexable array.
  private val domains = new HashMap[String, Array[String]]
  init()

  /** Fills `variables`, `comparators` and `domains` from the global Domain/Ordini state. */
  private def init() {
    var i = 0
    Domain.domains.foreach {
      case (s, d) =>
        comparators(i) = Ordini(s) match {
          case None => throw new Exception("The orders have not been completely initialized")
          case Some(x) => x.comparator
        }
        variables(i) = s
        domains += (s -> d.accepted.toArray)
        i += 1
    }
  }

  /** Builds the partial-order graph over all complete assignments.
    *
    * Nodes are assignments rendered by [[nodeName]]; every single-variable flip
    * yields a directed edge oriented towards the preferred assignment.
    */
  def getGraph() : Graph[String,String] = {
    // (A shadowing local `var edge = 0` used to live here; it was dead code —
    // edge ids come from the class-level `edge` field used by processAssign.)
    val counter = new ClassicCounter(variables.size)
    for (i <- 0 until variables.size)
      counter.setSleeve(i, domains(variables(i)).size)
    // Keep the side effect outside assert so it survives assertion elision.
    val initialized = counter.init
    assert(initialized)
    val res = new DirectedSparseGraph[String,String]
    // The very last assignment has no successors, so stopping at `end` loses nothing.
    while (!counter.end) {
      processAssign(counter, res)
      counter++
    }
    res
  }

  /** Adds the edges between `counter`'s assignment and each of its one-flip neighbours. */
  private def processAssign(counter : Counter, graph : DirectedSparseGraph[String,String]) {
    val startingNode = nodeName(counter)
    // Enumerates every assignment differing from `counter` in exactly one variable.
    val variation = new VariationCounter(counter)
    var updatedVariable = variation++;
    while (updatedVariable != -1) {
      val comparator = comparators(updatedVariable)
      val originalValue = domains(variables(updatedVariable))(counter(updatedVariable)) // value before the flip
      val newValue = domains(variables(updatedVariable))(variation(updatedVariable)) // value after the flip
      val reachingNode = nodeName(variation)
      comparator.reset
      // Feed the whole varied assignment to the comparator until it has enough context.
      var i = 0
      var completed = false
      while (!completed && i < variation.size) {
        completed = comparator.put(variables(i), domains(variables(i))(variation(i)))
        i += 1
      }
      assert(completed)
      // Orient the edge from the less preferred assignment towards the preferred one.
      comparator.isMinor(originalValue, newValue) match {
        case None => assert(false)
        case Some(x) =>
          if (x)
            graph.addEdge("" + edge, reachingNode, startingNode)
          else
            graph.addEdge("" + edge, startingNode, reachingNode)
      }
      edge += 1
      updatedVariable = variation++
    }
  }

  /** Renders an assignment as the comma-separated list of its domain values. */
  private def nodeName(counter : Counter) : String =
    (0 until counter.size).map(i => domains(variables(i))(counter(i))).mkString(",")
} | fburato/CPNetSolver | CPNetSolver/src/solver/POGCreator.scala | Scala | gpl-3.0 | 4,170 |
import org.apache.hadoop.hbase.client.{HBaseAdmin, Result}
import org.apache.hadoop.hbase.{ HBaseConfiguration, HTableDescriptor }
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, HBaseConfiguration}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark._
/** Spark-on-HBase smoke test: ensures table `table1` exists, then counts its
  * rows via a TableInputFormat RDD every 10 seconds, forever.
  *
  * NOTE(review): the `while(true)` loop never exits, so `sc.stop()` at the end
  * is unreachable and the HBaseAdmin is never closed — acceptable only for a
  * throwaway demo.
  */
object HBaseRead {
  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("HBaseRead").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    val conf = HBaseConfiguration.create()
    val tableName = "table1"

    // Impersonate the hdfs user for both the JVM and Hadoop's UGI lookup.
    System.setProperty("user.name", "hdfs")
    System.setProperty("HADOOP_USER_NAME", "hdfs")
    conf.set("hbase.master", "localhost:16000")
    conf.setInt("timeout", 12000)
    conf.set("hbase.zookeeper.quorum", "localhost")
    // Remember to change the znode parent.
    // Could be: /hbase-unsecure, /hbase-secure, /hbase
    conf.set("zookeeper.znode.parent", "/hbase-unsecure")
    conf.set(TableInputFormat.INPUT_TABLE, tableName)

    val admin = new HBaseAdmin(conf)
    while(true) {
      // Create the table with a single "ids" column family on the first pass.
      if (!admin.isTableAvailable(tableName)) {
        println("before create table")
        val tableDesc = new HTableDescriptor(tableName)
        val idsColumnFamilyDesc = new HColumnDescriptor(Bytes.toBytes("ids"))
        tableDesc.addFamily(idsColumnFamilyDesc)
        admin.createTable(tableDesc)
      }
      println("after create table")

      // Full-table scan through the MapReduce input format, just to count rows.
      val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
      println("Number of Records found : " + hBaseRDD.count())

      Thread sleep 10000
    }
    // Unreachable — see the note in the scaladoc above.
    sc.stop()
  }
}
| bucaojit/Hortonworks-Tools | spark-tools/hbase-auth-work/sparkhbase.scala | Scala | apache-2.0 | 1,729 |
package baskingcat.game
import org.lwjgl._
import java.io._
/** User-facing error reporting helpers: show an alert dialog, then rethrow
  * the original throwable so the caller still sees the failure.
  */
object Message {

  def error(e: Throwable, message: String) = {
    Sys.alert("Error", message)
    // TODO: also record the failure in a log
    throw e
  }

  def systemError(e: Throwable) = error(e, "システムエラー")

  def fileNotFoundError(e: Throwable, fileName: String) = error(e, s"${fileName}が見つかりません。")
}
| halcat0x15a/gamelib | src/main/scala/baskingcat/game/Message.scala | Scala | bsd-3-clause | 377 |
package com.regblanc.sgl.snake
package desktop
import core._
import sgl.{InputHelpersComponent, GameLoopStatisticsComponent}
import sgl.awt._
import sgl.awt.util._
/** Wire backend to the App here */
/** Desktop entry point: wires the shared snake game logic to the AWT backend. */
object Main extends AWTApp with AbstractApp
  with InputHelpersComponent with VerboseStdErrLoggingProvider {

  // Cap the game loop at 60 frames per second.
  override val TargetFps = Some(60)
  // Window size comes from the core game's board dimensions.
  override val frameDimension = (TotalWidth, TotalHeight)
}
| regb/scala-game-library | examples/snake/desktop-awt/src/main/scala/Main.scala | Scala | mit | 412 |
package temportalist.esotericraft.emulation.client
import javax.annotation.Nullable
import net.minecraft.client.Minecraft
import net.minecraft.client.renderer.entity.{Render, RenderLivingBase}
import net.minecraft.entity.EntityLivingBase
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
import scala.collection.{JavaConversions, mutable}
/**
*
* Created by TheTemportalist on 5/8/2016.
*
* @author TheTemportalist
*/
@SideOnly(Side.CLIENT)
object ModelHandler {

  // Cache from a living-entity class to the wrapper around its renderer's main model.
  private val entityModels = mutable.Map[Class[_], EntityModel[_ <: EntityLivingBase, _ <: EntityLivingBase]]()

  /** Scans Minecraft's renderer map and caches a model wrapper for every
    * living-entity renderer found there.
    */
  def loadEntityModels(): Unit = {
    val map = JavaConversions.mapAsScalaMap(Minecraft.getMinecraft.getRenderManager.entityRenderMap)
    for (entry <- map) {
      if (classOf[EntityLivingBase].isAssignableFrom(entry._1)) {
        this.loadModel(
          entry._1.asInstanceOf[Class[_ <: EntityLivingBase]],
          entry._2.asInstanceOf[Render[_ <: EntityLivingBase]]
        )
      }
    }
  }

  /** Caches the main model of `renderer` for `clazz`; non-living renderers are ignored. */
  def loadModel[C <: EntityLivingBase, R <: EntityLivingBase](clazz: Class[C], renderer: Render[R]): Unit = {
    renderer match {
      case renderLiving: RenderLivingBase[R] =>
        this.entityModels.put(clazz, new EntityModel(clazz, renderer, renderLiving.getMainModel))
      case _ =>
    }
  }

  /** Convenience overload resolving the model from an entity instance's class. */
  @Nullable
  def getEntityModel[E <: EntityLivingBase](entity: E): EntityModel[E, E] = {
    getEntityModel(entity.getClass.asInstanceOf[Class[E]])
  }

  /** Finds the cached model for `clazz`, walking up the superclass chain, or
    * returns null when no ancestor below EntityLivingBase has a cached model.
    */
  @Nullable
  def getEntityModel[E <: EntityLivingBase](clazz: Class[E]): EntityModel[E, E] = {
    var classCurrent: Class[_] = clazz
    var info: EntityModel[E, E] = null
    // Walk the hierarchy until a model is found or EntityLivingBase is reached.
    // (The loop previously tested the constant `clazz` instead of the walker
    // `classCurrent`, and used Map.apply, which throws on a miss instead of
    // letting the walk continue — both contradicted the @Nullable contract.)
    while (classCurrent != classOf[EntityLivingBase] && info == null) {
      info = this.entityModels.get(classCurrent).orNull.asInstanceOf[EntityModel[E, E]]
      classCurrent = classCurrent.getSuperclass
    }
    info
  }
}
| TheTemportalist/EsoTeriCraft | src/main/scala/temportalist/esotericraft/emulation/client/ModelHandler.scala | Scala | apache-2.0 | 1,877 |
package io.youi.component.types
import io.youi.Stringify
/** CSS `text-overflow` values, carrying their keyword representation. */
sealed abstract class TextOverflow(val value: String)

object TextOverflow extends Stringify[TextOverflow] {
  case object Clip extends TextOverflow("clip")
  case object Ellipsis extends TextOverflow("ellipsis")

  // CSS keyword -> instance lookup for the two known members.
  lazy val map: Map[String, TextOverflow] = Map(Clip.value -> Clip, Ellipsis.value -> Ellipsis)

  override def fromString(value: String): Option[TextOverflow] = map.get(value.toLowerCase)

  // "clip" is the browser default, so it is rendered as the absence of a value.
  override def toString(value: TextOverflow): Option[String] = value match {
    case Clip => None
    case other => Some(other.value)
  }
} | outr/youi | gui/src/main/scala/io/youi/component/types/TextOverflow.scala | Scala | mit | 608 |
/**
* Copyright 2013 Israel Freitas (israel.araujo.freitas@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package brain.models
import com.ansvia.graph.BlueprintsWrapper._
case class Configuration(rootId:String, defaultDepthTraverse:Int) extends DbObject | ifreitas/brain | src/main/scala/brain/models/Configuration.scala | Scala | apache-2.0 | 785 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.db
import java.sql.SQLException
import org.jdbcdslog.LogSqlDataSource
import org.specs2.mutable.{ After, Specification }
/** Specs for the `Databases` helpers: creation, in-memory databases, the
  * connection/transaction helpers, and post-shutdown behaviour.
  */
class DatabasesSpec extends Specification {

  "Databases" should {

    "create database" in new WithDatabase {
      val db = Databases(name = "test", driver = "org.h2.Driver", url = "jdbc:h2:mem:test")
      db.name must_== "test"
      db.url must_== "jdbc:h2:mem:test"
    }

    "create database with named arguments" in new WithDatabase {
      val db = Databases(name = "test", driver = "org.h2.Driver", url = "jdbc:h2:mem:test")
      db.name must_== "test"
      db.url must_== "jdbc:h2:mem:test"
    }

    "create default database" in new WithDatabase {
      val db = Databases(driver = "org.h2.Driver", url = "jdbc:h2:mem:default")
      db.name must_== "default"
      db.url must_== "jdbc:h2:mem:default"
    }

    "create database with log sql" in new WithDatabase {
      // logSql wraps the underlying data source in a jdbcdslog proxy.
      val config = Map("logSql" -> "true")
      val db = Databases(driver = "org.h2.Driver", url = "jdbc:h2:mem:default", config = config)
      db.dataSource must beAnInstanceOf[LogSqlDataSource]
    }

    "create default in-memory database" in new WithDatabase {
      val db = Databases.inMemory()
      db.name must_== "default"
      db.url must beEqualTo("jdbc:h2:mem:default")
    }

    "create named in-memory database" in new WithDatabase {
      val db = Databases.inMemory(name = "test")
      db.name must_== "test"
      db.url must beEqualTo("jdbc:h2:mem:test")
    }

    "create in-memory database with url options" in new WithDatabase {
      val db = Databases.inMemory(urlOptions = Map("MODE" -> "MySQL"))
      db.name must_== "default"
      db.url must_== "jdbc:h2:mem:default;MODE=MySQL"
    }

    "create in-memory database with url as is when there are no additional options" in new WithDatabase {
      val db = Databases.inMemory()
      db.name must_== "default"
      db.url must_== "jdbc:h2:mem:default"
    }

    "supply connections" in new WithDatabase {
      val db = Databases.inMemory(name = "test-connection")
      val connection = db.getConnection
      connection.createStatement.execute("create table test (id bigint not null, name varchar(255))")
      connection.close()
    }

    "enable autocommit on connections by default" in new WithDatabase {
      // A second connection must see rows written through the first without an
      // explicit commit, proving autocommit is on.
      val db = Databases.inMemory(name = "test-autocommit")
      val c1 = db.getConnection
      val c2 = db.getConnection
      try {
        c1.createStatement.execute("create table test (id bigint not null, name varchar(255))")
        c1.createStatement.execute("insert into test (id, name) values (1, 'alice')")
        val results = c2.createStatement.executeQuery("select * from test")
        results.next must beTrue
        results.next must beFalse
      } finally {
        c1.close()
        c2.close()
      }
    }

    "provide connection helper" in new WithDatabase {
      val db = Databases.inMemory(name = "test-withConnection")
      db.withConnection { c =>
        c.createStatement.execute("create table test (id bigint not null, name varchar(255))")
        c.createStatement.execute("insert into test (id, name) values (1, 'alice')")
        val results = c.createStatement.executeQuery("select * from test")
        results.next must beTrue
        results.next must beFalse
      }
    }

    "provide transaction helper" in new WithDatabase {
      val db = Databases.inMemory(name = "test-withTransaction")
      db.withTransaction { c =>
        c.createStatement.execute("create table test (id bigint not null, name varchar(255))")
        c.createStatement.execute("insert into test (id, name) values (1, 'alice')")
      }
      db.withConnection { c =>
        val results = c.createStatement.executeQuery("select * from test")
        results.next must beTrue
        results.next must beFalse
      }
      // A throwing transaction must roll back the insert of 'bob'.
      db.withTransaction { c =>
        c.createStatement.execute("insert into test (id, name) values (2, 'bob')")
        throw new RuntimeException("boom")
        // NOTE(review): `success` below is unreachable because of the throw above.
        success
      } must throwA[RuntimeException](message = "boom")
      db.withConnection { c =>
        val results = c.createStatement.executeQuery("select * from test")
        results.next must beTrue
        results.next must beFalse
      }
    }

    "not supply connections after shutdown" in {
      val db = Databases.inMemory(name = "test-shutdown")
      db.getConnection.close()
      db.shutdown()
      db.getConnection.close() must throwA[SQLException].like {
        case e => e.getMessage must endWith("has been closed.")
      }
    }

    "not supply connections after shutdown a database with log sql" in {
      val config = Map("logSql" -> "true")
      val db = Databases(driver = "org.h2.Driver", url = "jdbc:h2:mem:default", config = config)
      db.getConnection.close()
      db.shutdown()
      db.getConnection.close() must throwA[SQLException]
    }
  }

  trait WithDatabase extends After {
    def db: Database
    // NOTE(review): cleanup is disabled — `after` skips db.shutdown(), so the
    // databases created by these examples are never torn down. Confirm intent.
    def after = () //db.shutdown()
  }
}
| Shenker93/playframework | framework/src/play-jdbc/src/test/scala/play/api/db/DatabasesSpec.scala | Scala | apache-2.0 | 5,101 |
package mesosphere.marathon
package core.matcher.reconcile
import mesosphere.marathon.core.matcher.base.OfferMatcher
import mesosphere.marathon.core.matcher.reconcile.impl.OfferMatcherReconciler
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.storage.repository.GroupRepository
/** Wiring for the offer matcher that reconciles offered reservations against
  * the instances and groups Marathon already knows about.
  */
class OfferMatcherReconciliationModule(instanceTracker: InstanceTracker, groupRepository: GroupRepository) {

  /** An offer matcher that performs reconciliation on the expected reservations. */
  lazy val offerMatcherReconciler: OfferMatcher = new OfferMatcherReconciler(instanceTracker, groupRepository)
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/core/matcher/reconcile/OfferMatcherReconciliationModule.scala | Scala | apache-2.0 | 628 |
package controllers.namedslices
import play.api.mvc._
import controllers.DelvingController
import com.escalatesoft.subcut.inject.BindingModule
import models.NamedSlice
import models.cms.CMSPage
import controllers.search.SearchResults
/**
*
* @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
*/
/** Public controller for published "named slices": a curated landing page plus
  * a search scoped to the slice's configured query.
  *
  * @author Manuel Bernhardt <bernhardt.manuel@gmail.com>
  */
class NamedSlices(implicit val bindingModule: BindingModule) extends DelvingController with SearchResults {

  /** Renders the slice page for `key`, with the slice's CMS content localized to
    * the request language (empty content when no localized page exists).
    * Responds 404 when no published slice matches the key. */
  def view(key: String) = Root {
    MultitenantAction {
      implicit request =>
        NamedSlice.dao.findOnePublishedByKey(key) map { slice =>
          val pageContent = CMSPage.dao.findByKeyAndLanguage(slice.cmsPageKey, getLang.language).headOption.map { page =>
            page.content
          } getOrElse {
            ""
          }
          Ok(Template('pageContent -> pageContent, 'name -> slice.name, 'key -> slice.key))
        } getOrElse {
          NotFound(key)
        }
    }
  }

  /** Runs `query` restricted by the slice's query filter; 404 when the slice is
    * missing or not published. */
  def search(key: String, query: String): Action[AnyContent] = Root {
    MultitenantAction {
      implicit request =>
        NamedSlice.dao.findOnePublishedByKey(key) map { slice =>
          searchResults(query, slice.query.toQueryFilter, s"/slices/${slice.key}/search")
        } getOrElse {
          NotFound(key)
        }
    }
  }
}
| delving/culture-hub | modules/namedSlices/app/controllers/namedslices/NamedSlices.scala | Scala | apache-2.0 | 1,264 |
package wow.realm.protocol.payloads
import wow.realm.entities.Guid
import wow.realm.protocol._
import scodec.Codec
import scodec.codecs._
/** Client → server request to delete the character identified by `guid`. */
case class ClientCharacterDelete(guid: Guid) extends Payload with ClientSide

object ClientCharacterDelete {
  implicit val opCodeProvider: OpCodeProvider[ClientCharacterDelete] = OpCodes.CharDelete

  // Wire format: just the character's GUID.
  implicit val codec: Codec[ClientCharacterDelete] = ("guid" | Guid.codec).as[ClientCharacterDelete]
}
/** Server → client response carrying the character-deletion result code. */
case class ServerCharacterDelete(responseCode: CharacterDeletionResults.Value) extends Payload with ServerSide

object ServerCharacterDelete {
  implicit val opCodeProvider: OpCodeProvider[ServerCharacterDelete] = OpCodes.SCharDelete

  // Wire format: a single deletion-result enum value.
  implicit val codec: Codec[ServerCharacterDelete] =
    ("responseCode" | Codec[CharacterDeletionResults.Value]).as[ServerCharacterDelete]
}
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/realm/protocol/payloads/CharacterDelete.scala | Scala | mit | 823 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.