code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import java.beans.{Introspector, PropertyDescriptor}
import java.lang.{Iterable => JIterable}
import java.lang.reflect.Type
import java.util.{Iterator => JIterator, List => JList, Map => JMap}
import scala.language.existentials
import com.google.common.reflect.TypeToken
import org.apache.spark.sql.catalyst.analysis.{GetColumnByOrdinal, UnresolvedAttribute, UnresolvedExtractValue}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* Type-inference utilities for POJOs and Java collections.
*/
object JavaTypeInference {

  // Cached TypeTokens for the Java collection interfaces special-cased below.
  private val iterableType = TypeToken.of(classOf[JIterable[_]])
  private val mapType = TypeToken.of(classOf[JMap[_, _]])
  private val listType = TypeToken.of(classOf[JList[_]])

  // Generic return types of the collection accessors; these are resolved against
  // a concrete TypeToken to recover element / key / value types (see
  // `elementType` and `mapKeyValueType`).
  private val iteratorReturnType = classOf[JIterable[_]].getMethod("iterator").getGenericReturnType
  private val nextReturnType = classOf[JIterator[_]].getMethod("next").getGenericReturnType
  private val keySetReturnType = classOf[JMap[_, _]].getMethod("keySet").getGenericReturnType
  private val valuesReturnType = classOf[JMap[_, _]].getMethod("values").getGenericReturnType

  /**
   * Infers the corresponding SQL data type of a JavaBean class.
   * @param beanClass Java type
   * @return (SQL data type, nullable)
   */
  def inferDataType(beanClass: Class[_]): (DataType, Boolean) = {
    inferDataType(TypeToken.of(beanClass))
  }

  /**
   * Infers the corresponding SQL data type of a Java type.
   * @param beanType Java type
   * @return (SQL data type, nullable)
   */
  private[sql] def inferDataType(beanType: Type): (DataType, Boolean) = {
    inferDataType(TypeToken.of(beanType))
  }

  /**
   * Infers the corresponding SQL data type of a Java type.
   *
   * @param typeToken Java type
   * @param seenTypeSet bean classes already visited on the current recursion
   *                    path; used to detect circular bean references
   * @return (SQL data type, nullable)
   */
  private def inferDataType(typeToken: TypeToken[_], seenTypeSet: Set[Class[_]] = Set.empty)
    : (DataType, Boolean) = {
    typeToken.getRawType match {
      // User-defined types, either annotated on the class or registered globally.
      case c: Class[_] if c.isAnnotationPresent(classOf[SQLUserDefinedType]) =>
        (c.getAnnotation(classOf[SQLUserDefinedType]).udt().newInstance(), true)

      case c: Class[_] if UDTRegistration.exists(c.getName) =>
        val udt = UDTRegistration.getUDTFor(c.getName).get.newInstance()
          .asInstanceOf[UserDefinedType[_ >: Null]]
        (udt, true)

      case c: Class[_] if c == classOf[java.lang.String] => (StringType, true)
      case c: Class[_] if c == classOf[Array[Byte]] => (BinaryType, true)

      // Java primitives can never hold null, hence nullable = false.
      case c: Class[_] if c == java.lang.Short.TYPE => (ShortType, false)
      case c: Class[_] if c == java.lang.Integer.TYPE => (IntegerType, false)
      case c: Class[_] if c == java.lang.Long.TYPE => (LongType, false)
      case c: Class[_] if c == java.lang.Double.TYPE => (DoubleType, false)
      case c: Class[_] if c == java.lang.Byte.TYPE => (ByteType, false)
      case c: Class[_] if c == java.lang.Float.TYPE => (FloatType, false)
      case c: Class[_] if c == java.lang.Boolean.TYPE => (BooleanType, false)

      // Boxed wrappers may be null, hence nullable = true.
      case c: Class[_] if c == classOf[java.lang.Short] => (ShortType, true)
      case c: Class[_] if c == classOf[java.lang.Integer] => (IntegerType, true)
      case c: Class[_] if c == classOf[java.lang.Long] => (LongType, true)
      case c: Class[_] if c == classOf[java.lang.Double] => (DoubleType, true)
      case c: Class[_] if c == classOf[java.lang.Byte] => (ByteType, true)
      case c: Class[_] if c == classOf[java.lang.Float] => (FloatType, true)
      case c: Class[_] if c == classOf[java.lang.Boolean] => (BooleanType, true)

      case c: Class[_] if c == classOf[java.math.BigDecimal] => (DecimalType.SYSTEM_DEFAULT, true)
      case c: Class[_] if c == classOf[java.math.BigInteger] => (DecimalType.BigIntDecimal, true)
      case c: Class[_] if c == classOf[java.sql.Date] => (DateType, true)
      case c: Class[_] if c == classOf[java.sql.Timestamp] => (TimestampType, true)

      case _ if typeToken.isArray =>
        val (dataType, nullable) = inferDataType(typeToken.getComponentType, seenTypeSet)
        (ArrayType(dataType, nullable), true)

      case _ if iterableType.isAssignableFrom(typeToken) =>
        val (dataType, nullable) = inferDataType(elementType(typeToken), seenTypeSet)
        (ArrayType(dataType, nullable), true)

      case _ if mapType.isAssignableFrom(typeToken) =>
        val (keyType, valueType) = mapKeyValueType(typeToken)
        val (keyDataType, _) = inferDataType(keyType, seenTypeSet)
        // Only value nullability is recorded in MapType; key nullability is discarded.
        val (valueDataType, nullable) = inferDataType(valueType, seenTypeSet)
        (MapType(keyDataType, valueDataType, nullable), true)

      // Anything else is treated as a JavaBean whose readable properties become
      // struct fields.
      case other =>
        if (seenTypeSet.contains(other)) {
          throw new UnsupportedOperationException(
            "Cannot have circular references in bean class, but got the circular reference " +
              s"of class $other")
        }

        // TODO: we should only collect properties that have getter and setter. However, some tests
        // pass in scala case class as java bean class which doesn't have getter and setter.
        val properties = getJavaBeanReadableProperties(other)
        val fields = properties.map { property =>
          val returnType = typeToken.method(property.getReadMethod).getReturnType
          val (dataType, nullable) = inferDataType(returnType, seenTypeSet + other)
          new StructField(property.getName, dataType, nullable)
        }
        (new StructType(fields), true)
    }
  }

  /**
   * Readable JavaBean properties of `beanClass`, excluding the synthetic
   * `class` property that Introspector always reports.
   */
  def getJavaBeanReadableProperties(beanClass: Class[_]): Array[PropertyDescriptor] = {
    val beanInfo = Introspector.getBeanInfo(beanClass)
    beanInfo.getPropertyDescriptors.filterNot(_.getName == "class")
      .filter(_.getReadMethod != null)
  }

  // Properties with both a getter and a setter; required by the bean deserializer.
  private def getJavaBeanReadableAndWritableProperties(
      beanClass: Class[_]): Array[PropertyDescriptor] = {
    getJavaBeanReadableProperties(beanClass).filter(_.getWriteMethod != null)
  }

  // Element type of a java.lang.Iterable, resolved through iterator()/next() signatures.
  private def elementType(typeToken: TypeToken[_]): TypeToken[_] = {
    val typeToken2 = typeToken.asInstanceOf[TypeToken[_ <: JIterable[_]]]
    val iterableSuperType = typeToken2.getSupertype(classOf[JIterable[_]])
    val iteratorType = iterableSuperType.resolveType(iteratorReturnType)
    iteratorType.resolveType(nextReturnType)
  }

  // Key and value types of a java.util.Map, resolved through keySet()/values().
  private def mapKeyValueType(typeToken: TypeToken[_]): (TypeToken[_], TypeToken[_]) = {
    val typeToken2 = typeToken.asInstanceOf[TypeToken[_ <: JMap[_, _]]]
    val mapSuperType = typeToken2.getSupertype(classOf[JMap[_, _]])
    val keyType = elementType(mapSuperType.resolveType(keySetReturnType))
    val valueType = elementType(mapSuperType.resolveType(valuesReturnType))
    keyType -> valueType
  }

  /**
   * Returns the Spark SQL DataType for a given java class. Where this is not an exact mapping
   * to a native type, an ObjectType is returned.
   *
   * Unlike `inferDataType`, this function doesn't do any massaging of types into the Spark SQL type
   * system. As a result, ObjectType will be returned for things like boxed Integers.
   */
  private def inferExternalType(cls: Class[_]): DataType = cls match {
    case c if c == java.lang.Boolean.TYPE => BooleanType
    case c if c == java.lang.Byte.TYPE => ByteType
    case c if c == java.lang.Short.TYPE => ShortType
    case c if c == java.lang.Integer.TYPE => IntegerType
    case c if c == java.lang.Long.TYPE => LongType
    case c if c == java.lang.Float.TYPE => FloatType
    case c if c == java.lang.Double.TYPE => DoubleType
    case c if c == classOf[Array[Byte]] => BinaryType
    case _ => ObjectType(cls)
  }

  /**
   * Returns an expression that can be used to deserialize an internal row to an object of java bean
   * `T` with a compatible schema. Fields of the row will be extracted using UnresolvedAttributes
   * of the same name as the constructor arguments. Nested classes will have their fields accessed
   * using UnresolvedExtractValue.
   */
  def deserializerFor(beanClass: Class[_]): Expression = {
    deserializerFor(TypeToken.of(beanClass), None)
  }

  private def deserializerFor(typeToken: TypeToken[_], path: Option[Expression]): Expression = {
    /** Returns the current path with a sub-field extracted. */
    def addToPath(part: String): Expression = path
      .map(p => UnresolvedExtractValue(p, expressions.Literal(part)))
      .getOrElse(UnresolvedAttribute(part))

    /** Returns the current path or `GetColumnByOrdinal`. */
    def getPath: Expression = path.getOrElse(GetColumnByOrdinal(0, inferDataType(typeToken)._1))

    typeToken.getRawType match {
      // Primitives / binary: the internal representation already matches the
      // external one, so just read the value at the current path.
      case c if !inferExternalType(c).isInstanceOf[ObjectType] => getPath

      // Boxed primitives: box via the wrapper class's static valueOf.
      case c if c == classOf[java.lang.Short] ||
        c == classOf[java.lang.Integer] ||
        c == classOf[java.lang.Long] ||
        c == classOf[java.lang.Double] ||
        c == classOf[java.lang.Float] ||
        c == classOf[java.lang.Byte] ||
        c == classOf[java.lang.Boolean] =>
        StaticInvoke(
          c,
          ObjectType(c),
          "valueOf",
          getPath :: Nil,
          propagateNull = true)

      case c if c == classOf[java.sql.Date] =>
        StaticInvoke(
          DateTimeUtils.getClass,
          ObjectType(c),
          "toJavaDate",
          getPath :: Nil,
          propagateNull = true)

      case c if c == classOf[java.sql.Timestamp] =>
        StaticInvoke(
          DateTimeUtils.getClass,
          ObjectType(c),
          "toJavaTimestamp",
          getPath :: Nil,
          propagateNull = true)

      case c if c == classOf[java.lang.String] =>
        Invoke(getPath, "toString", ObjectType(classOf[String]))

      case c if c == classOf[java.math.BigDecimal] =>
        Invoke(getPath, "toJavaBigDecimal", ObjectType(classOf[java.math.BigDecimal]))

      case c if c.isArray =>
        val elementType = c.getComponentType
        // Primitive arrays convert wholesale through the specialized toXxxArray
        // accessors; object arrays are converted element-by-element via MapObjects.
        val primitiveMethod = elementType match {
          case c if c == java.lang.Boolean.TYPE => Some("toBooleanArray")
          case c if c == java.lang.Byte.TYPE => Some("toByteArray")
          case c if c == java.lang.Short.TYPE => Some("toShortArray")
          case c if c == java.lang.Integer.TYPE => Some("toIntArray")
          case c if c == java.lang.Long.TYPE => Some("toLongArray")
          case c if c == java.lang.Float.TYPE => Some("toFloatArray")
          case c if c == java.lang.Double.TYPE => Some("toDoubleArray")
          case _ => None
        }
        primitiveMethod.map { method =>
          Invoke(getPath, method, ObjectType(c))
        }.getOrElse {
          Invoke(
            MapObjects(
              p => deserializerFor(typeToken.getComponentType, Some(p)),
              getPath,
              inferDataType(elementType)._1),
            "array",
            ObjectType(c))
        }

      case c if listType.isAssignableFrom(typeToken) =>
        val et = elementType(typeToken)
        // Deserialize into an Object[] first, then wrap as a java.util.List.
        val array =
          Invoke(
            MapObjects(
              p => deserializerFor(et, Some(p)),
              getPath,
              inferDataType(et)._1),
            "array",
            ObjectType(classOf[Array[Any]]))
        StaticInvoke(classOf[java.util.Arrays], ObjectType(c), "asList", array :: Nil)

      case _ if mapType.isAssignableFrom(typeToken) =>
        val (keyType, valueType) = mapKeyValueType(typeToken)
        val keyDataType = inferDataType(keyType)._1
        val valueDataType = inferDataType(valueType)._1

        // Deserialize keys and values into separate object arrays, then zip
        // them back into a java.util.Map via ArrayBasedMapData.toJavaMap.
        val keyData =
          Invoke(
            MapObjects(
              p => deserializerFor(keyType, Some(p)),
              Invoke(getPath, "keyArray", ArrayType(keyDataType)),
              keyDataType),
            "array",
            ObjectType(classOf[Array[Any]]))

        val valueData =
          Invoke(
            MapObjects(
              p => deserializerFor(valueType, Some(p)),
              Invoke(getPath, "valueArray", ArrayType(valueDataType)),
              valueDataType),
            "array",
            ObjectType(classOf[Array[Any]]))

        StaticInvoke(
          ArrayBasedMapData.getClass,
          ObjectType(classOf[JMap[_, _]]),
          "toJavaMap",
          keyData :: valueData :: Nil)

      // Everything else is a JavaBean: instantiate with the no-arg constructor
      // and populate through the property setters.
      case other =>
        val properties = getJavaBeanReadableAndWritableProperties(other)
        val setters = properties.map { p =>
          val fieldName = p.getName
          val fieldType = typeToken.method(p.getReadMethod).getReturnType
          val (_, nullable) = inferDataType(fieldType)
          val constructor = deserializerFor(fieldType, Some(addToPath(fieldName)))
          // Non-nullable field types get an explicit null check at runtime.
          val setter = if (nullable) {
            constructor
          } else {
            AssertNotNull(constructor, Seq("currently no type path record in java"))
          }
          p.getWriteMethod.getName -> setter
        }.toMap

        val newInstance = NewInstance(other, Nil, ObjectType(other), propagateNull = false)
        val result = InitializeJavaBean(newInstance, setters)

        if (path.nonEmpty) {
          // Nested bean: preserve null instead of materializing an empty bean.
          expressions.If(
            IsNull(getPath),
            expressions.Literal.create(null, ObjectType(other)),
            result
          )
        } else {
          result
        }
    }
  }

  /**
   * Returns an expression for serializing an object of the given type to an internal row.
   */
  def serializerFor(beanClass: Class[_]): CreateNamedStruct = {
    val inputObject = BoundReference(0, ObjectType(beanClass), nullable = true)
    val nullSafeInput = AssertNotNull(inputObject, Seq("top level input bean"))
    serializerFor(nullSafeInput, TypeToken.of(beanClass)) match {
      // Bean case: unwrap the null-guarding If to get the struct directly.
      case expressions.If(_, _, s: CreateNamedStruct) => s
      // Non-struct result: wrap the single value in a one-field struct.
      case other => CreateNamedStruct(expressions.Literal("value") :: other :: Nil)
    }
  }

  private def serializerFor(inputObject: Expression, typeToken: TypeToken[_]): Expression = {

    /** Converts an array/collection input expression into a Catalyst array. */
    def toCatalystArray(input: Expression, elementType: TypeToken[_]): Expression = {
      val (dataType, nullable) = inferDataType(elementType)
      if (ScalaReflection.isNativeType(dataType)) {
        // Native element types can be wrapped directly, no per-element conversion.
        NewInstance(
          classOf[GenericArrayData],
          input :: Nil,
          dataType = ArrayType(dataType, nullable))
      } else {
        MapObjects(serializerFor(_, elementType), input, ObjectType(elementType.getRawType))
      }
    }

    if (!inputObject.dataType.isInstanceOf[ObjectType]) {
      // Already in the internal representation; nothing to convert.
      inputObject
    } else {
      typeToken.getRawType match {
        case c if c == classOf[String] =>
          StaticInvoke(
            classOf[UTF8String],
            StringType,
            "fromString",
            inputObject :: Nil)

        case c if c == classOf[java.sql.Timestamp] =>
          StaticInvoke(
            DateTimeUtils.getClass,
            TimestampType,
            "fromJavaTimestamp",
            inputObject :: Nil)

        case c if c == classOf[java.sql.Date] =>
          StaticInvoke(
            DateTimeUtils.getClass,
            DateType,
            "fromJavaDate",
            inputObject :: Nil)

        case c if c == classOf[java.math.BigDecimal] =>
          StaticInvoke(
            Decimal.getClass,
            DecimalType.SYSTEM_DEFAULT,
            "apply",
            inputObject :: Nil)

        // Boxed primitives: unbox via the corresponding xxxValue() call.
        case c if c == classOf[java.lang.Boolean] =>
          Invoke(inputObject, "booleanValue", BooleanType)
        case c if c == classOf[java.lang.Byte] =>
          Invoke(inputObject, "byteValue", ByteType)
        case c if c == classOf[java.lang.Short] =>
          Invoke(inputObject, "shortValue", ShortType)
        case c if c == classOf[java.lang.Integer] =>
          Invoke(inputObject, "intValue", IntegerType)
        case c if c == classOf[java.lang.Long] =>
          Invoke(inputObject, "longValue", LongType)
        case c if c == classOf[java.lang.Float] =>
          Invoke(inputObject, "floatValue", FloatType)
        case c if c == classOf[java.lang.Double] =>
          Invoke(inputObject, "doubleValue", DoubleType)

        case _ if typeToken.isArray =>
          toCatalystArray(inputObject, typeToken.getComponentType)

        case _ if listType.isAssignableFrom(typeToken) =>
          toCatalystArray(inputObject, elementType(typeToken))

        case _ if mapType.isAssignableFrom(typeToken) =>
          val (keyType, valueType) = mapKeyValueType(typeToken)
          ExternalMapToCatalyst(
            inputObject,
            ObjectType(keyType.getRawType),
            serializerFor(_, keyType),
            keyNullable = true,
            ObjectType(valueType.getRawType),
            serializerFor(_, valueType),
            valueNullable = true
          )

        // JavaBean: serialize each readable+writable property into a struct
        // field named after the property.
        case other =>
          val properties = getJavaBeanReadableAndWritableProperties(other)
          val nonNullOutput = CreateNamedStruct(properties.flatMap { p =>
            val fieldName = p.getName
            val fieldType = typeToken.method(p.getReadMethod).getReturnType
            val fieldValue = Invoke(
              inputObject,
              p.getReadMethod.getName,
              inferExternalType(fieldType.getRawType))
            expressions.Literal(fieldName) :: serializerFor(fieldValue, fieldType) :: Nil
          })

          // A null bean serializes to a null struct rather than a struct of nulls.
          val nullOutput = expressions.Literal.create(null, nonNullOutput.dataType)
          expressions.If(IsNull(inputObject), nullOutput, nonNullOutput)
      }
    }
  }
}
| jlopezmalla/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/JavaTypeInference.scala | Scala | apache-2.0 | 18,354 |
package com.sksamuel.elastic4s.http.search.aggs
import com.sksamuel.elastic4s.http.search.SearchBodyBuilderFn
import com.sksamuel.elastic4s.searches.{DateHistogramInterval, SearchRequest}
import org.scalatest.{FunSuite, Matchers}
/**
 * Verifies the JSON emitted for a cumulative-sum pipeline sub-aggregation
 * nested inside a date-histogram aggregation.
 */
class CumulativeSumAggBuilderTest extends FunSuite with Matchers {
  import com.sksamuel.elastic4s.http.ElasticDsl._

  test("cumulative sum agg should match the basic spec") {
    // Monthly date histogram with a "sales" sum sub-agg and a cumulative-sum
    // pipeline agg referencing it by name (buckets_path), plus a custom
    // number format and metadata map.
    val search = SearchRequest("myIndex" / "myType").aggs(
      dateHistogramAgg("sales_per_month", "date")
        .interval(DateHistogramInterval.Month)
        .subaggs(
          Seq(
            sumAggregation("sales").field("price"),
            cumulativeSumAggregation("cumulative_sales", "sales")
              .format("$")
              .metadata(
                Map("color" -> "blue")
              )
          )
        )
    )
    // Expected request body, byte-for-byte.
    SearchBodyBuilderFn(search).string() shouldBe
      """{"version":true,"aggs":{"sales_per_month":{"date_histogram":{"interval":"1M","field":"date"},"aggs":{"sales":{"sum":{"field":"price"}},"cumulative_sales":{"cumulative_sum":{"buckets_path":"sales","format":"$"},"meta":{"color":"blue"}}}}}}"""
  }
}
}
| Tecsisa/elastic4s | elastic4s-http/src/test/scala/com/sksamuel/elastic4s/http/search/aggs/CumulativeSumAggBuilderTest.scala | Scala | apache-2.0 | 1,154 |
/*
* Based on https://raw.githubusercontent.com/scalaz/scalaz/v7.1.7/core/src/main/scala/scalaz/NonEmptyList.scala
* License: https://raw.githubusercontent.com/scalaz/scalaz/v7.1.7/etc/LICENCE
*/
package org.http4s.util
import scala.annotation.tailrec
import scalaz._
/** A singly-linked list that is guaranteed to be non-empty.
*
* Forked from Scalaz 7.1 after moving past Scalaz 7.1 out of a desire to maintain
* one based on `List`.
*/
final class NonEmptyList[+A] private[util] (val head: A, val tail: List[A]) {
  import NonEmptyList._
  import Zipper._

  /** Prepends a single element. */
  def <::[AA >: A](b: AA): NonEmptyList[AA] = nel(b, head :: tail)

  /** Prepends a (possibly empty) list. */
  def <:::[AA >: A](bs: List[AA]): NonEmptyList[AA] = bs match {
    case Nil => this
    case b :: bs => nel(b, bs ::: list)
  }

  /** Appends a (possibly empty) list. */
  def :::>[AA >: A](bs: List[AA]): NonEmptyList[AA] = nel(head, tail ::: bs)

  /** Append one nonempty list to another. */
  def append[AA >: A](f2: NonEmptyList[AA]): NonEmptyList[AA] = list <::: f2

  def map[B](f: A => B): NonEmptyList[B] = nel(f(head), tail.map(f))

  def foreach(f: A => Unit): Unit = {
    f(head)
    tail foreach f
  }

  import collection.mutable.ListBuffer

  def flatMap[B](f: A => NonEmptyList[B]): NonEmptyList[B] = {
    // Accumulates into a local mutable buffer for efficiency; each f(a) is
    // itself non-empty, so the overall result is guaranteed non-empty.
    val b = new ListBuffer[B]
    val p = f(head)
    b += p.head
    b ++= p.tail
    tail.foreach {
      a =>
        val p = f(a)
        b += p.head
        b ++= p.tail
    }
    val bb = b.toList
    nel(bb.head, bb.tail)
  }

  /** Removes duplicates, keeping the first occurrence of each element. */
  def distinct[AA >: A](implicit A: Order[AA]): NonEmptyList[AA] = {
    @tailrec def loop(src: List[A], seen: ISet[AA], acc: NonEmptyList[AA]): NonEmptyList[AA] =
      src match {
        case h :: t =>
          if (seen.notMember(h)) loop(t, seen.insert(h), h <:: acc)
          else loop(t, seen, acc)
        case Nil =>
          // Elements were prepended, so reverse to restore original order.
          acc.reverse
      }
    loop(tail, ISet.singleton(head), NonEmptyList(head))
  }

  def traverse1[F[_], B](f: A => F[B])(implicit F: Apply[F]): F[NonEmptyList[B]] = {
    import std.list._
    tail match {
      case Nil => F.map(f(head))(nel(_, Nil))
      // Non-trivial case delegates to OneAnd's Traverse1 instance.
      case b :: bs => F.apply2(f(head), OneAnd.oneAndTraverse[List].traverse1(OneAnd(b, bs))(f)) {
        case (h, t) => nel(h, t.head :: t.tail)
      }
    }
  }

  def list: List[A] = head :: tail

  def stream: Stream[A] = head #:: tail.toStream

  /** A zipper focused on the head. */
  def toZipper: Zipper[A] = zipper(Stream.Empty, head, tail.toStream)

  /** A zipper focused on the last element. */
  def zipperEnd: Zipper[A] = {
    import Stream._
    tail.reverse match {
      case Nil => zipper(empty, head, empty)
      case t :: ts => zipper(ts.toStream :+ head, t, empty)
    }
  }

  /** All elements but the last (may be empty). */
  def init: List[A] = if(tail.isEmpty) Nil else (head :: tail.init)

  def last: A = if(tail.isEmpty) head else tail.last

  /** All non-empty suffixes of this list, longest first. */
  def tails: NonEmptyList[NonEmptyList[A]] = {
    @annotation.tailrec
    def tails0(as: NonEmptyList[A], accum: List[NonEmptyList[A]]): NonEmptyList[NonEmptyList[A]] =
      as.tail match {
        case Nil => nel(as, accum).reverse
        case h :: t => tails0(nel(h, t), as :: accum)
      }
    tails0(this, Nil)
  }

  // The @unchecked matches below are safe: reversing/sorting a non-empty list
  // always yields a non-empty list, so the Nil case cannot occur.
  def reverse: NonEmptyList[A] = (list.reverse: @unchecked) match {
    case x :: xs => nel(x, xs)
  }

  def sortBy[B](f: A => B)(implicit o: Order[B]): NonEmptyList[A] = (list.sortBy(f)(o.toScalaOrdering): @unchecked) match {
    case x :: xs => nel(x, xs)
  }

  def sortWith(lt: (A, A) => Boolean): NonEmptyList[A] = (list.sortWith(lt): @unchecked) match {
    case x :: xs => nel(x, xs)
  }

  def sorted[B >: A](implicit o: Order[B]): NonEmptyList[A] = (list.sorted(o.toScalaOrdering): @unchecked) match {
    case x :: xs => nel(x, xs)
  }

  def size: Int = 1 + tail.size

  def zip[B](b: => NonEmptyList[B]): NonEmptyList[(A, B)] = {
    // b is by-name: force it once so head and tail see the same value.
    val _b = b
    nel((head, _b.head), tail zip _b.tail)
  }

  def unzip[X, Y](implicit ev: A <:< (X, Y)): (NonEmptyList[X], NonEmptyList[Y]) = {
    val (a, b) = head: (X, Y)
    val (aa, bb) = tail.unzip: (List[X], List[Y])
    (nel(a, aa), nel(b, bb))
  }

  def zipWithIndex: NonEmptyList[(A, Int)] = {
    @annotation.tailrec
    def loop(as: List[A], i: Int, acc: List[(A, Int)]): List[(A, Int)] =
      as match {
        case x :: y => loop(y, i + 1, (x, i) :: acc)
        case _ => acc.reverse
      }
    new NonEmptyList((head, 0), loop(tail, 1, Nil))
  }

  override def toString: String = "NonEmpty" + (head :: tail)

  // equals/hashCode both delegate to the underlying List, keeping them consistent.
  override def equals(any: Any): Boolean =
    any match {
      case that: NonEmptyList[_] => this.list == that.list
      case _ => false
    }

  override def hashCode: Int =
    list.hashCode

  def exists(p: A => Boolean): Boolean =
    p(head) || tail.exists(p)

  def contains(elem: Any): Boolean =
    head == elem || tail.contains(elem)

  def collectFirst[B](pf: PartialFunction[A, B]): Option[B] =
    pf.lift(head).orElse(tail.collectFirst(pf))

  def mkString(sep: String): String = {
    val sb = new StringBuilder
    sb.append(head)
    tail.foreach(a => sb.append(sep).append(a))
    sb.toString
  }

  def length: Int =
    1 + tail.length
}
object NonEmptyList extends NonEmptyListInstances with NonEmptyListFunctions {

  /** Builds a NonEmptyList from a head and a varargs tail. */
  def apply[A](h: A, t: A*): NonEmptyList[A] =
    nels(h, t: _*)

  /** Extractor exposing the head and tail for pattern matching. */
  def unapplySeq[A](v: NonEmptyList[A]): Option[(A, List[A])] =
    Some((v.head, v.tail))

  /**
   * Lifts a total function on NonEmptyList to one on IList that returns
   * None for the empty list.
   *
   * Uses the ASCII `=>` arrow instead of the unicode `⇒` (deprecated in
   * Scala 2.13), consistent with the rest of this file.
   */
  def lift[A, B](f: NonEmptyList[A] => B): IList[A] => Option[B] = {
    case INil() => None
    case ICons(h, t) => Some(f(NonEmptyList.nel(h, t.toList)))
  }
}
sealed abstract class NonEmptyListInstances0 {
  // Equal instance derived from the underlying List. Living in this parent
  // class gives it lower implicit priority than the instances declared in
  // NonEmptyListInstances (members of subclasses win in implicit resolution).
  implicit def nonEmptyListEqual[A: Equal]: Equal[NonEmptyList[A]] = Equal.equalBy[NonEmptyList[A], List[A]](_.list)(std.list.listEqual[A])
}
sealed abstract class NonEmptyListInstances extends NonEmptyListInstances0 {

  /**
   * The combined type-class instance for NonEmptyList.
   *
   * The explicit type ascription is deliberate: publicly visible implicit
   * members should always carry an explicit type, both for API stability and
   * to avoid implicit-resolution / compilation-order surprises downstream
   * (it is mandatory in Scala 3).
   */
  implicit val nonEmptyList:
      Traverse1[NonEmptyList] with Monad[NonEmptyList] with Plus[NonEmptyList]
        with Comonad[NonEmptyList] with Zip[NonEmptyList] with Unzip[NonEmptyList]
        with Align[NonEmptyList] =
    new Traverse1[NonEmptyList]
      with Monad[NonEmptyList]
      with Plus[NonEmptyList]
      with Comonad[NonEmptyList]
      with Zip[NonEmptyList]
      with Unzip[NonEmptyList]
      with Align[NonEmptyList] {

      def traverse1Impl[G[_] : Apply, A, B](fa: NonEmptyList[A])(f: A => G[B]): G[NonEmptyList[B]] =
        fa traverse1 f

      override def foldMapRight1[A, B](fa: NonEmptyList[A])(z: A => B)(f: (A, => B) => B): B = {
        val reversed = fa.reverse
        reversed.tail.foldLeft(z(reversed.head))((x, y) => f(y, x))
      }

      override def foldMapLeft1[A, B](fa: NonEmptyList[A])(z: A => B)(f: (B, A) => B): B =
        fa.tail.foldLeft(z(fa.head))(f)

      override def foldMap1[A, B](fa: NonEmptyList[A])(f: A => B)(implicit F: Semigroup[B]): B = {
        fa.tail.foldLeft(f(fa.head))((x, y) => F.append(x, f(y)))
      }

      // would otherwise use traverse1Impl
      override def foldLeft[A, B](fa: NonEmptyList[A], z: B)(f: (B, A) => B): B =
        fa.tail.foldLeft(f(z, fa.head))(f)

      def bind[A, B](fa: NonEmptyList[A])(f: A => NonEmptyList[B]): NonEmptyList[B] = fa flatMap f

      def point[A](a: => A): NonEmptyList[A] = NonEmptyList(a)

      def plus[A](a: NonEmptyList[A], b: => NonEmptyList[A]): NonEmptyList[A] = a.list <::: b

      def copoint[A](p: NonEmptyList[A]): A = p.head

      def cobind[A, B](fa: NonEmptyList[A])(f: NonEmptyList[A] => B): NonEmptyList[B] = map(cojoin(fa))(f)

      override def cojoin[A](a: NonEmptyList[A]): NonEmptyList[NonEmptyList[A]] = a.tails

      def zip[A, B](a: => NonEmptyList[A], b: => NonEmptyList[B]) = a zip b

      def unzip[A, B](a: NonEmptyList[(A, B)]) = a.unzip

      // Heads are always aligned as Both; only the tails can diverge in length.
      def alignWith[A, B, C](f: A \&/ B => C) = (a, b) => {
        import std.list._
        NonEmptyList.nel(f(\&/.Both(a.head, b.head)), Align[List].alignWith(f)(a.tail, b.tail))
      }

      override def length[A](a: NonEmptyList[A]): Int = a.size

      override def all[A](fa: NonEmptyList[A])(f: A => Boolean) =
        f(fa.head) && fa.tail.forall(f)

      override def any[A](fa: NonEmptyList[A])(f: A => Boolean) =
        f(fa.head) || fa.tail.exists(f)
    }

  implicit def nonEmptyListSemigroup[A]: Semigroup[NonEmptyList[A]] = new Semigroup[NonEmptyList[A]] {
    def append(f1: NonEmptyList[A], f2: => NonEmptyList[A]) = f1 append f2
  }

  implicit def nonEmptyListShow[A: Show]: Show[NonEmptyList[A]] =
    Contravariant[Show].contramap(std.list.listShow[A])(_.list)

  implicit def nonEmptyListOrder[A: Order]: Order[NonEmptyList[A]] =
    Order.orderBy[NonEmptyList[A], List[A]](_.list)(std.list.listOrder[A])
}
trait NonEmptyListFunctions {
  /** Constructs a NonEmptyList from a head and an explicit tail list. */
  def nel[A](h: A, t: List[A]): NonEmptyList[A] =
    new NonEmptyList(h, t)

  /** Constructs a NonEmptyList from a head and a varargs tail. */
  def nels[A](h: A, t: A*): NonEmptyList[A] =
    nel(h, t.toList)
}
| hvesalai/http4s | core/src/main/scala/org/http4s/util/NonEmptyList.scala | Scala | apache-2.0 | 8,466 |
package com.stefansavev.randomprojections.datarepr.sparse
class SparseVector(val dim: Integer, val ids: Array[Int], val values: Array[Double]) | codeaudit/random-projections-at-berlinbuzzwords | src/main/scala/com/stefansavev/randomprojections/datarepr/sparse/SparseVector.scala | Scala | apache-2.0 | 143 |
package io.udash.web.guide.views.frontend
import io.udash._
import io.udash.web.guide.FrontendState
import io.udash.web.guide.views.ViewContainer
import scalatags.JsDom
case object FrontendViewFactory extends StaticViewFactory[FrontendState.type](() => new FrontendView)
/**
 * Landing view for the "Frontend" chapter of the guide. Nested child views are
 * rendered into the `child` placeholder element.
 */
class FrontendView extends ViewContainer {
  import JsDom.all._

  // Placeholder DOM node into which nested views are injected.
  protected val child = div().render

  override def getTemplate: Modifier = div(
    h1("Frontend"),
    p(
      "In this part of the guide you will read about creating a frontend application with Udash. Let's make your ",
      "frontend type-safe, elegant and maintainable. "
    ),
    child
  )
}
package x7c1.linen.scene.loader.crawling
import android.content.{Context, Intent}
import android.net.Uri
import x7c1.linen.database.control.DatabaseHelper
import x7c1.linen.database.struct.HasLoaderScheduleId
import x7c1.linen.glue.service.ServiceControl
import x7c1.linen.repository.loader.schedule.LoaderSchedule
import x7c1.wheat.calendar.CalendarDate
import x7c1.wheat.macros.intent.IntentBuilder.from
import x7c1.wheat.macros.logger.Log
import x7c1.wheat.modern.chrono.alarm.WindowAlarm
import x7c1.wheat.modern.formatter.ThrowableFormatter.format
/**
 * Registers, updates and cancels the window alarms that trigger crawler loads
 * for persisted loader schedules.
 */
class LoaderScheduler private (
  context: Context with ServiceControl,
  helper: DatabaseHelper ){

  import concurrent.duration._

  /**
   * Looks up the schedule and either (re)registers its next alarm when the
   * schedule is enabled, or cancels any pending alarm when it is disabled.
   * Missing schedules and database errors are logged inside `find`.
   */
  def setupNextLoader[A: HasLoaderScheduleId](schedule: A): Unit = {
    val scheduleId = implicitly[HasLoaderScheduleId[A]] toId schedule
    find(schedule) foreach {
      case existent if existent.enabled =>
        createOrUpdate(existent)
        Log info s"[done] schedule updated: (id:$scheduleId)"
      case existent =>
        createAlarmOf(existent).cancel()
        Log info s"[done] schedule canceled: (id:$scheduleId)"
    }
  }

  /** Cancels the alarm of the given schedule, if the schedule row exists. */
  def cancelSchedule[A: HasLoaderScheduleId](schedule: A): Unit = {
    find(schedule) foreach { existent =>
      createAlarmOf(existent).cancel()
    }
    val scheduleId = implicitly[HasLoaderScheduleId[A]] toId schedule
    Log info s"[done] schedule canceled: (id:$scheduleId)"
  }

  // Loads the schedule row from the database; logs and returns None when the
  // row is missing or the query fails.
  private def find[A: HasLoaderScheduleId](schedule: A) = {
    helper.selectorOf[LoaderSchedule] findBy schedule matches {
      case Right(Some(existent)) =>
        Some(existent)
      case Right(None) =>
        val scheduleId = implicitly[HasLoaderScheduleId[A]] toId schedule
        Log error s"schedule not found (id:$scheduleId)"
        None
      case Left(e) =>
        Log error format(e.getCause){"[failed]"}
        None
    }
  }

  // Wraps the schedule's intent in a WindowAlarm; used both to register
  // (triggerInTime) and to cancel the pending alarm.
  private def createAlarmOf(schedule: LoaderSchedule) = {
    WindowAlarm(
      context = context,
      intent = createIntent(schedule)
    )
  }

  // Registers an alarm for the schedule's next start time, firing within a
  // one-hour window from that time. Logs a warning when no next start exists.
  private def createOrUpdate(schedule: LoaderSchedule) = {
    Log info s"[init] $schedule"
    schedule nextStartAfter CalendarDate.now() match {
      case Some(start) =>
        createAlarmOf(schedule) triggerInTime (
          window = 1.hour,
          startMilliSeconds = start.toMilliseconds
          /* debug
          window = 10.seconds,
          startMilliSeconds = (CalendarDate.now() + 10.seconds).toMilliseconds
          // */
        )
      case None =>
        Log warn s"time not found: (schedule:${schedule.scheduleId})"
    }
  }

  // Intent handled by SchedulerService. The schedule-specific data URI is set
  // presumably to keep each schedule's PendingIntent distinct so alarms don't
  // overwrite one another — TODO confirm against SchedulerService/WindowAlarm.
  private def createIntent(schedule: LoaderSchedule): Intent = {
    val intent = SchedulerService(context) buildIntent from {
      _.loadFromSchedule(schedule.scheduleId)
    }
    intent setData Uri.parse(
      s"linen://loader.schedule/setup/${schedule.scheduleId}"
    )
    intent
  }
}
/** Factory for [[LoaderScheduler]] instances. */
object LoaderScheduler {
  def apply(context: Context with ServiceControl, helper: DatabaseHelper): LoaderScheduler =
    new LoaderScheduler(context, helper)
}
| x7c1/Linen | linen-scene/src/main/scala/x7c1/linen/scene/loader/crawling/LoaderScheduler.scala | Scala | mit | 3,068 |
// Crawl-statistics source for asahi.com articles.
object AsahiStats extends CrawlerStats {
  // Selector locating the article body paragraphs on asahi.com pages
  // (CSS-style; exact semantics are defined by CrawlerStats).
  protected def extractor = "div.ArticleText p"

  // Sample article URLs fed to the crawler.
  protected def allUrls = Seq(
    "http://www.asahi.com/articles/ASG8C6KRKG8CUCVL01Y.html",
    "http://www.asahi.com/articles/ASG8D6SW3G8DTQIP008.html",
    "http://www.asahi.com/articles/ASG8D63DRG8DULOB01Q.html",
    "http://www.asahi.com/articles/ASG6W2GB1G6WUCVL003.html"
  )
}
| mauhiz/kanji-jikan | src/main/scala/AsahiStats.scala | Scala | epl-1.0 | 384 |
package com.gilt.pickling.avroschema
import org.scalatest.{Assertions, FunSuite}
import com.gilt.pickling.TestObjs._
import scala.pickling._
import com.gilt.pickling.TestUtils._
/**
 * Verifies that the Avro schema fingerprint generated from a case class with a
 * single field of each supported type matches the reference .avsc file. Every
 * test exercises the three Schema entry points: value instance, Class object,
 * and type parameter.
 */
class SingleFieldTest extends FunSuite with Assertions {

  test("Generate schema from a case class with a single int field") {
    assert(fingerPrint("/avro/single/SingleInt.avsc") === fingerPrint(Schema(SingleInt(123))))
    assert(fingerPrint("/avro/single/SingleInt.avsc") === fingerPrint(Schema(classOf[SingleInt])))
    assert(fingerPrint("/avro/single/SingleInt.avsc") === fingerPrint(Schema[SingleInt]))
  }

  test("Generate schema from a case class with a single long field") {
    assert(fingerPrint("/avro/single/SingleLong.avsc") === fingerPrint(Schema(SingleLong(123))))
    assert(fingerPrint("/avro/single/SingleLong.avsc") === fingerPrint(Schema(classOf[SingleLong])))
    assert(fingerPrint("/avro/single/SingleLong.avsc") === fingerPrint(Schema[SingleLong]))
  }

  test("Generate schema from a case class with a single double field") {
    assert(fingerPrint("/avro/single/SingleDouble.avsc") === fingerPrint(Schema(SingleDouble(123))))
    assert(fingerPrint("/avro/single/SingleDouble.avsc") === fingerPrint(Schema(classOf[SingleDouble])))
    assert(fingerPrint("/avro/single/SingleDouble.avsc") === fingerPrint(Schema[SingleDouble]))
  }

  test("Generate schema from a case class with a single float field") {
    assert(fingerPrint("/avro/single/SingleFloat.avsc") === fingerPrint(Schema(SingleFloat(123))))
    assert(fingerPrint("/avro/single/SingleFloat.avsc") === fingerPrint(Schema(classOf[SingleFloat])))
    assert(fingerPrint("/avro/single/SingleFloat.avsc") === fingerPrint(Schema[SingleFloat]))
  }

  test("Generate schema from a case class with a single boolean field") {
    assert(fingerPrint("/avro/single/SingleBoolean.avsc") === fingerPrint(Schema(SingleBoolean(id = true))))
    assert(fingerPrint("/avro/single/SingleBoolean.avsc") === fingerPrint(Schema(classOf[SingleBoolean])))
    assert(fingerPrint("/avro/single/SingleBoolean.avsc") === fingerPrint(Schema[SingleBoolean]))
  }

  test("Generate schema from a case class with a single string field") {
    assert(fingerPrint("/avro/single/SingleString.avsc") === fingerPrint(Schema(SingleString("abc"))))
    assert(fingerPrint("/avro/single/SingleString.avsc") === fingerPrint(Schema(classOf[SingleString])))
    assert(fingerPrint("/avro/single/SingleString.avsc") === fingerPrint(Schema[SingleString]))
  }

  test("Generate schema from a case class with a single char field") {
    assert(fingerPrint("/avro/single/SingleChar.avsc") === fingerPrint(Schema(SingleChar('a'))))
    assert(fingerPrint("/avro/single/SingleChar.avsc") === fingerPrint(Schema(classOf[SingleChar])))
    assert(fingerPrint("/avro/single/SingleChar.avsc") === fingerPrint(Schema[SingleChar]))
  }

  test("Generate schema from a case class with a single byte field") {
    assert(fingerPrint("/avro/single/SingleByte.avsc") === fingerPrint(Schema(SingleByte(1.toByte))))
    assert(fingerPrint("/avro/single/SingleByte.avsc") === fingerPrint(Schema(classOf[SingleByte])))
    assert(fingerPrint("/avro/single/SingleByte.avsc") === fingerPrint(Schema[SingleByte]))
  }

  test("Generate schema from a case class with a single short field") {
    assert(fingerPrint("/avro/single/SingleShort.avsc") === fingerPrint(Schema(SingleShort(1.toShort))))
    assert(fingerPrint("/avro/single/SingleShort.avsc") === fingerPrint(Schema(classOf[SingleShort])))
    assert(fingerPrint("/avro/single/SingleShort.avsc") === fingerPrint(Schema[SingleShort]))
  }
}
| gilt/gfc-avro | src/test/scala/com/gilt/pickling/avroschema/SingleFieldTest.scala | Scala | apache-2.0 | 3,609 |
/** Monad type-class instance for Option. */
implicit object MonadOption extends Monad[Option] {
  /** Lifts a plain value into the monad — always a Some. */
  override def pure[A](a: A): Option[A] = Some(a)

  /** Sequencing: None short-circuits, Some(a) continues with g(a). */
  override def flatMap[A, B](m: Option[A])(g: A => Option[B]): Option[B] =
    m.fold(Option.empty[B])(g)
}
| lkuczera/scalatypeclasses | steps/MonadOption.scala | Scala | mit | 250 |
object XmlLiterals {
  // XML literal expression whose value is discarded; this object exists only
  // to exercise the parser's XML-literal support.
  <foo bar=""/>
}
} | mdemarne/scalahost | tests/src/test/resources/ScalaToMeta/XmlLiterals/Original.scala | Scala | bsd-3-clause | 38 |
package at.logic.gapt.proofs.lksk.algorithms
import at.logic.gapt.proofs.lk.base.{ LKProof, NullaryLKProof }
import at.logic.gapt.proofs.lk.{ BinaryLKProof, UnaryLKProof }
import at.logic.gapt.proofs.lksk.UnaryLKskProof
import at.logic.gapt.proofs.proofs.RuleTypeA
/**
* Created by marty on 8/25/14.
*/
/** Default, stateless instance; allows calling `rule_isomorphic(p1, p2, pred)` directly. */
object rule_isomorphic extends rule_isomorphic
class rule_isomorphic {
  /**
   * Checks whether two LK proofs have the same tree shape, with corresponding rule
   * types related by the caller-supplied relation `pred`.
   *
   * Unary LK and LKsk inferences are handled uniformly: all four combinations of
   * UnaryLKProof / UnaryLKskProof parents recurse the same way.
   *
   * @param p1   first proof
   * @param p2   second proof
   * @param pred relation required to hold between each pair of corresponding rule types
   * @return true iff the proofs are rule-isomorphic under `pred`
   * @throws Exception if the pair matches none of the known nullary/unary/binary shapes
   */
  def apply( p1: LKProof, p2: LKProof, pred: ( RuleTypeA, RuleTypeA ) => Boolean ): Boolean =
    ( p1, p2 ) match {
      case ( a1: NullaryLKProof, a2: NullaryLKProof ) =>
        pred( a1.rule, a2.rule )
      case ( UnaryLKProof( t1, up1, _, _, _ ), UnaryLKProof( t2, up2, _, _, _ ) ) =>
        pred( t1, t2 ) && apply( up1, up2, pred )
      case ( UnaryLKProof( t1, up1, _, _, _ ), UnaryLKskProof( t2, up2, _, _, _ ) ) =>
        pred( t1, t2 ) && apply( up1, up2, pred )
      case ( UnaryLKskProof( t1, up1, _, _, _ ), UnaryLKProof( t2, up2, _, _, _ ) ) =>
        pred( t1, t2 ) && apply( up1, up2, pred )
      case ( UnaryLKskProof( t1, up1, _, _, _ ), UnaryLKskProof( t2, up2, _, _, _ ) ) =>
        pred( t1, t2 ) && apply( up1, up2, pred )
      case ( BinaryLKProof( t1, up1a, up1b, _, _, _, _ ), BinaryLKProof( t2, up2a, up2b, _, _, _, _ ) ) =>
        pred( t1, t2 ) && apply( up1a, up2a, pred ) && apply( up1b, up2b, pred )
      case _ =>
        throw new Exception( "can not compare " + p1.rule + " and " + p2.rule + "\\np1= " + p1 + "\\np2= " + p2 )
    }
}
| gisellemnr/gapt | src/main/scala/at/logic/gapt/proofs/lksk/algorithms/isomorphic.scala | Scala | gpl-3.0 | 1,457 |
/*
package Integration
import org.scalatestplus.play._
/**
* add your integration spec here.
* An integration test will fire up a whole play application in a real (or headless) browser
*/
class IntegrationSpec extends PlaySpec with OneServerPerTest with OneBrowserPerTest with HtmlUnitFactory {
"Application" should {
"work from within a browser" in {
go to ("http://localhost:" + port)
pageSource must include ("Your new application is ready.")
}
}
}
*/
| ZDevelop94/RNG-GitHubAPI | test/Integration/IntegrationSpec.scala | Scala | apache-2.0 | 488 |
package play.boilerplate.api.client.dsl
import Compat._
import play.boilerplate.api.{TraceLogger, Tracer}
trait ClientTraceLogger extends TraceLogger {

  /** Derives a tracer that prefixes every message with the operation id. */
  protected def operationTracer(operationId: String)(implicit tracer: Tracer): Tracer =
    tracer.transform("[operationId: " + operationId + "] " + _)

  /** Renders an outgoing request for logging; implemented by concrete loggers. */
  protected def printRequest(method: String, request: WSRequest, body: String): String

  /** Traces the rendered request under the operation-scoped tracer. */
  def logRequest(operationId: String, method: String, request: WSRequest, body: => String)(implicit tracer: Tracer): Unit =
    trace(printRequest(method, request, body))(operationTracer(operationId))

  /** Renders an incoming response for logging; implemented by concrete loggers. */
  protected def printResponse(response: WSResponse): String

  /** Traces the rendered response under the operation-scoped tracer. */
  def logResponse(operationId: String, response: WSResponse)(implicit tracer: Tracer): Unit =
    trace(printResponse(response))(operationTracer(operationId))

  /** Logs an error with its cause under the operation-scoped tracer. */
  def logError(operationId: String, msg: => String, cause: => Throwable)(implicit tracer: Tracer): Unit =
    error(msg, cause)(operationTracer(operationId))
}
object ClientTraceLogger {
  /** No-op implementation: renders empty strings and discards every trace/error message. */
  object NoLogger extends ClientTraceLogger {
    override protected def printRequest(method: String, request: WSRequest, body: String): String = ""
    override protected def printResponse(response: WSResponse): String = ""
    // TraceLogger
    override protected def errorInternal(msg: => String, error: => Throwable): Unit = ()
    override protected def traceInternal(msg: => String): Unit = ()
  }
  /** Human-readable multi-line rendering of requests and responses. */
  abstract class Default extends ClientTraceLogger {
    // One "Name: value" line per header value; multi-valued headers repeat the name.
    private def printHeaders(headers: Map[String, Seq[String]]): String = {
      (for {
        (name, values) <- headers
        value <- values
      } yield name + ": " + value).mkString("\\n")
    }
    override protected def printRequest(method: String, request: WSRequest, body: String): String = {
      s"""REQUEST:
         |$method ${request.url}
         |${printHeaders(request.headers)}
         |
         |$body
       """.stripMargin
    }
    override protected def printResponse(response: WSResponse): String = {
      s"""RESPONSE:
         |${response.status} ${response.statusText}
         |${printHeaders(response.headers)}
         |
         |${response.body}
       """.stripMargin
    }
  }
}
| Romastyi/sbt-play-boilerplate | api-client/share/src/main/scala/play/boilerplate/api/client/dsl/ClientTraceLogger.scala | Scala | apache-2.0 | 2,218 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.sift
import javax.inject.{ Inject, Singleton }
import model._
import model.persisted.sift.SiftAnswersStatus.SiftAnswersStatus
import play.api.Logging
import repositories.SchemeRepository
import repositories.application.GeneralApplicationRepository
import repositories.sift.SiftAnswersRepository
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
@Singleton
class SiftAnswersService @Inject() (appRepo: GeneralApplicationRepository,
                                    siftAnswersRepo: SiftAnswersRepository,
                                    schemeRepository: SchemeRepository
                                   ) extends Logging {
  /** Persists the candidate's answer to a single scheme-specific sift question. */
  def addSchemeSpecificAnswer(applicationId: String, schemeId: SchemeId, answer: model.exchange.sift.SchemeSpecificAnswer): Future[Unit] = {
    siftAnswersRepo.addSchemeSpecificAnswer(applicationId, schemeId, model.persisted.sift.SchemeSpecificAnswer(answer))
  }
  /** Persists the candidate's answers to the general sift questions. */
  def addGeneralAnswers(applicationId: String, answers: model.exchange.sift.GeneralQuestionsAnswers): Future[Unit] = {
    siftAnswersRepo.addGeneralAnswers(applicationId, model.persisted.sift.GeneralQuestionsAnswers(answers))
  }
  /** Fetches all sift answers, converting the persisted model to the exchange (API) model. */
  def findSiftAnswers(applicationId: String): Future[Option[model.exchange.sift.SiftAnswers]] = {
    siftAnswersRepo.findSiftAnswers(applicationId).map(persisted =>
      persisted.map(
        psa => model.exchange.sift.SiftAnswers(psa)
      )
    )
  }
  /** Returns the current sift answers status, translated persisted enum -> exchange enum by name. */
  def findSiftAnswersStatus(applicationId: String): Future[Option[model.exchange.sift.SiftAnswersStatus.Value]] = {
    siftAnswersRepo.findSiftAnswersStatus(applicationId).map(persisted => persisted.map(
      psas => model.exchange.sift.SiftAnswersStatus.withName(psas.toString)
    ))
  }
  /** Sets the sift answers status for the given application. */
  def setSiftAnswersStatus(applicationId: String, status: SiftAnswersStatus): Future[Unit] = {
    for {
      _ <- siftAnswersRepo.setSiftAnswersStatus(applicationId, status)
    } yield ()
  }
  /** Fetches the answer for one scheme, converted to the exchange model. */
  def findSchemeSpecificAnswer(applicationId: String, schemeId: SchemeId): Future[Option[model.exchange.sift.SchemeSpecificAnswer]] = {
    siftAnswersRepo.findSchemeSpecificAnswer(applicationId, schemeId).map(persisted => persisted.map(
      pssa => model.exchange.sift.SchemeSpecificAnswer(pssa.rawText)
    ))
  }
  /** Fetches the general question answers, converted to the exchange model. */
  def findGeneralAnswers(applicationId: String): Future[Option[model.exchange.sift.GeneralQuestionsAnswers]] = {
    siftAnswersRepo.findGeneralQuestionsAnswers(applicationId).map(persisted => persisted.map(pgqa =>
      model.exchange.sift.GeneralQuestionsAnswers(pgqa)
    ))
  }
  // This is called when the candidate submits the form answers
  def submitAnswers(applicationId: String): Future[Unit] = {
    for {
      currentSchemeStatus <- appRepo.getCurrentSchemeStatus(applicationId)
      // Schemes the candidate currently holds a Green (passed) result for
      schemesPassed = currentSchemeStatus.filter(_.result == EvaluationResults.Green.toString).map(_.schemeId).toSet
      // Of those, the schemes whose sift requirement is a FORM submission
      schemesPassedRequiringSift = schemeRepository.schemes.filter( s =>
        schemesPassed.contains(s.id) && s.siftRequirement.contains(SiftRequirement.FORM)
      ).map(_.id).toSet
      // Of those, the schemes that need no sift evaluation at all
      schemesPassedNotRequiringSift = schemeRepository.schemes.filter( s =>
        schemesPassed.contains(s.id) && !s.siftEvaluationRequired
      ).map(_.id).toSet
      _ <- siftAnswersRepo.submitAnswers(applicationId, schemesPassedRequiringSift)
      progressResponse <- appRepo.findProgress(applicationId)
      siftTestResultsReceived = progressResponse.siftProgressResponse.siftTestResultsReceived
      // Ordering matters: submit first, then derive the follow-up progress status transitions
      _ <- maybeMoveToReadyOrTestPending(applicationId, schemesPassed, siftTestResultsReceived)
      _ <- maybeMoveToCompleted(applicationId, schemesPassed, schemesPassedNotRequiringSift)
    } yield {}
  }
  /** Deletes all sift answers stored for the application. */
  def removeAnswers(applicationId: String): Future[Unit] = {
    siftAnswersRepo.removeSiftAnswers(applicationId)
  }
  // Maybe move the candidate to SIFT_READY to indicate he/she is ready to be sifted for form based schemes
  // or to SIFT_FORMS_COMPLETE_NUMERIC_TEST_PENDING to indicate the forms have been submitted and we are waiting
  // for the numeric test to be completed
  private def maybeMoveToReadyOrTestPending(applicationId: String,
                                            schemesPassed: Set[SchemeId], siftTestResultsReceived: Boolean): Future[Unit] = {
    val hasNumericSchemes = schemeRepository.numericTestSiftRequirementSchemeIds.exists( s => schemesPassed.contains(s))
    (hasNumericSchemes, siftTestResultsReceived) match {
      case (false, _) =>
        // No numeric schemes so move candidate to SIFT_READY
        logger.info(s"Candidate $applicationId has submitted sift forms and has no numeric schemes " +
          s"so moving to ${ProgressStatuses.SIFT_READY}")
        appRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_READY)
      case (true, true) =>
        // Numeric schemes and the test results have been received so move candidate to SIFT_READY
        logger.info(s"Candidate $applicationId has submitted sift forms, has numeric schemes, has " +
          s"taken the numeric test and received the results so moving to ${ProgressStatuses.SIFT_READY}")
        appRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_READY)
      case (true, false) =>
        // Numeric schemes and the test results have not been received so move the candidate to NUMERIC_TEST_PENDING
        logger.info(s"Candidate $applicationId has submitted sift forms, has numeric schemes but has " +
          s"not received test results so now moving to ${ProgressStatuses.SIFT_FORMS_COMPLETE_NUMERIC_TEST_PENDING}")
        appRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_FORMS_COMPLETE_NUMERIC_TEST_PENDING)
      case _ =>
        // Do not move the candidate
        logger.info(s"Candidate $applicationId is not yet in a state to move to ${ProgressStatuses.SIFT_READY}")
        Future.successful(())
    }
  }
  // Maybe move the candidate to SIFT_COMPLETED to indicate the candidate has no schemes that require a sift
  // and can be moved straight into SIFT_COMPLETED
  private def maybeMoveToCompleted(applicationId: String, passedSchemes: Set[SchemeId],
                                   passedSchemesNotRequiringSift: Set[SchemeId]): Future[Unit] = {
    val allPassedSchemesDoNotRequireSift = passedSchemes.size == passedSchemesNotRequiringSift.size &&
      passedSchemes == passedSchemesNotRequiringSift
    if(allPassedSchemesDoNotRequireSift) {
      logger.info(s"Candidate $applicationId has submitted sift forms and has no schemes requiring a sift so " +
        s"now moving to ${ProgressStatuses.SIFT_COMPLETED}")
      appRepo.addProgressStatusAndUpdateAppStatus(applicationId, ProgressStatuses.SIFT_COMPLETED)
    } else {
      logger.info(s"Candidate $applicationId has schemes, which require sifting so cannot " +
        s"move to ${ProgressStatuses.SIFT_COMPLETED}")
      Future.successful(())
    }
  }
}
| hmrc/fset-faststream | app/services/sift/SiftAnswersService.scala | Scala | apache-2.0 | 7,525 |
/*
active-learning-scala: Active Learning library for Scala
Copyright (c) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ml.generators
import util.{Datasets, Tempo}
import scala.util.Random
// One-off conversion script: reads an ARFF dataset and prints it in a "label,features" CSV-like
// form (apparently for consumption by Matlab, per the object name).
object ARFF2matlab extends App {
  // Fixed seed so the shuffle (and therefore the emitted subset) is reproducible.
  val rnd = new Random(0)
  // NOTE(review): hard-coded absolute path and unsafe .right.get — throws if the parse fails.
  // Acceptable for a throwaway script; parameterise if this is ever reused.
  val data = Datasets.arff("/home/davi/working-copies/arff/abalone-11classes.arff").right.get
  // Shuffle, skip the first 2000 instances, print the remainder.
  rnd.shuffle(data).drop(2000) foreach {
    x => println(x.label + "," + x.toString_without_class)
  }
  /* 1 to 10 foreach {
    _ =>
      Tempo.start
      val ceos = CEOSELM(0)
      ceos.build(data.take(30))
      ceos.grow(20)
      data.take(2000).drop(30) foreach ceos.increment
      print(ceos.accuracy(data.drop(2000)))
      Tempo.print_stop
  }*/
}
| active-learning/active-learning-scala | src/main/scala/ml/generators/ARFF2matlab.scala | Scala | gpl-2.0 | 1,348 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.rinterpreter
import java.util.Properties
import org.apache.zeppelin.RTest
import org.apache.zeppelin.interpreter.{Interpreter, InterpreterContext, InterpreterResult, InterpreterGroup}
import org.scalatest.Matchers._
import org.scalatest._
import java.util.ArrayList
// NOTE: the tests in this spec are order-dependent — they share the rint/rint2/rint3
// instances and progressively open, share, and close their RContexts.
class RInterpreterTest extends FlatSpec {
  RContext.resetRcon()
  // Minimal concrete RInterpreter (Spark disabled) used as the test subject.
  class RIntTester extends RInterpreter(new Properties(), startSpark = false) {
    def interpret(s: String, interpreterContext: InterpreterContext): InterpreterResult = {
      val result : Array[String] = rContext.evalS1(s)
      new InterpreterResult(InterpreterResult.Code.SUCCESS, result.mkString("\\n"))
    }
  }
  val rint = new RIntTester()
  "An RInterpreter" should "exist" in {
    assert(rint != null)
  }
  it should "not complain when we assign it a group" in {
    val grp : InterpreterGroup = new InterpreterGroup("test")
    val lst : ArrayList[Interpreter] = new ArrayList[Interpreter]()
    lst.add(rint)
    grp.put(rint.getClassName(), lst)
    rint.setInterpreterGroup(grp)
  }
  it should "create a fresh rContext when we ask for one" in {
    assert(! rint.getrContext.isOpen)
  }
  // Tests tagged RTest require a working local R installation.
  it should "open" taggedAs(RTest) in {
    rint.open()
    assert(rint.getrContext.isOpen)
  }
  it should "have rzeppelin available" taggedAs(RTest) in {
    assume(rint.getrContext.isOpen)
    assert(rint.getrContext.testRPackage("rzeppelin"))
  }
  it should "have an rContext able to do simple addition" taggedAs(RTest) in {
    assume(rint.getrContext.isOpen)
    assert(rint.getrContext.evalI0("2 + 2") == 4)
  }
/*  it should "have a functional completion function" taggedAs(RTest) in {
    val result = rint.hiddenCompletion("hi", 3)
    result should (contain ("hist"))
  }*/
  it should "have a working progress meter" in {
    rint.getrContext.setProgress(50)
    assertResult(50) {
      rint.getrContext.getProgress
    }
  }
  it should "have persistent properties" in {
    val props = new Properties()
    props.setProperty("hello", "world")
    rint.setProperty(props)
    assertResult("world") {
      rint.getProperty("hello")
    }
  }
  var rint2 : RIntTester = null
  // Interpreters registered in the same InterpreterGroup must share one RContext.
  it should "Share RContexts if they share the same InterpreterGroup" in {
    rint2 = new RIntTester()
    val lst : ArrayList[Interpreter] = new ArrayList[Interpreter]()
    lst.add(rint2)
    val grp = rint.getInterpreterGroup()
    grp.put(rint2.getClassName(), lst)
    rint2.setInterpreterGroup(grp)
    rint2.open()
    rint.getrContext should be theSameInstanceAs rint2.getrContext
  }
  "Opening the second RInterpreter" should "not have closed the first RContext" in {
    assert(rint.getrContext.isOpen)
  }
  var rint3 : RIntTester = null
  // A different InterpreterGroup must get its own, separate RContext.
  "An RInterpreter in a different InterpreterGroup" should "have a different R Context" in {
    rint3 = new RIntTester()
    val grp : InterpreterGroup = new InterpreterGroup("othertest")
    val lst : ArrayList[Interpreter] = new ArrayList[Interpreter]()
    lst.add(rint3)
    grp.put(rint3.getClassName(), lst)
    rint3.setInterpreterGroup(grp)
    rint3.open()
    rint3.getrContext shouldNot be theSameInstanceAs rint2.getrContext
  }
  "The first RInterpreter" should "close politely" in {
    rint.close()
    assert(!rint.getrContext.isOpen)
  }
  "and so" should "the other one" in {
    rint2.close()
    assert(!rint2.getrContext.isOpen)
  }
  "and " should "the third one" in {
    rint3.close()
    // NOTE(review): asserts on rint2 rather than rint3 — looks like a copy/paste slip; confirm.
    assert(!rint2.getrContext.isOpen)
  }
  // fixture.sparky.close()
}
| SarunasG/zeppelin-oidc | r/src/test/scala/org/apache/zeppelin/rinterpreter/RInterpreterTest.scala | Scala | apache-2.0 | 4,316 |
package info.mukel.codeforces4s.api
/**
 * Represents a member of a contest party.
 *
 * @param handle Codeforces user handle
 */
case class Member(handle: String)
| mukel/codeforces4s | src/main/scala/info/mukel/codeforces4s/api/Member.scala | Scala | gpl-2.0 | 180 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.cluster.typed
import akka.actor.{ ActorSystem => ActorSystemClassic }
import akka.cluster.{ Cluster => ClusterClassic }
import akka.actor.typed.ActorSystem
import com.lightbend.lagom.scaladsl.cluster.ClusterComponents
import akka.cluster.sharding.typed.scaladsl.ClusterSharding
/**
* Akka Cluster Sharding Typed components (for compile-time injection).
*/
trait ClusterShardingTypedComponents {

  /** Classic actor system supplied by the application's wiring. */
  def actorSystem: ActorSystemClassic

  /**
   * Typed ClusterSharding extension, materialised lazily by adapting the
   * classic actor system to a typed one.
   */
  lazy val clusterSharding: ClusterSharding = {
    import akka.actor.typed.scaladsl.adapter._
    ClusterSharding(actorSystem.toTyped)
  }
}
| ignasi35/lagom | cluster/scaladsl/src/main/scala/com/lightbend/lagom/scaladsl/cluster/typed/ClusterShardingTypedComponents.scala | Scala | apache-2.0 | 771 |
package org.vitrivr.adampro.shared.catalog.catalogs
import org.vitrivr.adampro.shared.catalog.CatalogManager
import slick.driver.H2Driver.api._
/**
* ADAMpro
*
* Catalog to store measurements.
*
* Ivan Giangreco
* August 2016
*/
// Slick table mapping for the "ap_querylog" catalog table: one row per stored query,
// keyed by `key`, associating an entity/attribute pair with a serialized query blob.
private[catalog] class QueryLog(tag: Tag) extends Table[(String, String, String, Array[Byte])](tag, Some(CatalogManager.SCHEMA), "ap_querylog") {
  // Primary key of the log entry.
  def key = column[String]("key", O.PrimaryKey)
  def entityname = column[String]("entity")
  def attribute = column[String]("attribute")
  // Serialized query payload; serialization format not visible here.
  def query = column[Array[Byte]]("query")
  /**
   * Special fields
   */
  def * = (key, entityname, attribute, query)
} | dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/shared/catalog/catalogs/QueryLog.scala | Scala | mit | 657 |
package com.github.bluenote
import java.io.PrintStream
import java.io.FileOutputStream
import java.io.OutputStream
import java.io.File
object GeneralUtils {

  /**
   * Reads the whole file as UTF-8 and joins its lines with the literal separator "\\n".
   * On any non-fatal failure the exception is printed and "" is returned
   * (best-effort behaviour preserved from the original).
   */
  def loadFile(filename: String): String = {
    try {
      val source = scala.io.Source.fromFile(filename, "utf-8")
      // fix: the original never closed the Source, leaking the underlying file handle
      try source.getLines.mkString("\\n")
      finally source.close()
    } catch {
      // fix: only swallow non-fatal errors; OutOfMemoryError etc. must propagate
      case scala.util.control.NonFatal(e) => println("Exception while reading file:\\n" + e); ""
    }
  }

  // --------------------------------------------------
  // Output Stuff
  // --------------------------------------------------

  /** Standard output as a PrintStream (caller must not close it). */
  def outputStdOut: PrintStream = System.out

  /** PrintStream writing to the given file; caller is responsible for closing it. */
  def outputFile(file: File): PrintStream = new PrintStream(new FileOutputStream(file))

  /** PrintStream writing to the named file; caller is responsible for closing it. */
  def outputFile(filename: String): PrintStream = new PrintStream(new FileOutputStream(filename))

  /** PrintStream that silently discards everything written to it. */
  def outputDummy: PrintStream = new PrintStream(new OutputStream() {
    override def close() {}
    override def flush() {}
    override def write(b: Array[Byte]) {}
    override def write(b: Array[Byte], off: Int, len: Int) {}
    override def write(b: Int) {}
  })
}
| bluenote10/ScalaOculusRiftExample | src/main/scala/com/github/bluenote/GeneralUtils.scala | Scala | apache-2.0 | 1,044 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.scalatest.Assertions._
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete, Count, Max}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.{Cross, LeftOuter, RightOuter}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData, MapData}
import org.apache.spark.sql.types._
// Simple wrapper around an Int, backing a UDT whose underlying SQL type is groupable.
private[sql] case class GroupableData(data: Int) {
  def getData: Int = data
}
// User-defined type stored as a plain IntegerType; serialize/deserialize just unwrap/wrap the Int.
private[sql] class GroupableUDT extends UserDefinedType[GroupableData] {
  override def sqlType: DataType = IntegerType
  override def serialize(groupableData: GroupableData): Int = groupableData.data
  // Deliberately partial: anything other than an Int triggers a MatchError (test-only code).
  override def deserialize(datum: Any): GroupableData = {
    datum match {
      case data: Int => GroupableData(data)
    }
  }
  override def userClass: Class[GroupableData] = classOf[GroupableData]
  private[spark] override def asNullable: GroupableUDT = this
}
// Wrapper around a Map, backing a UDT whose underlying SQL type (MapType) is not groupable.
private[sql] case class UngroupableData(data: Map[Int, Int]) {
  def getData: Map[Int, Int] = data
}
// User-defined type stored as MapType(Int, Int); converts between Scala Map and Catalyst MapData.
private[sql] class UngroupableUDT extends UserDefinedType[UngroupableData] {
  override def sqlType: DataType = MapType(IntegerType, IntegerType)
  override def serialize(ungroupableData: UngroupableData): MapData = {
    val keyArray = new GenericArrayData(ungroupableData.data.keys.toSeq)
    val valueArray = new GenericArrayData(ungroupableData.data.values.toSeq)
    new ArrayBasedMapData(keyArray, valueArray)
  }
  // Deliberately partial: anything other than MapData triggers a MatchError (test-only code).
  override def deserialize(datum: Any): UngroupableData = {
    datum match {
      case data: MapData =>
        val keyArray = data.keyArray().array
        val valueArray = data.valueArray().array
        assert(keyArray.length == valueArray.length)
        // Pair keys with values positionally to rebuild the original Map.
        val mapData = keyArray.zip(valueArray).toMap.asInstanceOf[Map[Int, Int]]
        UngroupableData(mapData)
    }
  }
  override def userClass: Class[UngroupableData] = classOf[UngroupableData]
  private[spark] override def asNullable: UngroupableUDT = this
}
// Unevaluable test expression with configurable expected input types, used to exercise
// implicit-cast/type-check error messages.
case class TestFunction(
    children: Seq[Expression],
    inputTypes: Seq[AbstractDataType])
  extends Expression with ImplicitCastInputTypes with Unevaluable {
  override def nullable: Boolean = true
  override def dataType: DataType = StringType
  override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression =
    copy(children = newChildren)
}
// Leaf plan that is permanently unresolved, used to test the catch-all unresolved-plan error.
case class UnresolvedTestPlan() extends LeafNode {
  override lazy val resolved = false
  override def output: Seq[Attribute] = Nil
}
class AnalysisErrorSuite extends AnalysisTest {
import TestRelations._
  /**
   * Registers a test that analyzes `plan` and asserts the analysis fails with all of
   * `errorMessages` (substrings of the expected error).
   *
   * @param name          test name
   * @param plan          logical plan expected to fail analysis
   * @param errorMessages substrings that must all appear in the analysis error
   * @param caseSensitive whether the analyzer runs with case-sensitive resolution
   */
  def errorTest(
      name: String,
      plan: LogicalPlan,
      errorMessages: Seq[String],
      caseSensitive: Boolean = true): Unit = {
    test(name) {
      assertAnalysisError(plan, errorMessages, caseSensitive)
    }
  }
val dateLit = Literal.create(null, DateType)
errorTest(
"scalar subquery with 2 columns",
testRelation.select(
(ScalarSubquery(testRelation.select($"a", dateLit.as("b"))) + Literal(1)).as("a")),
"Scalar subquery must return only one column, but got 2" :: Nil)
errorTest(
"scalar subquery with no column",
testRelation.select(ScalarSubquery(LocalRelation()).as("a")),
"Scalar subquery must return only one column, but got 0" :: Nil)
errorTest(
"single invalid type, single arg",
testRelation.select(TestFunction(dateLit :: Nil, IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE))" :: "argument 1" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"single invalid type, second arg",
testRelation.select(
TestFunction(dateLit :: dateLit :: Nil, DateType :: IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" ::
"argument 2" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"multiple invalid type",
testRelation.select(
TestFunction(dateLit :: dateLit :: Nil, IntegerType :: IntegerType :: Nil).as("a")),
"cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" ::
"argument 1" :: "argument 2" :: "requires int type" ::
"'CAST(NULL AS DATE)' is of date type" :: Nil)
errorTest(
"invalid window function",
testRelation2.select(
WindowExpression(
Literal(0),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"not supported within a window function" :: Nil)
errorTest(
"distinct aggregate function in window",
testRelation2.select(
WindowExpression(
AggregateExpression(Count(UnresolvedAttribute("b")), Complete, isDistinct = true),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"Distinct window functions are not supported" :: Nil)
errorTest(
"window aggregate function with filter predicate",
testRelation2.select(
WindowExpression(
AggregateExpression(
Count(UnresolvedAttribute("b")),
Complete,
isDistinct = false,
filter = Some(UnresolvedAttribute("b") > 1)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
UnspecifiedFrame)).as("window")),
"window aggregate function with filter predicate is not supported" :: Nil
)
errorTest(
"distinct function",
CatalystSqlParser.parsePlan("SELECT hex(DISTINCT a) FROM TaBlE"),
"Function hex does not support DISTINCT" :: Nil)
errorTest(
"non aggregate function with filter predicate",
CatalystSqlParser.parsePlan("SELECT hex(a) FILTER (WHERE c = 1) FROM TaBlE2"),
"Function hex does not support FILTER clause" :: Nil)
errorTest(
"distinct window function",
CatalystSqlParser.parsePlan("SELECT percent_rank(DISTINCT a) OVER () FROM TaBlE"),
"Function percent_rank does not support DISTINCT" :: Nil)
errorTest(
"window function with filter predicate",
CatalystSqlParser.parsePlan("SELECT percent_rank(a) FILTER (WHERE c > 1) OVER () FROM TaBlE2"),
"Function percent_rank does not support FILTER clause" :: Nil)
errorTest(
"higher order function with filter predicate",
CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " +
"FILTER (WHERE c > 1)"),
"FILTER predicate specified, but aggregate is not an aggregate function" :: Nil)
errorTest(
"non-deterministic filter predicate in aggregate functions",
CatalystSqlParser.parsePlan("SELECT count(a) FILTER (WHERE rand(int(c)) > 1) FROM TaBlE2"),
"FILTER expression is non-deterministic, it cannot be used in aggregate functions" :: Nil)
errorTest(
"function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT hex(a) IGNORE NULLS FROM TaBlE2"),
"Function hex does not support IGNORE NULLS" :: Nil)
errorTest(
"some window function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT percent_rank(a) IGNORE NULLS FROM TaBlE2"),
"Function percent_rank does not support IGNORE NULLS" :: Nil)
errorTest(
"aggregate function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT count(a) IGNORE NULLS FROM TaBlE2"),
"Function count does not support IGNORE NULLS" :: Nil)
errorTest(
"higher order function don't support ignore nulls",
CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " +
"IGNORE NULLS"), "Function aggregate does not support IGNORE NULLS" :: Nil)
errorTest(
"nested aggregate functions",
testRelation.groupBy($"a")(
AggregateExpression(
Max(AggregateExpression(Count(Literal(1)), Complete, isDistinct = false)),
Complete,
isDistinct = false)),
"not allowed to use an aggregate function in the argument of another aggregate function." :: Nil
)
errorTest(
"offset window function",
testRelation2.select(
WindowExpression(
new Lead(UnresolvedAttribute("b")),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RangeFrame, Literal(1), Literal(2)))).as("window")),
"Cannot specify window frame for lead function" :: Nil)
errorTest(
"the offset of nth_value window function is negative or zero",
testRelation2.select(
WindowExpression(
new NthValue(AttributeReference("b", IntegerType)(), Literal(0)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")),
"The 'offset' argument of nth_value must be greater than zero but it is 0." :: Nil)
errorTest(
"the offset of nth_value window function is not int literal",
testRelation2.select(
WindowExpression(
new NthValue(AttributeReference("b", IntegerType)(), Literal(true)),
WindowSpecDefinition(
UnresolvedAttribute("a") :: Nil,
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")),
"argument 2 requires int type, however, 'true' is of boolean type." :: Nil)
errorTest(
"too many generators",
listRelation.select(Explode($"list").as("a"), Explode($"list").as("b")),
"only one generator" :: "explode" :: Nil)
errorTest(
"unresolved attributes",
testRelation.select($"abcd"),
"cannot resolve" :: "abcd" :: Nil)
errorTest(
"unresolved attributes with a generated name",
testRelation2.groupBy($"a")(max($"b"))
.where(sum($"b") > 0)
.orderBy($"havingCondition".asc),
"cannot resolve" :: "havingCondition" :: Nil)
errorTest(
"unresolved star expansion in max",
testRelation2.groupBy($"a")(sum(UnresolvedStar(None))),
"Invalid usage of '*'" :: "in expression 'sum'" :: Nil)
errorTest(
"sorting by unsupported column types",
mapRelation.orderBy($"map".asc),
"sort" :: "type" :: "map<int,int>" :: Nil)
errorTest(
"sorting by attributes are not from grouping expressions",
testRelation2.groupBy($"a", $"c")($"a", $"c", count($"a").as("a3")).orderBy($"b".asc),
"cannot resolve" :: "'b'" :: "given input columns" :: "[a, a3, c]" :: Nil)
errorTest(
"non-boolean filters",
testRelation.where(Literal(1)),
"filter" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil)
errorTest(
"non-boolean join conditions",
testRelation.join(testRelation, condition = Some(Literal(1))),
"condition" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil)
errorTest(
"missing group by",
testRelation2.groupBy($"a")($"b"),
"'b'" :: "group by" :: Nil
)
errorTest(
"ambiguous field",
nestedRelation.select($"top.duplicateField"),
"Ambiguous reference to fields" :: "duplicateField" :: Nil,
caseSensitive = false)
errorTest(
"ambiguous field due to case insensitivity",
nestedRelation.select($"top.differentCase"),
"Ambiguous reference to fields" :: "differentCase" :: "differentcase" :: Nil,
caseSensitive = false)
errorTest(
"missing field",
nestedRelation2.select($"top.c"),
"No such struct field" :: "aField" :: "bField" :: "cField" :: Nil,
caseSensitive = false)
errorTest(
"catch all unresolved plan",
UnresolvedTestPlan(),
"unresolved" :: Nil)
errorTest(
"union with unequal number of columns",
testRelation.union(testRelation2),
"union" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"intersect with unequal number of columns",
testRelation.intersect(testRelation2, isAll = false),
"intersect" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"except with unequal number of columns",
testRelation.except(testRelation2, isAll = false),
"except" :: "number of columns" :: testRelation2.output.length.toString ::
testRelation.output.length.toString :: Nil)
errorTest(
"union with incompatible column types",
testRelation.union(nestedRelation),
"union" :: "the compatible column types" :: Nil)
errorTest(
"union with a incompatible column type and compatible column types",
testRelation3.union(testRelation4),
"union" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"intersect with incompatible column types",
testRelation.intersect(nestedRelation, isAll = false),
"intersect" :: "the compatible column types" :: Nil)
errorTest(
"intersect with a incompatible column type and compatible column types",
testRelation3.intersect(testRelation4, isAll = false),
"intersect" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
errorTest(
"except with incompatible column types",
testRelation.except(nestedRelation, isAll = false),
"except" :: "the compatible column types" :: Nil)
errorTest(
"except with a incompatible column type and compatible column types",
testRelation3.except(testRelation4, isAll = false),
"except" :: "the compatible column types" :: "map" :: "decimal" :: Nil)
// SPARK-9955: the error must name the unresolvable column even when the
// aggregate expression is wrapped in UnresolvedAlias.
errorTest(
"SPARK-9955: correct error message for aggregate",
// When parsing a SQL string, the parser wraps aggregate expressions with UnresolvedAlias.
testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
"cannot resolve 'bad_column'" :: Nil)
// Slide duration larger than the window duration must be rejected.
errorTest(
  "slide duration greater than window in time window",
  testRelation2.select(
    TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "2 second", "0 second").as("window")),
  // The expected message contains no interpolation, so the stray `s` prefix was removed.
  "The slide duration " :: " must be less than or equal to the windowDuration " :: Nil
)
// TimeWindow validation: |startTime| must be strictly less than slideDuration
// (SPARK-21590 extended this to negative start times), and both windowDuration
// and slideDuration must be positive.
errorTest(
"start time greater than slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 minute").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"start time equal to slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 second").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"SPARK-21590: absolute value of start time greater than slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 minute").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"SPARK-21590: absolute value of start time equal to slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 second").as("window")),
"The absolute value of start time " :: " must be less than the slideDuration " :: Nil
)
errorTest(
"negative window duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "-1 second", "1 second", "0 second").as("window")),
"The window duration " :: " must be greater than 0." :: Nil
)
errorTest(
"zero window duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "0 second", "1 second", "0 second").as("window")),
"The window duration " :: " must be greater than 0." :: Nil
)
errorTest(
"negative slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "-1 second", "0 second").as("window")),
"The slide duration " :: " must be greater than 0." :: Nil
)
errorTest(
"zero slide duration in time window",
testRelation.select(
TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "0 second", "0 second").as("window")),
"The slide duration" :: " must be greater than 0." :: Nil
)
// Generators (e.g. explode) are only allowed at the top level of a SELECT list,
// exactly once per select clause, and never nested inside other expressions.
errorTest(
"generator nested in expressions",
listRelation.select(Explode($"list") + 1),
"Generators are not supported when it's nested in expressions, but got: (explode(list) + 1)"
:: Nil
)
errorTest(
"SPARK-30998: unsupported nested inner generators",
{
val nestedListRelation = LocalRelation(
AttributeReference("nestedList", ArrayType(ArrayType(IntegerType)))())
nestedListRelation.select(Explode(Explode($"nestedList")))
},
"Generators are not supported when it's nested in expressions, but got: " +
"explode(explode(nestedList))" :: Nil
)
errorTest(
"SPARK-30998: unsupported nested inner generators for aggregates",
testRelation.select(Explode(Explode(
CreateArray(CreateArray(min($"a") :: max($"a") :: Nil) :: Nil)))),
"Generators are not supported when it's nested in expressions, but got: " +
"explode(explode(array(array(min(a), max(a)))))" :: Nil
)
errorTest(
"generator nested in expressions for aggregates",
testRelation.select(Explode(CreateArray(min($"a") :: max($"a") :: Nil)) + 1),
"Generators are not supported when it's nested in expressions, but got: " +
"(explode(array(min(a), max(a))) + 1)" :: Nil
)
errorTest(
"generator appears in operator which is not Project",
listRelation.sortBy(Explode($"list").asc),
"Generators are not supported outside the SELECT clause, but got: Sort" :: Nil
)
// LIMIT expressions must evaluate to a non-null, non-negative integer.
errorTest(
"an evaluated limit class must not be null",
testRelation.limit(Literal(null, IntegerType)),
"The evaluated limit expression must not be null, but got " :: Nil
)
errorTest(
"num_rows in limit clause must be equal to or greater than 0",
listRelation.limit(-1),
"The limit expression must be equal to or greater than 0, but got -1" :: Nil
)
errorTest(
"more than one generators in SELECT",
listRelation.select(Explode($"list"), Explode($"list")),
"Only one generator allowed per select clause but found 2: explode(list), explode(list)" :: Nil
)
errorTest(
"more than one generators for aggregates in SELECT",
testRelation.select(Explode(CreateArray(min($"a") :: Nil)),
Explode(CreateArray(max($"a") :: Nil))),
"Only one generator allowed per select clause but found 2: " +
"explode(array(min(a))), explode(array(max(a)))" :: Nil
)
test("SPARK-6452 regression test") {
// CheckAnalysis should throw AnalysisException when Aggregate contains missing attribute(s).
// Since we manually construct the logical plan here, and Sum only accepts
// LongType, DoubleType, and DecimalType, we use LongType as the type of a.
val attrA = AttributeReference("a", LongType)(exprId = ExprId(1))
// Same name "a" but a different ExprId: the aggregate references attrA while
// the child relation only provides otherA, so attrA is "missing".
val otherA = AttributeReference("a", LongType)(exprId = ExprId(2))
val attrC = AttributeReference("c", LongType)(exprId = ExprId(3))
val aliases = Alias(sum(attrA), "b")() :: Alias(sum(attrC), "d")() :: Nil
val plan = Aggregate(
Nil,
aliases,
LocalRelation(otherA))
// The plan claims to be resolved (all ExprIds bound), but CheckAnalysis must
// still detect that attrA/attrC are not produced by the child.
assert(plan.resolved)
val resolved = s"${attrA.toString},${attrC.toString}"
// Expected message: lists the missing attributes, the operator, and a hint
// about the same-named attribute with a different ExprId.
val errorMsg = s"Resolved attribute(s) $resolved missing from ${otherA.toString} " +
s"in operator !Aggregate [${aliases.mkString(", ")}]. " +
s"Attribute(s) with the same name appear in the operation: a. " +
"Please check if the right attribute(s) are used."
assertAnalysisError(plan, errorMsg :: Nil)
}
test("error test for self-join") {
  // Joining a relation directly with itself duplicates every attribute, which
  // the analyzer must reject as conflicting references.
  val selfJoin = Join(testRelation, testRelation, Cross, None, JoinHint.NONE)
  val caught = intercept[AnalysisException] {
    SimpleAnalyzer.checkAnalysis(selfJoin)
  }
  assert(caught.message.contains("Failure when resolving conflicting references in Join"))
  assert(caught.message.contains("Conflicting attributes"))
}
test("check grouping expression data types") {
// Builds GROUP BY a with SUM(b) and asserts whether analysis accepts `dataType`
// as a grouping key type.
def checkDataType(dataType: DataType, shouldSuccess: Boolean): Unit = {
val plan =
Aggregate(
AttributeReference("a", dataType)(exprId = ExprId(2)) :: Nil,
Alias(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1))), "c")() :: Nil,
LocalRelation(
AttributeReference("a", dataType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
if (shouldSuccess) {
assertAnalysisSuccess(plan, true)
} else {
assertAnalysisError(plan, "expression a cannot be used as a grouping expression" :: Nil)
}
}
// Every orderable/equatable type is groupable, including arrays, structs of
// such types, and UDTs declared groupable.
val supportedDataTypes = Seq(
StringType, BinaryType,
NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", StringType, nullable = true),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", ArrayType(BooleanType, containsNull = true), nullable = true),
new GroupableUDT())
supportedDataTypes.foreach { dataType =>
checkDataType(dataType, shouldSuccess = true)
}
// Map types (and anything containing them, plus ungroupable UDTs) have no
// deterministic ordering/equality and must be rejected as grouping keys.
val unsupportedDataTypes = Seq(
MapType(StringType, LongType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", MapType(StringType, LongType), nullable = true),
new UngroupableUDT())
unsupportedDataTypes.foreach { dataType =>
checkDataType(dataType, shouldSuccess = false)
}
}
test("we should fail analysis when we find nested aggregate functions") {
// SUM(SUM(b)) nests one aggregate inside another, which SQL forbids.
val plan =
Aggregate(
AttributeReference("a", IntegerType)(exprId = ExprId(2)) :: Nil,
Alias(sum(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1)))), "c")() :: Nil,
LocalRelation(
AttributeReference("a", IntegerType)(exprId = ExprId(2)),
AttributeReference("b", IntegerType)(exprId = ExprId(1))))
assertAnalysisError(
plan,
"It is not allowed to use an aggregate function in the argument of " +
"another aggregate function." :: Nil)
}
test("Join can work on binary types but can't work on map types") {
val left = LocalRelation(Symbol("a").binary, Symbol("b").map(StringType, StringType))
val right = LocalRelation(Symbol("c").binary, Symbol("d").map(StringType, StringType))
// Equality on binary columns is well-defined, so this join must analyze.
val plan1 = left.join(
right,
joinType = Cross,
condition = Some(Symbol("a") === Symbol("c")))
assertAnalysisSuccess(plan1)
// Map columns have no ordering/equality semantics, so an equi-join on them fails.
val plan2 = left.join(
right,
joinType = Cross,
condition = Some(Symbol("b") === Symbol("d")))
assertAnalysisError(plan2, "EqualTo does not support ordering on type map" :: Nil)
}
test("PredicateSubQuery is used outside of a filter") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
// IN-subquery appears in the SELECT list instead of a WHERE clause — not allowed.
val plan = Project(
Seq(a, Alias(InSubquery(Seq(a), ListQuery(LocalRelation(b))), "c")()),
LocalRelation(a))
assertAnalysisError(plan, "Predicate sub-queries can only be used" +
" in Filter" :: Nil)
}
test("PredicateSubQuery correlated predicate is nested in an illegal plan") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
// In each plan below, UnresolvedAttribute("a") is an outer reference into the
// enclosing Filter's relation; the correlated predicate sits under an operator
// (outer join side, union branch, limit, sample) where decorrelation is illegal.
// Case 1: correlated filter on the right side of a LEFT OUTER join.
val plan1 = Filter(
Exists(
Join(
LocalRelation(b),
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)),
LeftOuter,
Option(EqualTo(b, c)),
JoinHint.NONE)),
LocalRelation(a))
assertAnalysisError(plan1, "Accessing outer query column is not allowed in" :: Nil)
// Case 2: correlated filter on the left side of a RIGHT OUTER join.
val plan2 = Filter(
Exists(
Join(
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)),
LocalRelation(b),
RightOuter,
Option(EqualTo(b, c)),
JoinHint.NONE)),
LocalRelation(a))
assertAnalysisError(plan2, "Accessing outer query column is not allowed in" :: Nil)
// Case 3: correlated filter inside one branch of a UNION.
val plan3 = Filter(
Exists(Union(LocalRelation(b),
Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)))),
LocalRelation(a))
assertAnalysisError(plan3, "Accessing outer query column is not allowed in" :: Nil)
// Case 4: correlated filter under a LIMIT.
val plan4 = Filter(
Exists(
Limit(1,
Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b)))
),
LocalRelation(a))
assertAnalysisError(plan4, "Accessing outer query column is not allowed in" :: Nil)
// Case 5: correlated filter under a SAMPLE.
val plan5 = Filter(
Exists(
Sample(0.0, 0.5, false, 1L,
Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b))).select("b")
),
LocalRelation(a))
assertAnalysisError(plan5,
"Accessing outer query column is not allowed in" :: Nil)
}
test("Error on filter condition containing aggregate expressions") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
// max(b) inside a WHERE clause (not HAVING) must be rejected.
val plan = Filter(Symbol("a") === UnresolvedFunction("max", Seq(b), true), LocalRelation(a, b))
assertAnalysisError(plan,
"Aggregate/Window/Generate expressions are not valid in where clause of the query" :: Nil)
}
test("SPARK-30811: CTE should not cause stack overflow when " +
"it refers to non-existent table with same name") {
// CTE "t" references nonexist.t inside its own definition; resolution must
// report a missing table instead of recursing into the CTE forever.
val plan = With(
UnresolvedRelation(TableIdentifier("t")),
Seq("t" -> SubqueryAlias("t",
Project(
Alias(Literal(1), "x")() :: Nil,
UnresolvedRelation(TableIdentifier("t", Option("nonexist")))))))
assertAnalysisError(plan, "Table or view not found:" :: Nil)
}
// Typo fixed in the test name: "analyer" -> "analyzer".
test("SPARK-33909: Check rand functions seed is legal at analyzer side") {
  // Non-foldable seed expressions (attribute references) must be rejected.
  Seq(Rand("a".attr), Randn("a".attr)).foreach { r =>
    val plan = Project(Seq(r.as("r")), testRelation)
    assertAnalysisError(plan,
      s"Input argument to ${r.prettyName} must be a constant." :: Nil)
  }
  // Foldable seeds of the wrong type must fail the type check.
  Seq(Rand(1.0), Rand("1"), Randn("a")).foreach { r =>
    val plan = Project(Seq(r.as("r")), testRelation)
    // No interpolation in this message, so the stray `s` prefix was removed.
    assertAnalysisError(plan,
      "data type mismatch: argument 1 requires (int or bigint) type" :: Nil)
  }
}
test("SPARK-34946: correlated scalar subquery in grouping expressions only") {
val c1 = AttributeReference("c1", IntegerType)()
val c2 = AttributeReference("c2", IntegerType)()
val t = LocalRelation(c1, c2)
// The correlated scalar subquery appears as a GROUP BY key but not in the
// aggregate expressions — analysis must reject this.
val plan = Aggregate(
ScalarSubquery(
Aggregate(Nil, sum($"c2").as("sum") :: Nil,
Filter($"t1.c1" === $"t2.c1",
t.as("t2")))
) :: Nil,
sum($"c2").as("sum") :: Nil, t.as("t1"))
assertAnalysisError(plan, "Correlated scalar subqueries in the group by clause must also be " +
"in the aggregate expressions" :: Nil)
}
test("SPARK-34946: correlated scalar subquery in aggregate expressions only") {
val c1 = AttributeReference("c1", IntegerType)()
val c2 = AttributeReference("c2", IntegerType)()
val t = LocalRelation(c1, c2)
// Converse case: the subquery appears only in the aggregate list, neither in
// the GROUP BY nor wrapped in an aggregate function.
val plan = Aggregate(
$"c1" :: Nil,
ScalarSubquery(
Aggregate(Nil, sum($"c2").as("sum") :: Nil,
Filter($"t1.c1" === $"t2.c1",
t.as("t2")))
).as("sub") :: Nil, t.as("t1"))
assertAnalysisError(plan, "Correlated scalar subquery 'scalarsubquery(t1.c1)' is " +
"neither present in the group by, nor in an aggregate function. Add it to group by " +
"using ordinal position or wrap it in first() (or first_value) if you don't care " +
"which value you get." :: Nil)
}
// The resolution error must list the available input columns to help the user.
errorTest(
"SC-69611: error code to error message",
testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
"cannot resolve 'bad_column' given input columns: [a, b, c, d, e]" :: Nil)
test("SPARK-35080: Unsupported correlated equality predicates in subquery") {
val a = AttributeReference("a", IntegerType)()
val b = AttributeReference("b", IntegerType)()
val c = AttributeReference("c", IntegerType)()
val t1 = LocalRelation(a, b)
val t2 = LocalRelation(c)
// Each pair is (correlated condition, expected rendering in the error message).
// Equality predicates where the outer column is compared against a non-trivial
// expression of inner columns cannot be decorrelated.
val conditions = Seq(
(abs($"a") === $"c", "abs(a) = outer(c)"),
(abs($"a") <=> $"c", "abs(a) <=> outer(c)"),
($"a" + 1 === $"c", "(a + 1) = outer(c)"),
($"a" + $"b" === $"c", "(a + b) = outer(c)"),
($"a" + $"c" === $"b", "(a + outer(c)) = b"),
(And($"a" === $"c", Cast($"a", IntegerType) === $"c"), "CAST(a AS INT) = outer(c)"))
conditions.foreach { case (cond, msg) =>
val plan = Project(
ScalarSubquery(
Aggregate(Nil, count(Literal(1)).as("cnt") :: Nil,
Filter(cond, t1))
).as("sub") :: Nil,
t2)
assertAnalysisError(plan, s"Correlated column is not allowed in predicate ($msg)" :: Nil)
}
}
test("SPARK-35673: fail if the plan still contains UnresolvedHint after analysis") {
val hintName = "some_random_hint_that_does_not_exist"
val plan = UnresolvedHint(hintName, Seq.empty,
Project(Alias(Literal(1), "x")() :: Nil, OneRowRelation())
)
// The hint node itself reports resolved, so only checkAnalysis catches it.
assert(plan.resolved)
val error = intercept[AnalysisException] {
SimpleAnalyzer.checkAnalysis(plan)
}
assert(error.message.contains(s"Hint not found: ${hintName}"))
// A full analysis run succeeds because the batch `Remove Unresolved Hints`
// strips the unknown hint before the final check.
assertAnalysisSuccess(plan, true)
}
}
| wangmiao1981/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala | Scala | apache-2.0 | 31,135 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{Collections, Date, List => JList}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.mesos.{Scheduler, SchedulerDriver}
import org.apache.mesos.Protos.{TaskState => MesosTaskState, _}
import org.apache.mesos.Protos.Environment.Variable
import org.apache.mesos.Protos.TaskStatus.Reason
import org.apache.spark.{SecurityManager, SparkConf, SparkException, TaskState}
import org.apache.spark.deploy.mesos.{config, MesosDriverDescription}
import org.apache.spark.deploy.rest.{CreateSubmissionResponse, KillSubmissionResponse, SubmissionStatusResponse}
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.util.Utils
/**
* Tracks the current state of a Mesos Task that runs a Spark driver.
* @param driverDescription Submitted driver description from
* [[org.apache.spark.deploy.rest.mesos.MesosRestServer]]
* @param taskId Mesos TaskID generated for the task
* @param slaveId Slave ID that the task is assigned to
* @param mesosTaskStatus The last known task status update.
* @param startDate The date the task was launched
* @param finishDate The date the task finished
* @param frameworkId Mesos framework ID the task registers with
*/
private[spark] class MesosClusterSubmissionState(
val driverDescription: MesosDriverDescription,
val taskId: TaskID,
val slaveId: SlaveID,
// Mutable: updated as Mesos task status updates arrive.
var mesosTaskStatus: Option[TaskStatus],
var startDate: Date,
// None while the task is still running.
var finishDate: Option[Date],
val frameworkId: String)
extends Serializable {
// Shallow snapshot used when exposing state outside the scheduler's lock.
def copy(): MesosClusterSubmissionState = {
new MesosClusterSubmissionState(
driverDescription, taskId, slaveId, mesosTaskStatus, startDate, finishDate, frameworkId)
}
}
/**
* Tracks the retry state of a driver, which includes the next time it should be scheduled
* and necessary information to do exponential backoff.
* This class is not thread-safe, and we expect the caller to handle synchronizing state.
*
* @param lastFailureStatus Last Task status when it failed.
* @param retries Number of times it has been retried.
* @param nextRetry Time at which it should be retried next
* @param waitTime The amount of time driver is scheduled to wait until next retry.
*/
private[spark] class MesosClusterRetryState(
val lastFailureStatus: TaskStatus,
val retries: Int,
val nextRetry: Date,
// Wait before the next retry; grows with each failure (exponential backoff).
val waitTime: Int) extends Serializable {
// Snapshot copy; safe because all fields are immutable vals.
def copy(): MesosClusterRetryState =
new MesosClusterRetryState(lastFailureStatus, retries, nextRetry, waitTime)
}
/**
* The full state of the cluster scheduler, currently being used for displaying
* information on the UI.
*
* @param frameworkId Mesos Framework id for the cluster scheduler.
* @param masterUrl The Mesos master url
* @param queuedDrivers All drivers queued to be launched
* @param launchedDrivers All launched or running drivers
* @param finishedDrivers All terminated drivers
* @param pendingRetryDrivers All drivers pending to be retried
*/
// Immutable snapshot of the scheduler's driver collections, built for the UI.
private[spark] class MesosClusterSchedulerState(
val frameworkId: String,
val masterUrl: Option[String],
val queuedDrivers: Iterable[MesosDriverDescription],
val launchedDrivers: Iterable[MesosClusterSubmissionState],
val finishedDrivers: Iterable[MesosClusterSubmissionState],
val pendingRetryDrivers: Iterable[MesosDriverDescription])
/**
* The full state of a Mesos driver, that is being used to display driver information on the UI.
*/
private[spark] class MesosDriverState(
// One of "QUEUED", "RUNNING", "FINISHED", "RETRYING" (see getDriverState).
val state: String,
val description: MesosDriverDescription,
// Present only once the driver has actually been launched.
val submissionState: Option[MesosClusterSubmissionState] = None)
/**
* A Mesos scheduler that is responsible for launching submitted Spark drivers in cluster mode
* as Mesos tasks in a Mesos cluster.
* All drivers are launched asynchronously by the framework, which will eventually be launched
* by one of the slaves in the cluster. The results of the driver will be stored in slave's task
* sandbox which is accessible by visiting the Mesos UI.
* This scheduler supports recovery by persisting all its state and performs task reconciliation
* on recover, which gets all the latest state for all the drivers from Mesos master.
*/
private[spark] class MesosClusterScheduler(
engineFactory: MesosClusterPersistenceEngineFactory,
conf: SparkConf)
extends Scheduler with MesosSchedulerUtils {
// Dispatcher web UI URL, advertised to Mesos; set by the owner before start().
var frameworkUrl: String = _
private val metricsSystem =
MetricsSystem.createMetricsSystem("mesos_cluster", conf, new SecurityManager(conf))
private val master = conf.get("spark.master")
private val appName = conf.get("spark.app.name")
// Max number of drivers accepted for submission (see isQueueFull).
private val queuedCapacity = conf.getInt("spark.mesos.maxDrivers", 200)
// Max number of terminated drivers kept for status queries / the UI.
private val retainedDrivers = conf.getInt("spark.mesos.retainedDrivers", 200)
private val maxRetryWaitTime = conf.getInt("spark.mesos.cluster.retry.wait.max", 60) // 1 minute
private val useFetchCache = conf.getBoolean("spark.mesos.fetchCache.enable", false)
private val schedulerState = engineFactory.createEngine("scheduler")
// Guards all of the mutable driver collections below.
private val stateLock = new Object()
// Keyed by submission id
private val finishedDrivers =
new mutable.ArrayBuffer[MesosClusterSubmissionState](retainedDrivers)
private var frameworkId: String = null
// Holds all the launched drivers and current launch state, keyed by submission id.
private val launchedDrivers = new mutable.HashMap[String, MesosClusterSubmissionState]()
// Holds a map of driver id to expected slave id that is passed to Mesos for reconciliation.
// All drivers that are loaded after failover are added here, as we need get the latest
// state of the tasks from Mesos. Keyed by task Id.
private val pendingRecover = new mutable.HashMap[String, SlaveID]()
// Stores all the submitted drivers that haven't been launched, keyed by submission id.
private val queuedDrivers = new ArrayBuffer[MesosDriverDescription]()
// All supervised drivers that are waiting to retry after termination, keyed by submission id
private val pendingRetryDrivers = new ArrayBuffer[MesosDriverDescription]()
// Persistence engines: each collection is mirrored to durable storage so the
// dispatcher can recover after a restart (see recoverState()).
private val queuedDriversState = engineFactory.createEngine("driverQueue")
private val launchedDriversState = engineFactory.createEngine("launchedDrivers")
private val pendingRetryDriversState = engineFactory.createEngine("retryList")
// Separator between a submission id and its retry counter in task/framework ids.
private final val RETRY_SEP = "-retry-"
// Flag to mark if the scheduler is ready to be called, which is until the scheduler
// is registered with Mesos master.
@volatile protected var ready = false
private var masterInfo: Option[MasterInfo] = None
// Assigned in registered(); null until the framework registers with Mesos.
private var schedulerDriver: SchedulerDriver = _
/**
 * Queues a driver for launch. The request is rejected when the scheduler has
 * not yet registered with the Mesos master, or when the submission queue has
 * reached its configured capacity.
 */
def submitDriver(desc: MesosDriverDescription): CreateSubmissionResponse = {
  val response = new CreateSubmissionResponse
  if (!ready) {
    response.success = false
    response.message = "Scheduler is not ready to take requests"
  } else {
    stateLock.synchronized {
      if (isQueueFull()) {
        response.success = false
        response.message = "Already reached maximum submission size"
      } else {
        response.submissionId = desc.submissionId
        response.success = true
        addDriverToQueue(desc)
      }
    }
  }
  response
}
/**
 * Attempts to kill the driver identified by `submissionId`: a running driver
 * gets a Mesos killTask, a queued or retrying driver is removed from its list,
 * and finished/unknown drivers produce a failure response.
 */
def killDriver(submissionId: String): KillSubmissionResponse = {
val k = new KillSubmissionResponse
if (!ready) {
k.success = false
k.message = "Scheduler is not ready to take requests"
return k
}
k.submissionId = submissionId
stateLock.synchronized {
// We look for the requested driver in the following places:
// 1. Check if submission is running or launched.
// 2. Check if it's still queued.
// 3. Check if it's in the retry list.
// 4. Check if it has already completed.
if (launchedDrivers.contains(submissionId)) {
val state = launchedDrivers(submissionId)
schedulerDriver.killTask(state.taskId)
k.success = true
k.message = "Killing running driver"
} else if (removeFromQueuedDrivers(submissionId)) {
k.success = true
k.message = "Removed driver while it's still pending"
} else if (removeFromPendingRetryDrivers(submissionId)) {
k.success = true
k.message = "Removed driver while it's being retried"
} else if (finishedDrivers.exists(_.driverDescription.submissionId == submissionId)) {
k.success = false
k.message = "Driver already terminated"
} else {
k.success = false
k.message = "Cannot find driver"
}
}
k
}
/**
 * Reports the status of the given submission, checking (under the state lock)
 * the queued, launched, finished and retrying driver collections in that order.
 */
def getDriverStatus(submissionId: String): SubmissionStatusResponse = {
  val s = new SubmissionStatusResponse
  if (!ready) {
    s.success = false
    s.message = "Scheduler is not ready to take requests"
    return s
  }
  s.submissionId = submissionId
  stateLock.synchronized {
    if (queuedDrivers.exists(_.submissionId == submissionId)) {
      s.success = true
      s.driverState = "QUEUED"
    } else if (launchedDrivers.contains(submissionId)) {
      s.success = true
      s.driverState = "RUNNING"
      launchedDrivers(submissionId).mesosTaskStatus.foreach(state => s.message = state.toString)
    } else {
      // Single `find` instead of the previous exists-then-find-`.get` double
      // scan over finishedDrivers/pendingRetryDrivers.
      finishedDrivers.find(_.driverDescription.submissionId == submissionId) match {
        case Some(finished) =>
          s.success = true
          s.driverState = "FINISHED"
          finished.mesosTaskStatus.foreach(state => s.message = state.toString)
        case None =>
          pendingRetryDrivers.find(_.submissionId == submissionId) match {
            case Some(retrying) =>
              s.success = true
              s.driverState = "RETRYING"
              // Drivers on the retry list always carry a retry state with the
              // last failure status — presumably set when they were re-queued;
              // the original code relied on the same invariant via `.get`.
              s.message = retrying.retryState.get.lastFailureStatus.toString
            case None =>
              s.success = false
              s.driverState = "NOT_FOUND"
          }
      }
    }
  }
  s
}
/**
 * Gets the driver state to be displayed on the Web UI.
 * Checks the queued, launched, finished, and retrying collections in order and
 * tags the result with the matching state string.
 */
def getDriverState(submissionId: String): Option[MesosDriverState] = {
stateLock.synchronized {
queuedDrivers.find(_.submissionId == submissionId)
.map(d => new MesosDriverState("QUEUED", d))
.orElse(launchedDrivers.get(submissionId)
.map(d => new MesosDriverState("RUNNING", d.driverDescription, Some(d))))
.orElse(finishedDrivers.find(_.driverDescription.submissionId == submissionId)
.map(d => new MesosDriverState("FINISHED", d.driverDescription, Some(d))))
.orElse(pendingRetryDrivers.find(_.submissionId == submissionId)
.map(d => new MesosDriverState("RETRYING", d)))
}
}
/**
 * Whether the submission queue has reached `spark.mesos.maxDrivers`.
 * Fix: count queued drivers rather than launched ones — the method name, the
 * `maxDrivers` config, and submitDriver's "Already reached maximum submission
 * size" message all refer to the queue of pending submissions, but the
 * original compared `launchedDrivers.size` against the queue capacity.
 */
private def isQueueFull(): Boolean = queuedDrivers.size >= queuedCapacity
/**
 * Recover scheduler state that is persisted.
 * We still need to do task reconciliation to be up to date of the latest task states
 * as it might have changed while the scheduler is failing over.
 */
private def recoverState(): Unit = {
stateLock.synchronized {
// Recovered launched drivers also go into pendingRecover so registered()
// can ask Mesos for their authoritative task status.
launchedDriversState.fetchAll[MesosClusterSubmissionState]().foreach { state =>
launchedDrivers(state.driverDescription.submissionId) = state
pendingRecover(state.taskId.getValue) = state.slaveId
}
queuedDriversState.fetchAll[MesosDriverDescription]().foreach(d => queuedDrivers += d)
// There is potential timing issue where a queued driver might have been launched
// but the scheduler shuts down before the queued driver was able to be removed
// from the queue. We try to mitigate this issue by walking through all queued drivers
// and remove if they're already launched.
queuedDrivers
.filter(d => launchedDrivers.contains(d.submissionId))
.foreach(d => removeFromQueuedDrivers(d.submissionId))
pendingRetryDriversState.fetchAll[MesosDriverDescription]()
.foreach(s => pendingRetryDrivers += s)
// TODO: Consider storing finished drivers so we can show them on the UI after
// failover. For now we clear the history on each recovery.
finishedDrivers.clear()
}
}
/**
 * Starts the cluster scheduler and wait until the scheduler is registered.
 * This also marks the scheduler to be ready for requests.
 */
def start(): Unit = {
// TODO: Implement leader election to make sure only one framework running in the cluster.
// Reuse a previously persisted framework id so Mesos treats this as a
// failover of the same framework rather than a new one.
val fwId = schedulerState.fetch[String]("frameworkId")
fwId.foreach { id =>
frameworkId = id
}
recoverState()
metricsSystem.registerSource(new MesosClusterSchedulerSource(this))
metricsSystem.start()
// NOTE(review): frameworkUrl must have been assigned by the owner before
// start() is called; it is a bare var initialized to null above.
val driver = createSchedulerDriver(
master,
MesosClusterScheduler.this,
Utils.getCurrentUserName(),
appName,
conf,
Some(frameworkUrl),
Some(true),
Some(Integer.MAX_VALUE),
fwId)
startScheduler(driver)
ready = true
}
/**
 * Stops the cluster scheduler: stops accepting requests, flushes and stops
 * the metrics system, and shuts down the underlying Mesos driver.
 */
def stop(): Unit = {
  ready = false
  metricsSystem.report()
  metricsSystem.stop()
  // Guard against an NPE when stop() is called before the framework ever
  // registered: schedulerDriver is only assigned in registered().
  if (schedulerDriver != null) {
    // `true` is the Mesos failover flag — presumably keeps tasks running for
    // a failed-over framework; confirm against the SchedulerDriver.stop API.
    schedulerDriver.stop(true)
  }
}
/**
 * Mesos callback invoked once the framework registers with a master. Persists
 * a newly assigned framework id and kicks off task reconciliation for any
 * drivers recovered from persisted state.
 */
override def registered(
driver: SchedulerDriver,
newFrameworkId: FrameworkID,
masterInfo: MasterInfo): Unit = {
logInfo("Registered as framework ID " + newFrameworkId.getValue)
if (newFrameworkId.getValue != frameworkId) {
frameworkId = newFrameworkId.getValue
schedulerState.persist("frameworkId", frameworkId)
}
markRegistered()
stateLock.synchronized {
this.masterInfo = Some(masterInfo)
this.schedulerDriver = driver
if (!pendingRecover.isEmpty) {
// Start task reconciliation if we need to recover.
val statuses = pendingRecover.collect {
case (taskId, slaveId) =>
// Use the last known status if we have one; otherwise synthesize a
// TASK_STAGING status so Mesos reports the authoritative state.
val newStatus = TaskStatus.newBuilder()
.setTaskId(TaskID.newBuilder().setValue(taskId).build())
.setSlaveId(slaveId)
.setState(MesosTaskState.TASK_STAGING)
.build()
launchedDrivers.get(getSubmissionIdFromTaskId(taskId))
.map(_.mesosTaskStatus.getOrElse(newStatus))
.getOrElse(newStatus)
}
// TODO: Page the status updates to avoid trying to reconcile
// a large amount of tasks at once.
driver.reconcileTasks(statuses.toSeq.asJava)
}
}
}
/** Executor URI from the driver's conf, falling back to SPARK_EXECUTOR_URI in its env. */
private def getDriverExecutorURI(desc: MesosDriverDescription): Option[String] = {
  val fromConf = desc.conf.getOption("spark.executor.uri")
  val fromEnv = desc.command.environment.get("SPARK_EXECUTOR_URI")
  fromConf.orElse(fromEnv)
}
/** Framework id for the driver task: frameworkId-submissionId[-retry-N]. */
private def getDriverFrameworkID(desc: MesosDriverDescription): String = {
  val retrySuffix = desc.retryState match {
    case Some(state) => s"$RETRY_SEP${state.retries.toString}"
    case None => ""
  }
  s"$frameworkId-${desc.submissionId}$retrySuffix"
}
/** Task id for the driver: the submission id, plus "-retry-N" when retrying. */
private def getDriverTaskId(desc: MesosDriverDescription): String = {
  desc.retryState match {
    case Some(state) => desc.submissionId + s"$RETRY_SEP${state.retries.toString}"
    case None => desc.submissionId
  }
}
/** Strips any "-retry-N" suffix from a task id, recovering the submission id. */
private def getSubmissionIdFromTaskId(taskId: String): String = {
  // split takes a regex; RETRY_SEP ("-retry-") contains no regex metacharacters.
  taskId.split(RETRY_SEP).head
}
/** Returns `m` with key `k` rebound to `f` applied to its current (or default) value. */
private def adjust[A, B](m: collection.Map[A, B], k: A, default: B)(f: B => B) = {
  val current = m.getOrElse(k, default)
  m.updated(k, f(current))
}
/**
 * Builds the Mesos Environment for the driver task: the submitted command's
 * environment (with the framework id appended to SPARK_SUBMIT_OPTS), any
 * `spark.mesos.driverEnv.*` overrides, and secret-backed variables.
 */
private def getDriverEnvironment(desc: MesosDriverDescription): Environment = {
// TODO(mgummelt): Don't do this here. This should be passed as a --conf
val commandEnv = adjust(desc.command.environment, "SPARK_SUBMIT_OPTS", "")(
v => s"$v -Dspark.mesos.driver.frameworkId=${getDriverFrameworkID(desc)}"
)
// driverEnv.* entries come first; commandEnv wins on duplicate keys.
val env = desc.conf.getAllWithPrefix("spark.mesos.driverEnv.") ++ commandEnv
val envBuilder = Environment.newBuilder()
// add normal environment variables
env.foreach { case (k, v) =>
envBuilder.addVariables(Variable.newBuilder().setName(k).setValue(v))
}
// add secret environment variables
MesosSchedulerBackendUtil.getSecretEnvVar(desc.conf, config.driverSecretConfig)
.foreach { variable =>
if (variable.getSecret.getReference.isInitialized) {
logInfo(s"Setting reference secret ${variable.getSecret.getReference.getName} " +
s"on file ${variable.getName}")
} else {
logInfo(s"Setting secret on environment variable name=${variable.getName}")
}
envBuilder.addVariables(variable)
}
envBuilder.build()
}
/**
 * True when the application jar is a `local://` URI that should be resolved
 * inside the container image ("container" resolution mode) rather than on the
 * host. Unknown modes log a warning and fall back to host resolution.
 */
private def isContainerLocalAppJar(desc: MesosDriverDescription): Boolean = {
  val jarIsLocalUri = desc.jarUrl.startsWith("local://")
  // Evaluated unconditionally (as in the original) so the warning for an
  // unknown mode fires regardless of the jar URI scheme.
  val resolveInContainer =
    desc.conf.getOption("spark.mesos.appJar.local.resolution.mode").exists {
      case "container" => true
      case "host" => false
      case other =>
        logWarning(s"Unknown spark.mesos.appJar.local.resolution.mode $other, using host.")
        false
    }
  jarIsLocalUri && resolveInContainer
}
/**
 * Builds the list of URIs Mesos should fetch into the driver task's sandbox:
 * global and per-driver `spark.mesos.uris`, `spark.submit.pyFiles`, the
 * application jar (unless it is container-local), and the executor URI if set.
 */
private def getDriverUris(desc: MesosDriverDescription): List[CommandInfo.URI] = {
  // Comma-separated URI lists from the scheduler conf and the submitted driver conf.
  val confUris = List(conf.getOption("spark.mesos.uris"),
    desc.conf.getOption("spark.mesos.uris"),
    desc.conf.getOption("spark.submit.pyFiles")).flatMap(
    _.map(_.split(",").map(_.trim))
  ).flatten

  // Factored out of both branches below to remove the duplicated builder code.
  def toFetchUri(uri: String): CommandInfo.URI =
    CommandInfo.URI.newBuilder().setValue(uri.trim()).setCache(useFetchCache).build()

  val uris = if (isContainerLocalAppJar(desc)) {
    // The jar already lives inside the container image; don't ask Mesos to fetch it.
    confUris ++ getDriverExecutorURI(desc).toList
  } else {
    val jarUrl = desc.jarUrl.stripPrefix("file:").stripPrefix("local:")
    (jarUrl :: confUris) ++ getDriverExecutorURI(desc).toList
  }
  uris.map(toFetchUri)
}
/**
 * Builds the ContainerInfo for the driver task from its conf, attaching any
 * secret-backed volumes configured via the driver secret config.
 */
private def getContainerInfo(desc: MesosDriverDescription): ContainerInfo.Builder = {
val containerInfo = MesosSchedulerBackendUtil.buildContainerInfo(desc.conf)
MesosSchedulerBackendUtil.getSecretVolume(desc.conf, config.driverSecretConfig)
.foreach { volume =>
// Log which kind of secret (reference vs. value) backs the volume.
if (volume.getSource.getSecret.getReference.isInitialized) {
logInfo(s"Setting reference secret ${volume.getSource.getSecret.getReference.getName} " +
s"on file ${volume.getContainerPath}")
} else {
logInfo(s"Setting secret on file name=${volume.getContainerPath}")
}
containerInfo.addVolumes(volume)
}
containerInfo
}
/**
 * Builds the shell command line that launches the driver via spark-submit.
 * The spark-submit executable and the sandbox path differ depending on whether
 * the task runs in a docker image, an extracted executor tarball, or a
 * pre-installed Spark home.
 */
private def getDriverCommandValue(desc: MesosDriverDescription): String = {
val dockerDefined = desc.conf.contains("spark.mesos.executor.docker.image")
val executorUri = getDriverExecutorURI(desc)
// Gets the path to run spark-submit, and the path to the Mesos sandbox.
val (executable, sandboxPath) = if (dockerDefined) {
// Application jar is automatically downloaded in the mounted sandbox by Mesos,
// and the path to the mounted volume is stored in $MESOS_SANDBOX env variable.
("./bin/spark-submit", "$MESOS_SANDBOX")
} else if (executorUri.isDefined) {
// The fetched tarball extracts into a folder named after the archive
// (first dot-segment of the last path component); cd into it first.
val folderBasename = executorUri.get.split('/').last.split('.').head
val entries = conf.getOption("spark.executor.extraLibraryPath")
.map(path => Seq(path) ++ desc.command.libraryPathEntries)
.getOrElse(desc.command.libraryPathEntries)
val prefixEnv = if (!entries.isEmpty) Utils.libraryPathEnvPrefix(entries) else ""
val cmdExecutable = s"cd $folderBasename*; $prefixEnv bin/spark-submit"
// Sandbox path points to the parent folder as we chdir into the folderBasename.
(cmdExecutable, "..")
} else {
val executorSparkHome = desc.conf.getOption("spark.mesos.executor.home")
.orElse(conf.getOption("spark.home"))
.orElse(Option(System.getenv("SPARK_HOME")))
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val cmdExecutable = new File(executorSparkHome, "./bin/spark-submit").getPath
// Sandbox points to the current directory by default with Mesos.
(cmdExecutable, ".")
}
val cmdOptions = generateCmdOption(desc, sandboxPath).mkString(" ")
// Primary resource: the container-local jar path, or the fetched jar inside
// the sandbox.
val primaryResource = {
if (isContainerLocalAppJar(desc)) {
new File(desc.jarUrl.stripPrefix("local://")).toString()
} else {
new File(sandboxPath, desc.jarUrl.split("/").last).toString()
}
}
val appArguments = desc.command.arguments.mkString(" ")
s"$executable $cmdOptions $primaryResource $appArguments"
}
// Assembles the complete CommandInfo for a driver: the shell command line,
// its environment variables, and the URIs Mesos should fetch into the sandbox.
private def buildDriverCommand(desc: MesosDriverDescription): CommandInfo = {
  CommandInfo.newBuilder()
    .setValue(getDriverCommandValue(desc))
    .setEnvironment(getDriverEnvironment(desc))
    .addAllUris(getDriverUris(desc).asJava)
    .build()
}
// Builds the spark-submit argument list for a driver submission. Each group of
// options is computed independently and concatenated in the original order;
// every argument is shell-escaped at the end.
private def generateCmdOption(desc: MesosDriverDescription, sandboxPath: String): Seq[String] = {
  // Options present on every submission.
  val baseOptions = Seq(
    "--name", desc.conf.get("spark.app.name"),
    "--master", s"mesos://${conf.get("spark.master")}",
    "--driver-cores", desc.cores.toString,
    "--driver-memory", s"${desc.mem}M")
  // Assume empty main class means we're running python
  val classOption =
    if (desc.command.mainClass.equals("")) Seq.empty[String]
    else Seq("--class", desc.command.mainClass)
  val executorMemOption = desc.conf.getOption("spark.executor.memory").toSeq.flatMap { v =>
    Seq("--executor-memory", v)
  }
  val maxCoresOption = desc.conf.getOption("spark.cores.max").toSeq.flatMap { v =>
    Seq("--total-executor-cores", v)
  }
  // Python files are rewritten to their fetched locations inside the sandbox.
  val pyFilesOption = desc.conf.getOption("spark.submit.pyFiles").toSeq.flatMap { pyFiles =>
    val formattedFiles = pyFiles.split(",")
      .map { path => new File(sandboxPath, path.split("/").last).toString() }
      .mkString(",")
    Seq("--py-files", formattedFiles)
  }
  // --conf: forward dispatcher defaults plus the submission's own config,
  // except options that must not be replicated into the driver.
  val replicatedOptionsBlacklist = Set(
    "spark.jars", // Avoids duplicate classes in classpath
    "spark.submit.deployMode", // this would be set to `cluster`, but we need client
    "spark.master" // this contains the address of the dispatcher, not master
  )
  val defaultConf = conf.getAllWithPrefix("spark.mesos.dispatcher.driverDefault.").toMap
  val driverConf = desc.conf.getAll
    .filter { case (key, _) => !replicatedOptionsBlacklist.contains(key) }
    .toMap
  val confOptions = (defaultConf ++ driverConf).toSeq.flatMap { case (key, value) =>
    Seq("--conf", s"${key}=${value}")
  }
  (baseOptions ++ classOption ++ executorMemOption ++ maxCoresOption ++
    pyFilesOption ++ confOptions).map(shellEscape)
}
/**
* Escape args for Unix-like shells, unless already quoted by the user.
* Based on: http://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
* and http://www.grymoire.com/Unix/Quote.html
*
* @param value argument
* @return escaped argument
*/
private[scheduler] def shellEscape(value: String): String = {
// Matches arguments the user already wrapped entirely in single or double quotes.
val WrappedInQuotes = """^(".+"|'.+')$""".r
// Matches arguments containing any character the shell treats specially.
val ShellSpecialChars = (""".*([ '<>&|\\?\\*;!#\\\\(\\)"$`]).*""").r
value match {
case WrappedInQuotes(c) => value // The user quoted his args, don't touch it!
// Wrap in double quotes and backslash-escape the characters that remain
// special inside double quotes.
case ShellSpecialChars(c) => "\\"" + value.replaceAll("""(["`\\$\\\\])""", """\\\\$1""") + "\\""
case _: String => value // Don't touch harmless strings
}
}
// Mutable wrapper around a Mesos offer: `remainingResources` is decremented as
// driver tasks are scheduled against this offer within a single offer round.
private class ResourceOffer(
val offer: Offer,
var remainingResources: JList[Resource],
var attributes: JList[Attribute]) {
override def toString(): String = {
s"Offer id: ${offer.getId}, resources: ${remainingResources}, attributes: ${attributes}"
}
}
// Builds the Mesos TaskInfo that launches a driver from the given offer,
// consuming cpu and memory from the offer's remaining resources as a side
// effect (the offer may then be reused for further drivers in this round).
private def createTaskInfo(desc: MesosDriverDescription, offer: ResourceOffer): TaskInfo = {
val taskId = TaskID.newBuilder().setValue(getDriverTaskId(desc)).build()
// Carve the driver's cpus out of the offer first, then its memory out of
// whatever is left; the leftover becomes the offer's new remaining resources.
val (remainingResources, cpuResourcesToUse) =
partitionResources(offer.remainingResources, "cpus", desc.cores)
val (finalResources, memResourcesToUse) =
partitionResources(remainingResources.asJava, "mem", desc.mem)
offer.remainingResources = finalResources.asJava
val appName = desc.conf.get("spark.app.name")
// Labels configured for the driver; empty string yields no labels.
val driverLabels = MesosProtoUtils.mesosLabels(desc.conf.get(config.DRIVER_LABELS)
.getOrElse(""))
TaskInfo.newBuilder()
.setTaskId(taskId)
.setName(s"Driver for ${appName}")
.setSlaveId(offer.offer.getSlaveId)
.setCommand(buildDriverCommand(desc))
.setContainer(getContainerInfo(desc))
.addAllResources(cpuResourcesToUse.asJava)
.addAllResources(memResourcesToUse.asJava)
.setLabels(driverLabels)
.build
}
/**
* This method takes all the possible candidates and attempt to schedule them with Mesos offers.
* Every time a new task is scheduled, the afterLaunchCallback is called to perform post scheduled
* logic on each task.
*
* Offers in `currentOffers` are mutated as their resources are consumed, and
* successfully built tasks are appended to `tasks`, keyed by offer id.
*/
private def scheduleTasks(
candidates: Seq[MesosDriverDescription],
afterLaunchCallback: (String) => Boolean,
currentOffers: List[ResourceOffer],
tasks: mutable.HashMap[OfferID, ArrayBuffer[TaskInfo]]): Unit = {
for (submission <- candidates) {
val driverCpu = submission.cores
val driverMem = submission.mem
val driverConstraints =
parseConstraintString(submission.conf.get(config.DRIVER_CONSTRAINTS))
logTrace(s"Finding offer to launch driver with cpu: $driverCpu, mem: $driverMem, " +
s"driverConstraints: $driverConstraints")
// First offer with enough remaining cpu/mem that also satisfies the
// driver's attribute constraints.
val offerOption = currentOffers.find { offer =>
getResource(offer.remainingResources, "cpus") >= driverCpu &&
getResource(offer.remainingResources, "mem") >= driverMem &&
matchesAttributeRequirements(driverConstraints, toAttributeMap(offer.attributes))
}
if (offerOption.isEmpty) {
// No fitting offer this round; the submission stays queued for later.
logDebug(s"Unable to find offer to launch driver id: ${submission.submissionId}, " +
s"cpu: $driverCpu, mem: $driverMem")
} else {
val offer = offerOption.get
val queuedTasks = tasks.getOrElseUpdate(offer.offer.getId, new ArrayBuffer[TaskInfo])
try {
// createTaskInfo also deducts the consumed resources from the offer.
val task = createTaskInfo(submission, offer)
queuedTasks += task
logTrace(s"Using offer ${offer.offer.getId.getValue} to launch driver " +
submission.submissionId + s" with taskId: ${task.getTaskId.toString}")
val newState = new MesosClusterSubmissionState(
submission,
task.getTaskId,
offer.offer.getSlaveId,
None,
new Date(),
None,
getDriverFrameworkID(submission))
// Record the launch both in memory and in the persisted engine state
// before removing the submission from its source queue.
launchedDrivers(submission.submissionId) = newState
launchedDriversState.persist(submission.submissionId, newState)
afterLaunchCallback(submission.submissionId)
} catch {
case e: SparkException =>
// Building the task failed; drop the submission from its queue and
// record it as finished with an empty slave id and no finish date.
afterLaunchCallback(submission.submissionId)
finishedDrivers += new MesosClusterSubmissionState(
submission,
TaskID.newBuilder().setValue(submission.submissionId).build(),
SlaveID.newBuilder().setValue("").build(),
None,
null,
None,
getDriverFrameworkID(submission))
logError(s"Failed to launch the driver with id: ${submission.submissionId}, " +
s"cpu: $driverCpu, mem: $driverMem, reason: ${e.getMessage}")
}
}
}
}
// Mesos callback invoked with a fresh batch of resource offers. Retried
// supervised drivers are scheduled before newly queued ones; unused offers
// are declined with a configurable refusal duration.
override def resourceOffers(driver: SchedulerDriver, offers: JList[Offer]): Unit = {
logTrace(s"Received offers from Mesos: \\n${offers.asScala.mkString("\\n")}")
val tasks = new mutable.HashMap[OfferID, ArrayBuffer[TaskInfo]]()
val currentTime = new Date()
val currentOffers = offers.asScala.map {
offer => new ResourceOffer(offer, offer.getResourcesList, offer.getAttributesList)
}.toList
stateLock.synchronized {
// We first schedule all the supervised drivers that are ready to retry.
// This list will be empty if none of the drivers are marked as supervise.
val driversToRetry = pendingRetryDrivers.filter { d =>
d.retryState.get.nextRetry.before(currentTime)
}
scheduleTasks(
copyBuffer(driversToRetry),
removeFromPendingRetryDrivers,
currentOffers,
tasks)
// Then we walk through the queued drivers and try to schedule them.
scheduleTasks(
copyBuffer(queuedDrivers),
removeFromQueuedDrivers,
currentOffers,
tasks)
}
// Launch all scheduled tasks, one launchTasks call per offer.
tasks.foreach { case (offerId, taskInfos) =>
driver.launchTasks(Collections.singleton(offerId), taskInfos.asJava)
}
// Decline every offer that ended up unused this round.
for (offer <- currentOffers if !tasks.contains(offer.offer.getId)) {
declineOffer(driver, offer.offer, None, Some(getRejectOfferDuration(conf)))
}
}
// Defensive copy so callers can work on a snapshot while the scheduler keeps
// mutating the live buffer. Uses `++=` instead of `copyToBuffer`, which is
// deprecated (and removed in Scala 2.13); behavior is identical.
private def copyBuffer(
buffer: ArrayBuffer[MesosDriverDescription]): ArrayBuffer[MesosDriverDescription] = {
  val newBuffer = new ArrayBuffer[MesosDriverDescription](buffer.size)
  newBuffer ++= buffer
  newBuffer
}
// Snapshot of the scheduler's current state for the dispatcher UI/API.
// Buffers and states are copied under the state lock so the snapshot is
// consistent and insulated from later mutation.
def getSchedulerState(): MesosClusterSchedulerState = {
stateLock.synchronized {
new MesosClusterSchedulerState(
frameworkId,
masterInfo.map(m => s"http://${m.getIp}:${m.getPort}"),
copyBuffer(queuedDrivers),
launchedDrivers.values.map(_.copy()).toList,
finishedDrivers.map(_.copy()).toList,
copyBuffer(pendingRetryDrivers))
}
}
// Mesos Scheduler callbacks. Offer rescission, disconnects, and slave loss
// need no action here; re-registration is logged, and framework errors mark
// the scheduler as errored.
override def offerRescinded(driver: SchedulerDriver, offerId: OfferID): Unit = {}
override def disconnected(driver: SchedulerDriver): Unit = {}
override def reregistered(driver: SchedulerDriver, masterInfo: MasterInfo): Unit = {
logInfo(s"Framework re-registered with master ${masterInfo.getId}")
}
override def slaveLost(driver: SchedulerDriver, slaveId: SlaveID): Unit = {}
override def error(driver: SchedulerDriver, error: String): Unit = {
logError("Error received: " + error)
markErr()
}
/**
 * Check if the task state is a recoverable state that we can relaunch the task.
 * Task state like TASK_ERROR are not relaunchable state since it wasn't able
 * to be validated by Mesos.
 */
private def shouldRelaunch(state: MesosTaskState): Boolean =
  state match {
    case MesosTaskState.TASK_FAILED | MesosTaskState.TASK_LOST => true
    case _ => false
  }
// Mesos callback for task status changes. For supervised drivers that failed
// or were lost, schedules a retry with exponential backoff (capped at
// maxRetryWaitTime); terminal states retire the driver into finishedDrivers.
override def statusUpdate(driver: SchedulerDriver, status: TaskStatus): Unit = {
val taskId = status.getTaskId.getValue
logInfo(s"Received status update: taskId=${taskId}" +
s" state=${status.getState}" +
s" message=${status.getMessage}" +
s" reason=${status.getReason}")
stateLock.synchronized {
val subId = getSubmissionIdFromTaskId(taskId)
if (launchedDrivers.contains(subId)) {
if (status.getReason == Reason.REASON_RECONCILIATION &&
!pendingRecover.contains(taskId)) {
// Task has already received update and no longer requires reconciliation.
return
}
val state = launchedDrivers(subId)
// Check if the driver is supervise enabled and can be relaunched.
if (state.driverDescription.supervise && shouldRelaunch(status.getState)) {
removeFromLaunchedDrivers(subId)
state.finishDate = Some(new Date())
val retryState: Option[MesosClusterRetryState] = state.driverDescription.retryState
// Double the wait time on each retry, starting from 1 second.
val (retries, waitTimeSec) = retryState
.map { rs => (rs.retries + 1, Math.min(maxRetryWaitTime, rs.waitTime * 2)) }
.getOrElse{ (1, 1) }
val nextRetry = new Date(new Date().getTime + waitTimeSec * 1000L)
val newDriverDescription = state.driverDescription.copy(
retryState = Some(new MesosClusterRetryState(status, retries, nextRetry, waitTimeSec)))
addDriverToPending(newDriverDescription, newDriverDescription.submissionId)
} else if (TaskState.isFinished(mesosToTaskState(status.getState))) {
retireDriver(subId, state)
}
// Record the latest Mesos status on the submission state in all cases.
state.mesosTaskStatus = Option(status)
} else {
logError(s"Unable to find driver with $taskId in status update")
}
}
}
// Moves a finished driver out of launchedDrivers into the finishedDrivers
// history, trimming the oldest ~10% of the history when it reaches the
// retention limit.
private def retireDriver(
submissionId: String,
state: MesosClusterSubmissionState) = {
removeFromLaunchedDrivers(submissionId)
state.finishDate = Some(new Date())
if (finishedDrivers.size >= retainedDrivers) {
val toRemove = math.max(retainedDrivers / 10, 1)
finishedDrivers.trimStart(toRemove)
}
finishedDrivers += state
}
// Remaining Scheduler callbacks; framework messages and executor loss are
// intentionally ignored by the cluster scheduler.
override def frameworkMessage(
driver: SchedulerDriver,
executorId: ExecutorID,
slaveId: SlaveID,
message: Array[Byte]): Unit = {}
override def executorLost(
driver: SchedulerDriver,
executorId: ExecutorID,
slaveId: SlaveID,
status: Int): Unit = {}
// Removes a submission from the in-memory queue and the persisted queue
// state; returns whether the submission was present.
private def removeFromQueuedDrivers(subId: String): Boolean = {
  val index = queuedDrivers.indexWhere(_.submissionId == subId)
  val found = index >= 0
  if (found) {
    queuedDrivers.remove(index)
    queuedDriversState.expunge(subId)
  }
  found
}
// Removes a submission from the launched-drivers map and the persisted
// launched state; returns whether the submission was present.
private def removeFromLaunchedDrivers(subId: String): Boolean =
  launchedDrivers.remove(subId) match {
    case Some(_) =>
      launchedDriversState.expunge(subId)
      true
    case None =>
      false
  }
// Removes a submission from the pending-retry buffer and the persisted
// retry state; returns whether the submission was present.
private def removeFromPendingRetryDrivers(subId: String): Boolean = {
  val index = pendingRetryDrivers.indexWhere(_.submissionId == subId)
  val found = index >= 0
  if (found) {
    pendingRetryDrivers.remove(index)
    pendingRetryDriversState.expunge(subId)
  }
  found
}
// Queue-size accessors, used by the dispatcher for metrics/capacity checks.
def getQueuedDriversSize: Int = queuedDrivers.size
def getLaunchedDriversSize: Int = launchedDrivers.size
def getPendingRetryDriversSize: Int = pendingRetryDrivers.size
// Persists and enqueues a new submission, then revives offers so it can be
// scheduled promptly.
private def addDriverToQueue(desc: MesosDriverDescription): Unit = {
queuedDriversState.persist(desc.submissionId, desc)
queuedDrivers += desc
revive()
}
// Persists and enqueues a submission awaiting retry, then revives offers.
private def addDriverToPending(desc: MesosDriverDescription, subId: String) = {
pendingRetryDriversState.persist(subId, desc)
pendingRetryDrivers += desc
revive()
}
// Asks Mesos to resend offers so newly added work can be scheduled.
private def revive(): Unit = {
logInfo("Reviving Offers.")
schedulerDriver.reviveOffers()
}
}
| michalsenkyr/spark | resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala | Scala | apache-2.0 | 34,728 |
package poly.collection.factory
import poly.collection._
import poly.collection.conversion.FromScala._
import scala.language.higherKinds
/**
* Represents a factory that
* - given a traversable collection whose elements are of type E[A, B],
* - requiring an evidence of type Ev[A] and Ev[B],
* - builds an object of type R[A, B].
* @author Tongfei Chen
* @since 0.1.0
*/
trait Factory2[-E[_, _], +R[_, _], Ev1[_], Ev2[_]] { self =>

  /** Returns a new builder of this collection type. */
  def newBuilder[A : Ev1, B: Ev2]: Builder[E[A, B], R[A, B]]

  /** Grounds this factory by providing a type parameter and associated evidences. */
  implicit def ground[A : Ev1, B : Ev2]: Factory0[E[A, B], R[A, B]] = new Factory0[E[A, B], R[A, B]] {
    def newBuilder: Builder[E[A, B], R[A, B]] = self.newBuilder
  }

  /** Creates an empty collection. */
  def empty[A : Ev1, B: Ev2] = newBuilder[A, B].result()

  /** Creates a collection by adding the arguments into it. */
  def apply[A : Ev1, B : Ev2](xs: E[A, B]*): R[A, B] = from(xs.asPoly)

  /** Creates a collection by adding all the elements in the specific traversable sequence. */
  def from[A : Ev1, B: Ev2](xs: Traversable[E[A, B]]) = {
    val b = newBuilder[A, B]
    // Pre-size the builder when the input knows its size, avoiding reallocation.
    if (xs.sizeKnown) b.sizeHint(xs.size)
    b.addAll(xs)
    // Consistency: call result() with parentheses, matching `empty` above.
    b.result()
  }
}
| ctongfei/poly-collection | core/src/main/scala/poly/collection/factory/Factory2.scala | Scala | mit | 1,311 |
package kafka.utils
import java.io._
import java.nio.channels._
/**
* A file lock a la flock/funlock
*
* The given path will be created and opened if it doesn't exist.
*/
class FileLock(val file: File) extends Logging {
  // Ensure the lock file exists before opening a channel on it.
  file.createNewFile()

  private val channel = new RandomAccessFile(file, "rw").getChannel()
  // The OS-level lock currently held, if any. Guarded by `this`.
  private var held: Option[java.nio.channels.FileLock] = None

  /**
   * Lock the file or throw an exception if the lock is already held
   */
  def lock(): Unit = {
    this synchronized {
      trace("Acquiring lock on " + file.getAbsolutePath)
      held = Some(channel.lock())
    }
  }

  /**
   * Try to lock the file and return true if the locking succeeds
   */
  def tryLock(): Boolean = {
    this synchronized {
      trace("Acquiring lock on " + file.getAbsolutePath)
      try {
        // tryLock returns null when the lock is held by another process, but
        // throws OverlappingFileLockException when it is held by this process,
        // so both outcomes must be handled.
        held = Option(channel.tryLock())
        held.isDefined
      } catch {
        case e: OverlappingFileLockException => false
      }
    }
  }

  /**
   * Unlock the lock if it is held
   */
  def unlock(): Unit = {
    this synchronized {
      trace("Releasing lock on " + file.getAbsolutePath)
      held.foreach(_.release())
    }
  }

  /**
   * Destroy this lock, closing the associated FileChannel
   */
  def destroy() = {
    this synchronized {
      unlock()
      channel.close()
    }
  }
}
package lang.lambda
import name._
import name.namefix.NameFix._
import name.namegraph.NameGraphExtended
import ref.{Declaration, Structural}
/**
* Created by seba on 01/08/14.
*/
abstract class Exp extends Nominal {
  // A scope maps a surface name to the set of identifiers it may bind to.
  type Scope = Map[String, Set[Identifier]]

  /** Resolves names starting from an empty scope. */
  def resolveNames: NameGraphExtended = resolveNames(Map())
  def resolveNames(scope: Scope): NameGraphExtended

  def rename(renaming: Renaming): Exp

  /** Capture-unaware substitution; may introduce accidental captures. */
  def unsafeSubst(x: String, e: Exp): Exp

  /** Capture-avoiding substitution: substitute, then repair names via name-fix. */
  def subst(w: String, e: Exp): Exp = nameFixExtended(resolveNames, unsafeSubst(w, e))

  def unsafeNormalize: Exp

  /** Normalization followed by name-fixing to repair any captures. */
  def normalize = nameFixExtended(resolveNames, unsafeNormalize)

  /** Alpha-equivalence, checked under the combined name graphs of both terms. */
  def alphaEqual(e: Exp): Boolean = alphaEqual(e, this.resolveNames + e.resolveNames)
  def alphaEqual(e: Exp, g: NameGraphExtended): Boolean

  override def asStructural = asStructural(Map())
  def asStructural(g: Map[String, Declaration]): lang.lambdaref.Exp
}
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.report
import model.persisted.CivilServiceExperienceDetailsForDiversityReport
import play.api.libs.json.Json
// Row model for the civil-service experience section of the diversity report.
// Every field is optional because candidates may not have answered each
// question; values are pre-formatted strings ready for report output.
case class CivilServiceExperienceDetailsReportItem(
isCivilServant: Option[String],
isEDIP: Option[String],
edipYear: Option[String],
isSDIP: Option[String],
sdipYear: Option[String],
otherInternship: Option[String],
otherInternshipName: Option[String],
otherInternshipYear: Option[String],
fastPassCertificate: Option[String]
)
object CivilServiceExperienceDetailsReportItem {
  // JSON (de)serialization for the report row.
  implicit val civilServiceExperienceDetailsReportItemFormat = Json.format[CivilServiceExperienceDetailsReportItem]

  /** Maps the persisted diversity-report details onto a report row, field for field. */
  def apply(civilServiceExperience: CivilServiceExperienceDetailsForDiversityReport): CivilServiceExperienceDetailsReportItem = {
    import civilServiceExperience._
    CivilServiceExperienceDetailsReportItem(
      isCivilServant,
      isEDIP,
      edipYear,
      isSDIP,
      sdipYear,
      otherInternship,
      otherInternshipName,
      otherInternshipYear,
      fastPassCertificate)
  }
}
| hmrc/fset-faststream | app/model/report/CivilServiceExperienceDetailsReportItem.scala | Scala | apache-2.0 | 2,464 |
// Copyright (C) 2015, codejitsu.
package net.codejitsu.saruman.dsl
import scala.collection.immutable.IndexedSeq
/**
* Host.
*/
/**
 * A host name built from dot-separated parts; `~` extends it with further
 * parts, or fans it out into a set of hosts.
 */
case class Host(parts: collection.immutable.Seq[HostPart]) {
  override def toString(): String = parts.mkString(".")

  /** Appends one more name part, producing a longer host name. */
  def ~ (part: String): Host = copy(parts = parts :+ HostPart(part))

  /** Fans out into one host per element, each extended with that element. */
  def ~[T](part: IndexedSeq[T]): Hosts =
    Hosts(part.toSeq.map(p => Host(parts :+ HostPart(p.toString))))

  /** Fans out over a tuple's elements, treating each as a name part. */
  def ~[T <: Product](part: T): Hosts =
    this ~ part.productIterator.map(_.toString).toIndexedSeq

  def isValid: Boolean = parts.forall(_.isValid)
}
// Singleton host representing "localhost". It is a complete address, so every
// attempt to extend it with further parts is rejected as a programming error.
object Localhost extends Host(List(HostPart("localhost"))) {
override def toString(): String = "localhost"
override def ~ (part: String): Host = throw new IllegalArgumentException()
override def ~[T](part: IndexedSeq[T]): Hosts = throw new IllegalArgumentException()
override def ~[T <: Product](part: T): Hosts = throw new IllegalArgumentException()
override def isValid: Boolean = true
}
| codejitsu/saruman | saruman-dsl/src/main/scala/Host.scala | Scala | apache-2.0 | 1,134 |
package com.twitter.finagle.mysql
import com.twitter.finagle.mysql.transport.{MysqlBuf, MysqlBufWriter, Packet}
import com.twitter.io.Buf
import java.security.MessageDigest
import java.util.logging.Logger
/**
 * MySQL protocol command byte constants. The negative values (COM_POISON_CONN,
 * COM_NO_OP) are internal to this client and are never sent on the wire.
 */
object Command {
val COM_POISON_CONN: Byte = (-2).toByte // used internally to close an underlying connection
val COM_NO_OP: Byte = (-1).toByte // used internally by this client
val COM_SLEEP: Byte = 0x00.toByte // internal thread state
val COM_QUIT: Byte = 0x01.toByte // mysql_close
val COM_INIT_DB: Byte = 0x02.toByte // mysql_select_db
val COM_QUERY: Byte = 0x03.toByte // mysql_real_query
val COM_FIELD_LIST: Byte = 0x04.toByte // mysql_list_fields
val COM_CREATE_DB: Byte = 0x05.toByte // mysql_create_db (deperacted)
val COM_DROP_DB: Byte = 0x06.toByte // mysql_drop_db (deprecated)
val COM_REFRESH: Byte = 0x07.toByte // mysql_refresh
val COM_SHUTDOWN: Byte = 0x08.toByte // mysql_shutdown
val COM_STATISTICS: Byte = 0x09.toByte // mysql_stat
val COM_PROCESS_INFO: Byte = 0x0A.toByte // mysql_list_processes
val COM_CONNECT: Byte = 0x0B.toByte // internal thread state
val COM_PROCESS_KILL: Byte = 0x0C.toByte // mysql_kill
val COM_DEBUG: Byte = 0x0D.toByte // mysql_dump_debug_info
val COM_PING: Byte = 0x0E.toByte // mysql_ping
val COM_TIME: Byte = 0x0F.toByte // internal thread state
val COM_DELAYED_INSERT: Byte = 0x10.toByte // internal thread state
val COM_CHANGE_USER: Byte = 0x11.toByte // mysql_change_user
val COM_BINLOG_DUMP: Byte = 0x12.toByte // sent by slave IO thread to req a binlog
val COM_TABLE_DUMP: Byte = 0x13.toByte // deprecated
val COM_CONNECT_OUT: Byte = 0x14.toByte // internal thread state
val COM_REGISTER_SLAVE: Byte = 0x15.toByte // sent by the slave to register with the master (optional)
val COM_STMT_PREPARE: Byte = 0x16.toByte // mysql_stmt_prepare
val COM_STMT_EXECUTE: Byte = 0x17.toByte // mysql_stmt_execute
val COM_STMT_SEND_LONG_DATA: Byte = 0x18.toByte // mysql_stmt_send_long_data
val COM_STMT_CLOSE: Byte = 0x19.toByte // mysql_stmt_close
val COM_STMT_RESET: Byte = 0x1A.toByte // mysql_stmt_reset
val COM_SET_OPTION: Byte = 0x1B.toByte // mysql_set_server_option
val COM_STMT_FETCH: Byte = 0x1C.toByte // mysql_stmt_fetch
}
// Base type for client-to-server messages: a request carries its packet
// sequence number and knows how to serialize itself into a wire packet.
sealed trait Request {
// Sequence number of the packet within the current exchange.
val seq: Short
// Command byte; COM_NO_OP for requests with no associated wire command.
val cmd: Byte = Command.COM_NO_OP
def toPacket: Packet
}
// Internal sentinel request used to tear down the underlying connection. It is
// intercepted before serialization, so toPacket is intentionally unimplemented.
private[finagle] object PoisonConnectionRequest extends Request {
val seq: Short = 0
override val cmd: Byte = Command.COM_POISON_CONN
def toPacket: Packet = ??? // never called: handled specially before encoding
}
/**
* A command request is a request initiated by the client
* and has a cmd byte associated with it.
*
* Command packets always start a new exchange, so their sequence number is 0.
*/
abstract class CommandRequest(override val cmd: Byte) extends Request {
val seq: Short = 0
}
/**
 * Defines a request that encodes the command byte and
 * associated data into a packet.
 */
class SimpleCommandRequest(command: Byte, data: Array[Byte]) extends CommandRequest(command) {
  // Packet body: the command byte immediately followed by the payload bytes.
  val buf = Buf(Seq(Buf.ByteArray.Owned(Array(command)), Buf.ByteArray.Owned(data)))
  val toPacket = Packet(seq, buf)
}
/**
* A request to check if the server is alive.
* [[http://dev.mysql.com/doc/internals/en/com-ping.html]]
*/
case object PingRequest extends SimpleCommandRequest(Command.COM_PING, Array.emptyByteArray)
/**
* Tells the server that the client wants to close the connection.
* [[http://dev.mysql.com/doc/internals/en/com-quit.html]]
*/
case object QuitRequest extends SimpleCommandRequest(Command.COM_QUIT, Array.emptyByteArray)
/**
* A UseRequest is used to change the default schema of the connection.
* [[http://dev.mysql.com/doc/internals/en/com-init-db.html]]
*/
case class UseRequest(dbName: String)
extends SimpleCommandRequest(Command.COM_INIT_DB, dbName.getBytes)
/**
* A QueryRequest is used to send the server a text-based query that
* is executed immediately.
* [[http://dev.mysql.com/doc/internals/en/com-query.html]]
*/
case class QueryRequest(sqlStatement: String)
extends SimpleCommandRequest(Command.COM_QUERY, sqlStatement.getBytes)
/**
* Allocates a prepared statement on the server from the
* passed in query string.
* [[http://dev.mysql.com/doc/internals/en/com-stmt-prepare.html]]
*/
case class PrepareRequest(sqlStatement: String)
extends SimpleCommandRequest(Command.COM_STMT_PREPARE, sqlStatement.getBytes)
/**
* Client response sent during connection phase.
* Responsible for encoding credentials used to
* authenticate a session.
* [[http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse41]]
*/
case class HandshakeResponse(
username: Option[String],
password: Option[String],
database: Option[String],
clientCap: Capability,
salt: Array[Byte],
serverCap: Capability,
charset: Short,
maxPacketSize: Int
) extends Request {
import Capability._
// Handshake response is the second packet of the connection exchange.
override val seq: Short = 1
// Password hashed with the server-provided salt (mysql_native_password);
// empty when no password was supplied.
lazy val hashPassword = password match {
case Some(p) => encryptPassword(p, salt)
case None => Array[Byte]()
}
def toPacket = {
// 34 = 4 (capabilities) + 4 (max packet size) + 1 (charset) + 23 (reserved)
// + 1 (username NUL) + 1 (password length prefix).
val fixedBodySize = 34
val dbStrSize = database.map { _.length + 1 }.getOrElse(0)
val packetBodySize =
username.getOrElse("").length + hashPassword.length + dbStrSize + fixedBodySize
val bw = MysqlBuf.writer(new Array[Byte](packetBodySize))
bw.writeIntLE(clientCap.mask)
bw.writeIntLE(maxPacketSize)
bw.writeByte(charset)
bw.fill(23, 0.toByte) // 23 reserved bytes - zeroed out
bw.writeNullTerminatedString(username.getOrElse(""))
bw.writeLengthCodedBytes(hashPassword)
// The default schema is appended only when both sides support it.
if (clientCap.has(ConnectWithDB) && serverCap.has(ConnectWithDB))
bw.writeNullTerminatedString(database.get)
Packet(seq, bw.owned())
}
// mysql_native_password scramble:
// SHA1(password) XOR SHA1(salt ++ SHA1(SHA1(password))).
private[this] def encryptPassword(password: String, salt: Array[Byte]) = {
val md = MessageDigest.getInstance("SHA-1")
val hash1 = md.digest(password.getBytes(Charset(charset).displayName))
md.reset()
val hash2 = md.digest(hash1)
md.reset()
md.update(salt)
md.update(hash2)
val digest = md.digest()
(0 until digest.length) foreach { i =>
digest(i) = (digest(i) ^ hash1(i)).toByte
}
digest
}
}
/**
 * Requests up to `numRows` rows from the cursor of an open prepared
 * statement (COM_STMT_FETCH).
 */
class FetchRequest(val prepareOK: PrepareOK, val numRows: Int)
  extends CommandRequest(Command.COM_STMT_FETCH) {
  val stmtId = prepareOK.id

  override def toPacket: Packet = {
    // 9 bytes: command byte + 4-byte statement id + 4-byte row count.
    val writer = MysqlBuf.writer(new Array[Byte](9))
    writer
      .writeByte(cmd)
      .writeIntLE(stmtId)
      .writeIntLE(numRows)
    Packet(seq, writer.owned())
  }
}
/**
* Uses the binary protocol to build an execute request for
* a prepared statement.
* [[http://dev.mysql.com/doc/internals/en/com-stmt-execute.html]]
*/
class ExecuteRequest(
val stmtId: Int,
val params: IndexedSeq[Parameter],
val hasNewParams: Boolean,
val flags: Byte
) extends CommandRequest(Command.COM_STMT_EXECUTE) {
private[this] val log = Logger.getLogger("finagle-mysql")
// Builds the NULL bitmap: one bit per parameter (LSB first within each
// byte), set when the parameter is NULL.
private[this] def makeNullBitmap(parameters: IndexedSeq[Parameter]): Array[Byte] = {
val bitmap = new Array[Byte]((parameters.size + 7) / 8)
val ps = parameters.zipWithIndex
ps foreach {
case (Parameter.NullParameter, idx) =>
val bytePos = idx / 8
val bitPos = idx % 8
val byte = bitmap(bytePos)
bitmap(bytePos) = (byte | (1 << bitPos)).toByte
case _ =>
()
}
bitmap
}
private[this] def writeTypeCode(param: Parameter, writer: MysqlBufWriter): Unit = {
val typeCode = param.typeCode
if (typeCode != -1)
writer.writeShortLE(typeCode)
else {
// Unsupported type. Write the error to log, and write the type as null.
// This allows us to safely skip writing the parameter without corrupting the buffer.
log.warning(
"Unknown parameter %s will be treated as SQL NULL.".format(param.getClass.getName)
)
writer.writeShortLE(Type.Null)
}
}
/**
* Returns sizeof all the parameters according to
* mysql binary encoding.
*/
private[this] def sizeOfParameters(parameters: IndexedSeq[Parameter]): Int =
parameters.foldLeft(0)(_ + _.size)
/**
* Writes the parameter into its MySQL binary representation.
*/
private[this] def writeParam(param: Parameter, writer: MysqlBufWriter): MysqlBufWriter = {
param.writeTo(writer)
writer
}
def toPacket = {
// Fixed header: command byte, statement id, flags, iteration count.
val bw = MysqlBuf.writer(new Array[Byte](10))
bw.writeByte(cmd)
bw.writeIntLE(stmtId)
bw.writeByte(flags)
bw.writeIntLE(1) // iteration count - always 1
val newParamsBound: Byte = if (hasNewParams) 1 else 0
val newParamsBoundBuf = Buf.ByteArray.Owned(Array(newParamsBound))
// convert parameters to binary representation.
val sizeOfParams = sizeOfParameters(params)
val values = MysqlBuf.writer(new Array[Byte](sizeOfParams))
params foreach { writeParam(_, values) }
// encode null values in bitmap
val nullBitmap = Buf.ByteArray.Owned(makeNullBitmap(params))
// parameters are appended to the end of the packet
// only if the statement has new parameters.
val composite = if (hasNewParams) {
// Type codes (2 bytes each) precede the values when parameters are new.
val types = MysqlBuf.writer(new Array[Byte](params.size * 2))
params foreach { writeTypeCode(_, types) }
Buf(Seq(bw.owned(), nullBitmap, newParamsBoundBuf, types.owned(), values.owned()))
} else {
Buf(Seq(bw.owned(), nullBitmap, newParamsBoundBuf, values.owned()))
}
Packet(seq, composite)
}
}
object ExecuteRequest {
  val FLAG_CURSOR_READ_ONLY = 0x01.toByte // CURSOR_TYPE_READ_ONLY

  /** Builds an ExecuteRequest, coercing raw `null` parameters to NullParameter. */
  def apply(
    stmtId: Int,
    params: IndexedSeq[Parameter] = IndexedSeq.empty,
    hasNewParams: Boolean = true,
    flags: Byte = 0
  ) = {
    val sanitizedParams = params.map { p =>
      if (p == null) Parameter.NullParameter else p
    }
    new ExecuteRequest(stmtId, sanitizedParams, hasNewParams, flags)
  }

  /** Extractor exposing the request's fields as a tuple. */
  def unapply(executeRequest: ExecuteRequest): Option[(Int, IndexedSeq[Parameter], Boolean, Byte)] =
    Some((
      executeRequest.stmtId,
      executeRequest.params,
      executeRequest.hasNewParams,
      executeRequest.flags
    ))
}
/**
 * A CloseRequest deallocates a prepared statement on the server.
 * No response is sent back to the client.
 * [[http://dev.mysql.com/doc/internals/en/com-stmt-close.html]]
 */
case class CloseRequest(stmtId: Int) extends CommandRequest(Command.COM_STMT_CLOSE) {
  // Packet body: command byte followed by the 4-byte little-endian statement id.
  override val toPacket = {
    val writer = MysqlBuf.writer(new Array[Byte](5))
    writer.writeByte(cmd)
    writer.writeIntLE(stmtId)
    Packet(seq, writer.owned())
  }
}
| mkhq/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/Request.scala | Scala | apache-2.0 | 10,464 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.submit.submitsteps.initcontainer
import java.io.File
import com.google.common.base.Charsets
import com.google.common.io.{BaseEncoding, Files}
import io.fabric8.kubernetes.api.model.{Secret, SecretBuilder}
import scala.collection.JavaConverters._
import org.apache.spark.SparkException
import org.apache.spark.deploy.kubernetes.InitContainerResourceStagingServerSecretPlugin
import org.apache.spark.deploy.kubernetes.config._
import org.apache.spark.deploy.kubernetes.constants._
import org.apache.spark.deploy.kubernetes.submit.SubmittedDependencyUploader
import org.apache.spark.internal.config.OptionalConfigEntry
import org.apache.spark.util.Utils
/**
 * Init-container configuration step that wires locally-submitted dependencies
 * (jars and files) into the driver pod. It uploads the dependencies to the
 * resource staging server, records the returned resource ids in the
 * init-container properties, and creates a Kubernetes secret holding the
 * download secrets (plus optional TLS material) which is mounted into the
 * init-container at `initContainerSecretMountPath`.
 *
 * @param submittedResourcesSecretName name of the Kubernetes secret created by this step
 * @param internalResourceStagingServerUri URI the init-container uses to reach the staging server
 * @param initContainerSecretMountPath path where the secret is mounted inside the init-container
 * @param resourceStagingServerSslEnabled whether TLS is enabled for the staging server
 * @param maybeInternalTrustStoreUri optional trustStore URI (file:// or local:// scheme)
 * @param maybeInternalClientCertUri optional client certificate URI (file:// or local:// scheme)
 * @param maybeInternalTrustStorePassword optional trustStore password
 * @param maybeInternalTrustStoreType optional trustStore type
 * @param submittedDependencyUploader uploads local jars/files to the staging server
 * @param submittedResourcesSecretPlugin mounts the created secret into the init-container and pod
 */
private[spark] class SubmittedResourcesInitContainerConfigurationStep(
    submittedResourcesSecretName: String,
    internalResourceStagingServerUri: String,
    initContainerSecretMountPath: String,
    resourceStagingServerSslEnabled: Boolean,
    maybeInternalTrustStoreUri: Option[String],
    maybeInternalClientCertUri: Option[String],
    maybeInternalTrustStorePassword: Option[String],
    maybeInternalTrustStoreType: Option[String],
    submittedDependencyUploader: SubmittedDependencyUploader,
    submittedResourcesSecretPlugin: InitContainerResourceStagingServerSecretPlugin)
  extends InitContainerConfigurationStep {
  override def configureInitContainer(initContainerSpec: InitContainerSpec): InitContainerSpec = {
    // Upload the dependencies first; each upload yields a resource id plus a
    // secret that authorizes downloading that resource later.
    val jarsIdAndSecret = submittedDependencyUploader.uploadJars()
    val filesIdAndSecret = submittedDependencyUploader.uploadFiles()
    val submittedResourcesInitContainerProperties = Map[String, String](
      RESOURCE_STAGING_SERVER_URI.key -> internalResourceStagingServerUri,
      INIT_CONTAINER_DOWNLOAD_JARS_RESOURCE_IDENTIFIER.key -> jarsIdAndSecret.resourceId,
      INIT_CONTAINER_DOWNLOAD_JARS_SECRET_LOCATION.key ->
        s"$initContainerSecretMountPath/$INIT_CONTAINER_SUBMITTED_JARS_SECRET_KEY",
      INIT_CONTAINER_DOWNLOAD_FILES_RESOURCE_IDENTIFIER.key -> filesIdAndSecret.resourceId,
      INIT_CONTAINER_DOWNLOAD_FILES_SECRET_LOCATION.key ->
        s"$initContainerSecretMountPath/$INIT_CONTAINER_SUBMITTED_FILES_SECRET_KEY",
      RESOURCE_STAGING_SERVER_SSL_ENABLED.key -> resourceStagingServerSslEnabled.toString) ++
      resolveSecretPath(
        maybeInternalTrustStoreUri,
        INIT_CONTAINER_STAGING_SERVER_TRUSTSTORE_SECRET_KEY,
        RESOURCE_STAGING_SERVER_TRUSTSTORE_FILE,
        "TrustStore URI") ++
      resolveSecretPath(
        maybeInternalClientCertUri,
        INIT_CONTAINER_STAGING_SERVER_CLIENT_CERT_SECRET_KEY,
        RESOURCE_STAGING_SERVER_CLIENT_CERT_PEM,
        "Client certificate URI") ++
      maybeInternalTrustStorePassword.map { password =>
        (RESOURCE_STAGING_SERVER_TRUSTSTORE_PASSWORD.key, password)
      }.toMap ++
      maybeInternalTrustStoreType.map { storeType =>
        (RESOURCE_STAGING_SERVER_TRUSTSTORE_TYPE.key, storeType)
      }.toMap
    val initContainerSecret = createResourceStagingServerSecret(
      jarsIdAndSecret.resourceSecret, filesIdAndSecret.resourceSecret)
    // Executors need to know which secret to mount and where.
    val additionalDriverSparkConf =
      Map(
        EXECUTOR_INIT_CONTAINER_SECRET.key -> initContainerSecret.getMetadata.getName,
        EXECUTOR_INIT_CONTAINER_SECRET_MOUNT_DIR.key -> initContainerSecretMountPath)
    val initContainerWithSecretVolumeMount = submittedResourcesSecretPlugin
      .mountResourceStagingServerSecretIntoInitContainer(initContainerSpec.initContainer)
    val podWithSecretVolume = submittedResourcesSecretPlugin
      .addResourceStagingServerSecretVolumeToPod(initContainerSpec.podToInitialize)
    initContainerSpec.copy(
      initContainer = initContainerWithSecretVolumeMount,
      podToInitialize = podWithSecretVolume,
      initContainerDependentResources =
        initContainerSpec.initContainerDependentResources ++ Seq(initContainerSecret),
      initContainerProperties =
        initContainerSpec.initContainerProperties ++ submittedResourcesInitContainerProperties,
      additionalDriverSparkConf = additionalDriverSparkConf)
  }
  /**
   * Builds the Kubernetes secret that carries the base64-encoded download
   * secrets for the submitted jars/files, plus the trustStore and client
   * certificate contents when they are submitter-local files.
   */
  private def createResourceStagingServerSecret(
      jarsResourceSecret: String, filesResourceSecret: String): Secret = {
    val trustStoreBase64 = convertFileToBase64IfSubmitterLocal(
      INIT_CONTAINER_STAGING_SERVER_TRUSTSTORE_SECRET_KEY, maybeInternalTrustStoreUri)
    val clientCertBase64 = convertFileToBase64IfSubmitterLocal(
      INIT_CONTAINER_STAGING_SERVER_CLIENT_CERT_SECRET_KEY, maybeInternalClientCertUri)
    val jarsSecretBase64 = BaseEncoding.base64().encode(jarsResourceSecret.getBytes(Charsets.UTF_8))
    val filesSecretBase64 = BaseEncoding.base64().encode(
      filesResourceSecret.getBytes(Charsets.UTF_8))
    val secretData = Map(
      INIT_CONTAINER_SUBMITTED_JARS_SECRET_KEY -> jarsSecretBase64,
      INIT_CONTAINER_SUBMITTED_FILES_SECRET_KEY -> filesSecretBase64) ++
      trustStoreBase64 ++
      clientCertBase64
    val kubernetesSecret = new SecretBuilder()
      .withNewMetadata()
        .withName(submittedResourcesSecretName)
        .endMetadata()
      .addToData(secretData.asJava)
      .build()
    kubernetesSecret
  }
  /**
   * If the given URI has a file:// (or empty) scheme, reads the file on the
   * submitter's machine and returns a single-entry map from `secretKey` to
   * its base64-encoded contents; returns an empty map otherwise.
   */
  private def convertFileToBase64IfSubmitterLocal(secretKey: String, secretUri: Option[String])
      : Map[String, String] = {
    secretUri.filter { trustStore =>
      Option(Utils.resolveURI(trustStore).getScheme).getOrElse("file") == "file"
    }.map { uri =>
      val file = new File(Utils.resolveURI(uri).getPath)
      // Note: fixed missing space after "at" and generalized the message,
      // since this method also handles the client certificate, not just the trustStore.
      require(file.isFile, "Dependency server secret file provided at " +
        file.getAbsolutePath + " does not exist or is not a file.")
      (secretKey, BaseEncoding.base64().encode(Files.toByteArray(file)))
    }.toMap
  }
  /**
   * Resolves where the init-container will find the secret file for the given
   * URI: submitter-local (file://) files are served from the secret mount,
   * while local:// URIs point at a path already present in the container image.
   *
   * @throws SparkException if the URI scheme is neither file:// nor local://
   */
  private def resolveSecretPath(
      maybeUri: Option[String],
      secretKey: String,
      configEntry: OptionalConfigEntry[String],
      uriType: String): Map[String, String] = {
    maybeUri.map(Utils.resolveURI).map { uri =>
      val resolvedPath = Option(uri.getScheme).getOrElse("file") match {
        case "file" => s"$initContainerSecretMountPath/$secretKey"
        case "local" => uri.getPath
        case invalid => throw new SparkException(s"$uriType has invalid scheme $invalid. Scheme" +
          s" must be local://, file://, or empty.")
      }
      (configEntry.key, resolvedPath)
    }.toMap
  }
}
| kimoonkim/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/kubernetes/submit/submitsteps/initcontainer/SubmittedResourcesInitContainerConfigurationStep.scala | Scala | apache-2.0 | 7,154 |
package mesosphere.marathon
package core.task.termination.impl
import java.util.UUID
import akka.Done
import akka.actor.{ ActorRef, PoisonPill, Terminated }
import akka.testkit.TestProbe
import com.typesafe.scalalogging.StrictLogging
import mesosphere.AkkaUnitTest
import mesosphere.marathon.test.SettableClock
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.event.{ InstanceChanged, UnknownInstanceTerminated }
import mesosphere.marathon.core.instance.update.{ InstanceChange, InstanceUpdateOperation }
import mesosphere.marathon.core.instance.{ Instance, TestInstanceBuilder }
import mesosphere.marathon.core.pod.MesosContainer
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.bus.TaskStatusUpdateTestHelper
import mesosphere.marathon.core.task.termination.KillConfig
import mesosphere.marathon.core.task.tracker.InstanceStateOpProcessor
import mesosphere.marathon.raml.Resources
import mesosphere.marathon.state.{ PathId, Timestamp }
import mesosphere.marathon.stream.Implicits._
import org.apache.mesos
import org.apache.mesos.SchedulerDriver
import org.mockito.ArgumentCaptor
import scala.concurrent.Promise
import scala.concurrent.duration._
/**
 * Behavioural tests for the KillServiceActor: issuing kills through the mocked
 * scheduler driver, expunging unreachable/terminal instances via the state-op
 * processor, throttling to `killChunkSize` in-flight kills, and retrying after
 * `killRetryTimeout`. Instance terminations are simulated by publishing
 * InstanceChanged / UnknownInstanceTerminated events on the event stream.
 */
class KillServiceActorTest extends AkkaUnitTest with StrictLogging {
  // Long retry timeout: the tests using this config only exercise the first kill attempt.
  val defaultConfig: KillConfig = new KillConfig {
    override lazy val killChunkSize: Int = 5
    override lazy val killRetryTimeout: FiniteDuration = 10.minutes
  }
  // Short retry timeout: lets the retry test observe a second kill attempt quickly.
  val retryConfig: KillConfig = new KillConfig {
    override lazy val killChunkSize: Int = 5
    override lazy val killRetryTimeout: FiniteDuration = 500.millis
  }
  "The KillServiceActor" when {
    "asked to kill a single known instance" should {
      "issue a kill to the driver" in withActor(defaultConfig) { (f, actor) =>
        val instance = f.mockInstance(f.runSpecId, f.now(), mesos.Protos.TaskState.TASK_RUNNING)
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance), promise)
        val (taskId, _) = instance.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId.mesosTaskId)
        // Simulate Mesos acknowledging the kill; the promise must then complete.
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance).wrapped)
        promise.future.futureValue should be(Done)
      }
    }
    "asked to kill an unknown instance" should {
      "issue a kill to the driver" in withActor(defaultConfig) { (f, actor) =>
        val taskId = Task.Id.forRunSpec(PathId("/unknown"))
        actor ! KillServiceActor.KillUnknownTaskById(taskId)
        f.publishUnknownInstanceTerminated(taskId.instanceId)
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId.mesosTaskId)
        noMoreInteractions(f.driver)
      }
    }
    "asked to kill single known unreachable instance" should {
      "issue no kill to the driver because the task is unreachable and send an expunge" in withActor(defaultConfig) { (f, actor) =>
        val instance = f.mockInstance(f.runSpecId, f.now(), mesos.Protos.TaskState.TASK_UNREACHABLE)
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance), promise)
        noMoreInteractions(f.driver)
        verify(f.stateOpProcessor, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).process(InstanceUpdateOperation.ForceExpunge(instance.instanceId))
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance).wrapped)
        promise.future.futureValue should be(Done)
      }
    }
    "asked to kill multiple instances at once" should {
      "issue three kill requests to the driver" in withActor(defaultConfig) { (f, actor) =>
        val runningInstance = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
        val unreachableInstance = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_UNREACHABLE)
        val stagingInstance = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_STAGING)
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(runningInstance, unreachableInstance, stagingInstance), promise)
        // Running and staging instances get driver kills; the unreachable one is expunged instead.
        val (runningTaskId, _) = runningInstance.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(runningTaskId.mesosTaskId)
        verify(f.stateOpProcessor, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).process(InstanceUpdateOperation.ForceExpunge(unreachableInstance.instanceId))
        val (stagingTaskId, _) = stagingInstance.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(stagingTaskId.mesosTaskId)
        noMoreInteractions(f.driver)
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(runningInstance).wrapped)
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.gone(unreachableInstance).wrapped)
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.unreachable(stagingInstance).wrapped)
        promise.future.futureValue should be (Done)
      }
    }
    "asked to kill multiple tasks at once with an empty list" should {
      "issue no kill" in withActor(defaultConfig) { (f, actor) =>
        val emptyList = Seq.empty[Instance]
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(emptyList, promise)
        promise.future.futureValue should be (Done)
        noMoreInteractions(f.driver)
      }
    }
    "asked to kill multiple instances subsequently" should {
      "issue exactly 3 kills to the driver and complete the future successfully" in withActor(defaultConfig) { (f, actor) =>
        val instance1 = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
        val instance2 = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
        val instance3 = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
        val promise1 = Promise[Done]()
        val promise2 = Promise[Done]()
        val promise3 = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance1), promise1)
        actor ! KillServiceActor.KillInstances(Seq(instance2), promise2)
        actor ! KillServiceActor.KillInstances(Seq(instance3), promise3)
        val (taskId1, _) = instance1.tasksMap.head
        val (taskId2, _) = instance2.tasksMap.head
        val (taskId3, _) = instance3.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId1.mesosTaskId)
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId2.mesosTaskId)
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId3.mesosTaskId)
        noMoreInteractions(f.driver)
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance1).wrapped)
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance2).wrapped)
        f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance3).wrapped)
        promise1.future.futureValue should be (Done)
        promise2.future.futureValue should be (Done)
        promise3.future.futureValue should be (Done)
      }
    }
    "killing instances is throttled (single requests)" should {
      "issue 5 kills immediately to the driver" in withActor(defaultConfig) { (f, actor) =>
        // 10 instances but killChunkSize = 5: only 5 kills may be in flight at once.
        val instances: Map[Instance.Id, Instance] = (1 to 10).map { index =>
          val instance = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
          instance.instanceId -> instance
        }(collection.breakOut)
        instances.valuesIterator.foreach { instance =>
          actor ! KillServiceActor.KillInstances(Seq(instance), Promise[Done]())
        }
        val captor: ArgumentCaptor[mesos.Protos.TaskID] = ArgumentCaptor.forClass(classOf[mesos.Protos.TaskID])
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2).times(5)).killTask(captor.capture())
        reset(f.driver)
        // Acknowledge the first chunk; the remaining 5 kills should then be issued.
        captor.getAllValues.foreach { id =>
          val instanceId = Task.Id(id).instanceId
          instances.get(instanceId).foreach { instance =>
            f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance).wrapped)
          }
        }
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2).times(5)).killTask(any)
        noMoreInteractions(f.driver)
      }
    }
    "killing instances is throttled (batch request)" should {
      "issue 5 kills immediately to the driver" in withActor(defaultConfig) { (f, actor) =>
        val instances: Map[Instance.Id, Instance] = (1 to 10).map { index =>
          val instance = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
          instance.instanceId -> instance
        }(collection.breakOut)
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(instances.values.to[Seq], promise)
        val captor: ArgumentCaptor[mesos.Protos.TaskID] = ArgumentCaptor.forClass(classOf[mesos.Protos.TaskID])
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2).times(5)).killTask(captor.capture())
        reset(f.driver)
        captor.getAllValues.foreach { id =>
          val instanceId = Task.Id(id).instanceId
          instances.get(instanceId).foreach { instance =>
            f.publishInstanceChanged(TaskStatusUpdateTestHelper.killed(instance).wrapped)
          }
        }
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2).times(5)).killTask(any)
        noMoreInteractions(f.driver)
      }
    }
    "killing with retry will be retried" should {
      "issue a kill to the driver an eventually retry" in withActor(retryConfig) { (f, actor) =>
        val instance = f.mockInstance(f.runSpecId, f.clock.now(), mesos.Protos.TaskState.TASK_RUNNING)
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance), promise)
        val (taskId, _) = instance.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId.mesosTaskId)
        // No terminal event is published, so after advancing the clock the kill is re-issued.
        f.clock.+=(10.seconds)
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId.mesosTaskId)
      }
    }
    "when asked to kill all non-terminal tasks of a pod instance" should {
      "issue 2 kills to the driver and retry eventually" in withActor(defaultConfig) { (f, actor) =>
        val stagingContainer = f.container("stagingContainer")
        val runningContainer = f.container("runningContainer")
        val finishedContainer = f.container("finishedContainer")
        var instance = TestInstanceBuilder.newBuilder(f.runSpecId)
          .addTaskStaged(containerName = Some(stagingContainer.name))
          .addTaskStaged(containerName = Some(runningContainer.name))
          .addTaskStaged(containerName = Some(finishedContainer.name))
          .getInstance()
        instance = TaskStatusUpdateTestHelper.running(instance, Some(runningContainer)).updatedInstance
        instance = TaskStatusUpdateTestHelper.finished(instance, Some(finishedContainer)).updatedInstance
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance), promise)
        // Only the staging and running containers get kills; the finished one is already terminal.
        val captor = ArgumentCaptor.forClass(classOf[mesos.Protos.TaskID])
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2).times(2)).killTask(captor.capture())
        captor.getAllValues should have size 2
        captor.getAllValues should contain(f.taskIdFor(instance, stagingContainer))
        captor.getAllValues should contain(f.taskIdFor(instance, runningContainer))
        f.clock.+=(10.seconds)
        val (taskId, _) = instance.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId.mesosTaskId)
      }
    }
    "when killing all non-terminal tasks of a pod instance" should {
      "issue 2 kills to the driver" in withActor(defaultConfig) { (f, actor) =>
        val stagingContainer = f.container("stagingContainer")
        val runningContainer = f.container("runningContainer")
        val finishedContainer = f.container("finishedContainer")
        var instance = TestInstanceBuilder.newBuilder(f.runSpecId)
          .addTaskStaged(containerName = Some(stagingContainer.name))
          .addTaskStaged(containerName = Some(runningContainer.name))
          .addTaskStaged(containerName = Some(finishedContainer.name))
          .getInstance()
        instance = TaskStatusUpdateTestHelper.running(instance, Some(runningContainer)).updatedInstance
        instance = TaskStatusUpdateTestHelper.finished(instance, Some(finishedContainer)).updatedInstance
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance), promise)
        val captor = ArgumentCaptor.forClass(classOf[mesos.Protos.TaskID])
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2).times(2)).killTask(captor.capture())
        captor.getAllValues should have size 2
        captor.getAllValues should contain(f.taskIdFor(instance, stagingContainer))
        captor.getAllValues should contain(f.taskIdFor(instance, runningContainer))
        f.clock.+=(10.seconds)
        val (taskId, _) = instance.tasksMap.head
        verify(f.driver, timeout(f.killConfig.killRetryTimeout.toMillis.toInt * 2)).killTask(taskId.mesosTaskId)
      }
    }
    "a pod instance with only terminal tasks will be expunged and no kills are issued" should {
      "issue no kills" in withActor(defaultConfig) { (f, actor) =>
        val finishedContainer1 = f.container("finishedContainer1")
        val finishedContainer2 = f.container("finishedContainer2")
        var instance = TestInstanceBuilder.newBuilder(f.runSpecId)
          .addTaskRunning(containerName = Some(finishedContainer1.name))
          .addTaskRunning(containerName = Some(finishedContainer2.name))
          .getInstance()
        instance = TaskStatusUpdateTestHelper.finished(instance, Some(finishedContainer1)).updatedInstance
        instance = TaskStatusUpdateTestHelper.finished(instance, Some(finishedContainer2)).updatedInstance
        val promise = Promise[Done]()
        actor ! KillServiceActor.KillInstances(Seq(instance), promise)
        noMoreInteractions(f.driver)
        verify(f.stateOpProcessor, timeout(f.killConfig.killRetryTimeout.toMillis.toInt)).process(InstanceUpdateOperation.ForceExpunge(instance.instanceId))
      }
    }
  }
  /**
   * Runs `testCode` against a fresh fixture/actor pair, then stops the actor
   * and waits for its termination so tests cannot leak actors into each other.
   */
  def withActor(killConfig: KillConfig)(testCode: (Fixture, ActorRef) => Any): Unit = {
    val f = new Fixture(killConfig)
    val actor = system.actorOf(KillServiceActor.props(f.driverHolder, f.stateOpProcessor, killConfig, f.clock), s"KillService-${UUID.randomUUID()}")
    try {
      testCode(f, actor)
    } finally {
      actor ! PoisonPill
      val probe = TestProbe()
      probe.watch(actor)
      val terminated = probe.expectMsgAnyClassOf(classOf[Terminated])
      assert(terminated.actor == actor)
    }
  }
  /** Provides a mocked scheduler driver, a mocked state-op processor, a settable clock and event helpers. */
  class Fixture(val killConfig: KillConfig) {
    val runSpecId = PathId("/test")
    val driver = mock[SchedulerDriver]
    val driverHolder: MarathonSchedulerDriverHolder = {
      val holder = new MarathonSchedulerDriverHolder
      holder.driver = Some(driver)
      holder
    }
    val stateOpProcessor: InstanceStateOpProcessor = mock[InstanceStateOpProcessor]
    val clock = new SettableClock()
    // Builds an instance whose single task is in the given mesos state.
    def mockInstance(appId: PathId, stagedAt: Timestamp, mesosState: mesos.Protos.TaskState): Instance = {
      TestInstanceBuilder.newBuilder(appId).addTaskWithBuilder().taskForStatus(mesosState, stagedAt).build().getInstance()
    }
    def publishInstanceChanged(instanceChange: InstanceChange): Unit = {
      val instanceChangedEvent = InstanceChanged(instanceChange)
      logger.info("publish {} on the event stream", instanceChangedEvent)
      system.eventStream.publish(instanceChangedEvent)
    }
    def publishUnknownInstanceTerminated(instanceId: Instance.Id): Unit = {
      val event = UnknownInstanceTerminated(instanceId, instanceId.runSpecId, Condition.Killed)
      logger.info("publish {} on the event stream", event)
      system.eventStream.publish(event)
    }
    def now(): Timestamp = Timestamp.zero
    def container(name: String) = MesosContainer(name = name, resources = Resources())
    def taskIdFor(instance: Instance, container: MesosContainer): mesos.Protos.TaskID = {
      val taskId = Task.Id.forInstanceId(instance.instanceId, Some(container))
      taskId.mesosTaskId
    }
  }
}
| janisz/marathon | src/test/scala/mesosphere/marathon/core/task/termination/impl/KillServiceActorTest.scala | Scala | apache-2.0 | 16,787 |
/*
* Copyright 2017-2020 Aleksey Fomkin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package korolev.server
import korolev.effect.Effect
import korolev.state.DeviceId
import korolev.web.Request.Head
/** Factories for building [[StateLoader]]s. */
object StateLoader {
  /**
   * Builds a loader that serves one shared state value to every session.
   *
   * @param initialState state shared by all sessions
   * @tparam S type of the state
   */
  def default[F[_] : Effect, S](initialState: S): StateLoader[F, S] = {
    // Lift the state into F once, so every call hands back the same object.
    val shared = Effect[F].pure(initialState)
    (_, _) => shared
  }
  /**
   * Builds a loader whose state is derived from the device identifier.
   * Useful when you want to restore user authorization.
   *
   * {{{
   * case class MyState(deviceId: DeviceId, ...)
   *
   * StateLoader.forDeviceId { deviceId =>
   *   MyStorage.getStateByDeviceId(deviceId) map {
   *     case Some(state) => state
   *     case None => MyState(deviceId, ...)
   *   }
   * }
   * }}}
   */
  def forDeviceId[F[_], S](initialState: DeviceId => F[S]): StateLoader[F, S] =
    (deviceId, _) => initialState(deviceId)
  /**
   * Builds a loader from both the device identifier and the HTTP request
   * head, for cases where the initial state depends on the request itself.
   */
  def apply[F[_], S](f: (DeviceId, Head) => F[S]): StateLoader[F, S] = f
}
| fomkin/korolev | modules/korolev/src/main/scala/korolev/server/StateLoader.scala | Scala | apache-2.0 | 1,782 |
package com.typesafe.sbt
package packager
package rpm
import Keys._
import linux._
import sbt._
/** Plugin trait containing all settings and tasks used for building RPM packages on top of the generic linux packaging. */
trait RpmPlugin extends Plugin with LinuxPlugin {
  // Configuration scope for all RPM-specific settings; inherits defaults from the Linux config.
  val Rpm = config("rpm") extend Linux

  // Default values for every RPM setting, plus the tasks that assemble the
  // spec, build the package and run rpmlint against the result.
  def rpmSettings: Seq[Setting[_]] = Seq(
    rpmOs := "Linux", // TODO - default to something else?
    rpmRelease := "0",
    rpmVendor := "", // TODO - Maybe pull in organization?
    rpmLicense := None,
    rpmDistribution := None,
    rpmUrl := None,
    rpmGroup := None,
    rpmPackager := None,
    rpmIcon := None,
    rpmAutoprov := "yes",
    rpmAutoreq := "yes",
    rpmProvides := Seq.empty,
    rpmRequirements := Seq.empty,
    rpmPrerequisites := Seq.empty,
    rpmObsoletes := Seq.empty,
    rpmConflicts := Seq.empty,
    // Maintainer scriptlets (pretrans/pre/post/... ) are all disabled by default.
    rpmPretrans := None,
    rpmPre := None,
    rpmPost := None,
    rpmVerifyscript := None,
    rpmPosttrans := None,
    rpmPreun := None,
    rpmPostun := None,
    // Fall back to the generic Linux summary/description unless overridden.
    packageSummary in Rpm <<= packageSummary in Linux,
    packageDescription in Rpm <<= packageDescription in Linux,
    target in Rpm <<= target(_ / "rpm")
  ) ++ inConfig(Rpm)(Seq(
    packageArchitecture := "noarch",
    // Aggregate the individual settings into the structures the spec generator consumes.
    rpmMetadata <<=
      (name, version, rpmRelease, packageArchitecture, rpmVendor, rpmOs, packageSummary, packageDescription, rpmAutoprov, rpmAutoreq) apply (RpmMetadata.apply),
    rpmDescription <<=
      (rpmLicense, rpmDistribution, rpmUrl, rpmGroup, rpmPackager, rpmIcon) apply RpmDescription,
    rpmDependencies <<=
      (rpmProvides, rpmRequirements, rpmPrerequisites, rpmObsoletes, rpmConflicts) apply RpmDependencies,
    rpmScripts <<=
      (rpmPretrans,rpmPre,rpmPost,rpmVerifyscript,rpmPosttrans,rpmPreun,rpmPostun) apply RpmScripts,
    rpmSpecConfig <<=
      (rpmMetadata, rpmDescription, rpmDependencies, rpmScripts, linuxPackageMappings, linuxPackageSymlinks) map RpmSpec,
    packageBin <<= (rpmSpecConfig, target, streams) map { (spec, dir, s) =>
      RpmHelper.buildRpm(spec, dir, s.log)
    },
    // Runs the external rpmlint tool over the built package and fails on a non-zero exit.
    rpmLint <<= (packageBin, streams) map { (rpm, s) =>
      (Process(Seq("rpmlint", "-v", rpm.getAbsolutePath)) ! s.log) match {
        case 0 => ()
        case x => sys.error("Failed to run rpmlint, exit status: " + x)
      }
    }
  ))
}
| yanns/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/rpm/RpmPlugin.scala | Scala | bsd-2-clause | 2,276 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.prometheus
import java.net.URLEncoder
import java.util.{List => JList, Map => JMap}
import scala.collection.JavaConverters._
import com.fasterxml.jackson.databind.ObjectMapper
import org.slf4j.LoggerFactory
/**
* A HTTP client for the [[https://prometheus.io/docs/querying/api/ Prometheus V1 HTTP API]].
*
*/
object PrometheusClient {

  private val log = LoggerFactory.getLogger(getClass)

  // ObjectMapper is thread-safe once configured; share one instance instead of
  // allocating a new mapper on every request.
  private val mapper = new ObjectMapper()

  /**
   * A single time series returned by Prometheus.
   *
   * @param labels metric label name/value pairs identifying the series
   * @param values (timestamp in epoch millis, sample value) pairs
   */
  class PrometheusResult(
      val labels: Seq[(String, String)],
      val values: Seq[(Long, String)])

  /**
   * Executes a range query against the Prometheus V1 HTTP API and parses the
   * response into one [[PrometheusResult]] per returned series.
   *
   * @param endpoint URL to the root of the Prometheus web UI, e.g. "http://prom.mydomain.com:1234"
   * @param query query to pass to Prometheus
   * @param startTimeMs inclusive range start, in epoch milliseconds
   * @param endTimeMs inclusive range end, in epoch milliseconds
   * @param stepMs query resolution step, in milliseconds
   */
  def read(
      endpoint: String,
      query: String,
      startTimeMs: Long,
      endTimeMs: Long,
      stepMs: Long): Seq[PrometheusResult] = {
    val encodedQuery = URLEncoder.encode(query, "UTF8")
    // The API expects times/steps in (fractional) seconds.
    val start = BigDecimal(startTimeMs) / 1000
    val end = BigDecimal(endTimeMs) / 1000
    val step = BigDecimal(stepMs) / 1000
    val url = s"$endpoint/api/v1/query_range?query=$encodedQuery&start=$start&end=$end&step=$step"
    val startTime = System.nanoTime()
    // Close the underlying stream even when reading fails; the previous
    // implementation never closed the Source and leaked the connection.
    val source = scala.io.Source.fromURL(url)
    val json: String =
      try source.mkString
      finally source.close()
    val timeTaken = (System.nanoTime() - startTime) / 1000 / 1000
    log.debug(s"Request took $timeTaken ms: " + url)
    // This is pretty hacky, but it is a pain to work with JSON in a type safe language
    // where the data is actually dynamically typed ...
    val map = mapper.readValue(json, classOf[JMap[String, Object]])
    val data = map.get("data").asInstanceOf[JMap[String, Object]]
    val result = data.get("result").asInstanceOf[JList[JMap[String, Object]]]
    result.asScala.map { item =>
      val labels = item.get("metric").asInstanceOf[JMap[String, String]].asScala.toSeq
      val values = item.get("values").asInstanceOf[JList[JList[Object]]].asScala.map { v =>
        // Prometheus reports timestamps as fractional seconds; convert to millis.
        ((v.get(0).asInstanceOf[Double] * 1000).toLong, v.get(1).asInstanceOf[String])
      }
      new PrometheusResult(labels, values)
    }
  }
}
| BrentDorsey/pipeline | metrics.ml/spark-prometheus/src/main/scala/com/databricks/spark/prometheus/PrometheusClient.scala | Scala | apache-2.0 | 2,679 |
package core
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import models.Session
import models.Tick
import org.joda.time.DateTime
import org.joda.time.DateTimeConstants
/**
* Tests to check that the tariff timetable holds a couple of years from now.
*/
@RunWith(classOf[JUnitRunner])
@RunWith(classOf[JUnitRunner])
class TariffCalculatorFutureTest extends FunSuite with Fixtures {

  /**
   * Asserts that a 100ms session starting at `moment` produces exactly one
   * tariff interval transitioning from `expectedFrom` to `expectedTo`.
   * The tariff objects come from the Fixtures mixin, so they are accepted
   * untyped here and compared with `==`.
   */
  private def assertSingleTransition(moment: DateTime, expectedFrom: Any, expectedTo: Any): Unit = {
    val sessionStart = Session("test1", Tick(at = Some(moment.getMillis()), odo = 0))
    val sessionEnd = Session("test1", Tick(at = Some(moment.plusMillis(100).getMillis()), odo = 0))
    val intervals = Calculator.getTariffIntervals(sessionStart, sessionEnd)
    assert(intervals.head.fromTariff == expectedFrom)
    assert(intervals.head.toTariff == expectedTo)
    assert(intervals.size == 1)
  }

  test("Monday morning before 6AM has Tariff3, a couple of years from now") {
    assertSingleTransition(
      now.plusYears(2).withDayOfWeek(DateTimeConstants.MONDAY).withHourOfDay(3), Tariff3, Tariff1)
  }

  test("Monday between 6AM and 8PM has Tariff1, a couple of years from now") {
    assertSingleTransition(
      now.plusYears(2).withDayOfWeek(DateTimeConstants.MONDAY).withHourOfDay(10), Tariff1, Tariff2)
  }

  test("Weekday morning before 6AM has Tariff3, a couple of years from now") {
    jodaWeekdays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(3), Tariff3, Tariff1)
    }
  }

  test("Weekday between 6AM and 8PM has Tariff1, a couple of years from now") {
    jodaWeekdays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(10), Tariff1, Tariff2)
    }
  }

  test("Weekday between 8PM and 10PM has Tariff2, a couple of years from now") {
    jodaWeekdays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(21), Tariff2, Tariff3)
    }
  }

  test("Weekday after 10PM has Tariff3, a couple of years from now") {
    jodaWeekdays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(23), Tariff3, Tariff3)
    }
  }

  test("Weekend morning before 6AM has Tariff3, a couple of years from now") {
    jodaWeekendDays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(3), Tariff3, Tariff2)
    }
  }

  test("Weekend between 6AM and 10PM has Tariff2, a couple of years from now") {
    jodaWeekendDays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(10), Tariff2, Tariff3)
    }
  }

  test("Weekend after 10PM has Tariff3, a couple of years from now") {
    jodaWeekendDays.map(now.plusYears(2).withDayOfWeek(_)).foreach { day =>
      assertSingleTransition(day.withHourOfDay(23), Tariff3, Tariff3)
    }
  }
}
package com.sksamuel.elastic4s
import org.elasticsearch.script.Script
import org.elasticsearch.script.ScriptService.{ScriptType => ESScriptType}
import scala.language.implicitConversions
/** DSL entry points for constructing [[ScriptDefinition]]s. */
trait ScriptDsl {
  // Anonymous inline script: the name slot is deliberately left null.
  def script(script: String): ScriptDefinition = ScriptDefinition(null, script)
  // Named script variant.
  def script(name: String, script: String) = ScriptDefinition(name, script)
}
/**
 * Immutable description of an Elasticsearch script.
 * All modifier methods return an updated copy; nothing is mutated in place.
 */
case class ScriptDefinition(name: String, // todo is this actually used?
                            script: String,
                            lang: Option[String] = None,
                            scriptType: ESScriptType = ESScriptType.INLINE,
                            params: Map[String, Any] = Map.empty) {
  import scala.collection.JavaConverters._
  /** Sets the script language (wraps in Option, so a null clears it). */
  def lang(lang: String): ScriptDefinition = copy(lang = Option(lang))
  /** Adds a single named parameter. */
  def param(name: String, value: Any): ScriptDefinition = copy(params = params + (name -> value))
  // The varargs overload funnels through the Seq overload, then the Map one.
  def params(first: (String, Any), rest: (String, Any)*): ScriptDefinition = params(first +: rest)
  def params(seq: Seq[(String, Any)]): ScriptDefinition = params(seq.toMap)
  /** Merges the given parameters into the existing map (new keys win). */
  def params(map: Map[String, Any]): ScriptDefinition = copy(params = params ++ map)
  def scriptType(scriptType: ESScriptType): ScriptDefinition = copy(scriptType = scriptType)
  /** Converts to the native Elasticsearch Script object. */
  def toJavaAPI: Script = new Script(script, scriptType, lang.orNull, params.asJava)
}
object ScriptDefinition {
  // Lets callers pass a plain string wherever a ScriptDefinition is expected.
  implicit def string2Script(script: String): ScriptDefinition = ScriptDefinition(null, script)
} | anand-singh/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/ScriptDsl.scala | Scala | apache-2.0 | 1,464 |
package example.producer
/** Console producer: forwards each line read from stdin to a topic. */
object ProducerExample {
  def main(args: Array[String]): Unit = {
    // First CLI argument names the topic; default to "testTopic" when absent.
    val topicName = args.headOption.getOrElse("testTopic")
    val strProducer = Producer[String](topicName)
    // Pump stdin line-by-line into the producer until EOF.
    io.Source.stdin.getLines.foreach(strProducer.send)
  }
}
| loveltyoic/scala | src/main/scala/example/producer/ProducerExample.scala | Scala | apache-2.0 | 291 |
package com.flowy.bexchange
import java.time.{Instant, ZoneOffset}
import java.util.UUID
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Publish
import com.flowy.bexchange.trade.TradeActor
import com.flowy.bexchange.trade.TradeActor.Update
import com.flowy.common.database.TheEverythingBagelDao
import com.flowy.common.models.MarketStructures.MarketUpdate
import com.flowy.common.models._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ExecutionContext, Future}
import redis.RedisClient
import scala.tools.reflect.ToolBox
import scala.util.{Failure, Success}
object MarketTradeService {
  /** Props factory so the actor's constructor arguments are captured safely. */
  def props(marketName: String, bagel: TheEverythingBagelDao, redis: RedisClient)(implicit context: ExecutionContext) =
    Props(new MarketTradeService(marketName, bagel, redis))
  // Actor protocol. senderOpt carries the original requester so replies can be
  // routed back once the asynchronous DB work completes.
  case class DeleteTrade(trade: Trade, senderOpt: Option[ActorRef] = None)
  case class PostTrade(forUser: UserData, request: TradeRequest, senderOpt: Option[ActorRef] = None)
  case class UpdateTrade(forUser: UserData, tradeId: UUID, request: TradeRequest, senderOpt: Option[ActorRef] = None)
}
/**
* Services a single market: example BTC-NEO
* @param marketName
* @param bagel
* @param redis
*/
class MarketTradeService(val marketName: String, bagel: TheEverythingBagelDao, redis: RedisClient) extends Actor
  with ActorLogging {
  import MarketTradeService._
  import trade.TradeActor.Cancel
  import scala.reflect.runtime.currentMirror
  implicit val akkaSystem = context.system
  // maps id of trade to the trade actor
  val trades = collection.mutable.Map[UUID, ActorRef]()
  // NOTE(review): `dynamic`, `mediator` and the `redis` constructor param are
  // not used anywhere in this visible code — confirm whether they are dead.
  val dynamic = currentMirror.mkToolBox()
  val mediator = DistributedPubSub(context.system).mediator
  override def preStart() = {
    // load pending conditions from bagel
    // NOTE(review): this Future callback mutates `trades` and calls
    // context.actorOf from a non-actor thread; Akka requires actor state to be
    // touched only from the actor — consider pipeTo-ing the result to self.
    bagel.findTradesByStatus(marketName, Seq(TradeStatus.Pending, TradeStatus.Bought)).map { pendingTrades =>
      pendingTrades.foreach { trade =>
        trades += trade.id -> context.actorOf(TradeActor.props(trade, bagel), trade.id.toString)
      }
    }
    log.info(s"$marketName actor started")
  }
  def receive: Receive = {
    case update: MarketUpdate =>
      // forward to all child actors of me
      context.system.actorSelection(s"${self.path}/*") ! update
    case PostTrade(user, request, Some(sender)) =>
      postTrade(user, request, sender)
    case UpdateTrade(user, tradeId, request, Some(sender)) =>
      updateTrade(user, tradeId, request, sender)
    case DeleteTrade(trade, Some(sender)) =>
      deleteTrade(trade, sender)
    case x =>
      // Includes Post/Update/Delete messages that arrived without a sender.
      log.warning(s"received unknown message - $x")
  }
  /**
   * Add trade to DB and insert buy conditions.
   * @param user
   * @param request
   * @param senderRef response to sender when finished with Some(trade) or None
   * @return
   */
  private def postTrade(user: UserData, request: TradeRequest, senderRef: ActorRef) = {
    val trade = Trade.fromRequest(request, user.id)
    log.info(s"MarketTradeService.postTrade - $request")
    val stuff = for {
      result <- bagel.insert(trade)
      balance <- bagel.findBalance(user.id, trade.apiKeyId, trade.info.baseCurrency)
    } yield (result, balance)
    stuff.onComplete {
      // trade insert result will be > 0 for success
      // and balance must be > base quantity
      // NOTE(review): as in preStart, this callback mutates `trades` and spawns
      // a child actor off the actor thread — confirm or refactor via pipeTo.
      case Success((result, Some(balance))) if (result > 0 && balance.availableBalance > trade.baseQuantity) =>
        // Reserve the base quantity by debiting the available balance.
        val newBalance = balance.copy(availableBalance = balance.availableBalance - trade.baseQuantity)
        bagel.updateBalance(newBalance)
        // start new process to watch this trade
        trades += trade.id -> context.actorOf(TradeActor.props(trade, bagel), trade.id.toString)
        // send trade response to sender
        senderRef ! Some(trade)
      case _ =>
        // Insert failed, balance missing, or insufficient funds.
        senderRef ! None
    }
  }
  // Delegates cancellation to the owning TradeActor; replies None if unknown.
  private def deleteTrade(trade: Trade, sender: ActorRef) = {
    if (trades.contains(trade.id)) {
      trades(trade.id) ! Cancel(sender)
    } else {
      sender ! None
    }
  }
  // Delegates the update to the owning TradeActor; replies None if unknown.
  private def updateTrade(user: UserData, tradeId: UUID, request: TradeRequest, sender: ActorRef) = {
    if (trades.contains(tradeId)) {
      trades(tradeId) ! Update(user, request, sender)
    } else {
      sender ! None
    }
  }
}
| asciiu/fomo | bittrex-exchange/src/main/scala/com/flowy/bexchange/MarketTradeService.scala | Scala | apache-2.0 | 4,364 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.crypto
import java.nio.charset.StandardCharsets
import java.security._
import javax.crypto.Mac
import org.apache.commons.codec.binary.Base64
/**
 * Computes a keyed MAC (e.g. HmacSHA512, depending on the key's algorithm)
 * over UTF-8 text and returns it Base64-encoded. Crypto failures are rethrown
 * as SecurityException with a descriptive message.
 */
class SymmetricHasher(secretKey: Key) {
  def hash(data: PlainText): Scrambled =
    try {
      // The MAC algorithm is taken from the key itself.
      val mac = Mac.getInstance(secretKey.getAlgorithm)
      mac.init(secretKey)
      val digest = mac.doFinal(data.value.getBytes(StandardCharsets.UTF_8))
      Scrambled(Base64.encodeBase64String(digest))
    } catch {
      case nsae: NoSuchAlgorithmException =>
        throw new SecurityException("Algorithm '" + secretKey.getAlgorithm + "' is not supported", nsae)
      case ike: InvalidKeyException =>
        throw new SecurityException("The private key is invalid", ike)
      case se: SignatureException =>
        throw new SecurityException("Signature error", se)
    }
}
| scottcutts/crypto | src/main/scala/uk/gov/hmrc/crypto/SymmetricHasher.scala | Scala | apache-2.0 | 1,485 |
package com.seanshubin.todo.persistence.domain
/** Loads the application's initial state; performed purely for side effects. */
trait PreLoader {
  def loadInitialState(): Unit
}
| SeanShubin/todo-persistence | domain/src/main/scala/com/seanshubin/todo/persistence/domain/PreLoader.scala | Scala | unlicense | 99 |
package com.arcusys.valamis.persistence.impl.scorm.model
import com.arcusys.valamis.lesson.scorm.model.manifest.ConditionCombination.ConditionCombination
import com.arcusys.valamis.lesson.scorm.model.manifest.ConditionRuleType.ConditionRuleType
/**
 * Persistence model for a SCORM sequencing condition rule.
 * `id` is None until the row has been persisted — TODO confirm against the DAO.
 */
case class ConditionRuleModel(id: Option[Long],
                              sequencingId: Long,
                              combination: ConditionCombination,
                              ruleType: ConditionRuleType,
                              action: Option[String])
| igor-borisov/valamis | valamis-slick-persistence/src/main/scala/com/arcusys/valamis/persistence/impl/scorm/model/ConditionRuleModel.scala | Scala | gpl-3.0 | 526 |
// Simple record used as the lens target in the test below.
case class Address(streetNumber: Int, streetName: String)
object Test {
  def main(args: Array[String]): Unit = {
    // Negative compilation test: the lens expression is not a plain field
    // selection, so GenLens must reject it — the "error" marker below records
    // the expected compiler error position.
    val len = GenLens[Address](_.streetNumber + 3) // error
    val address = Address(10, "High Street")
    assert(len.get(address) == 10)
    val addr2 = len.set(5, address)
    assert(len.get(addr2) == 5)
  }
} | som-snytt/dotty | tests/neg-staging/i5941/usage_2.scala | Scala | apache-2.0 | 328 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import scala.collection.mutable
import scala.util.Random
import org.apache.spark.{SparkContext, SparkFunSuite}
import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.util.Utils
class PowerIterationClusteringSuite extends SparkFunSuite with MLlibTestSparkContext {
  import org.apache.spark.mllib.clustering.PowerIterationClustering._
  /** Generates a circle of points. */
  private def genCircle(r: Double, n: Int): Array[(Double, Double)] = {
    Array.tabulate(n) { i =>
      val theta = 2.0 * math.Pi * i / n
      (r * math.cos(theta), r * math.sin(theta))
    }
  }
  /** Computes Gaussian similarity (RBF kernel, exp(-d^2 / 2)). */
  private def sim(x: (Double, Double), y: (Double, Double)): Double = {
    val dist2 = (x._1 - y._1) * (x._1 - y._1) + (x._2 - y._2) * (x._2 - y._2)
    math.exp(-dist2 / 2.0)
  }
  test("power iteration clustering") {
    // Generate two circles following the example in the PIC paper.
    val r1 = 1.0
    val n1 = 10
    val r2 = 4.0
    val n2 = 40
    val n = n1 + n2
    val points = genCircle(r1, n1) ++ genCircle(r2, n2)
    // Lower-triangular pairs only; PIC symmetrizes the affinity matrix.
    val similarities = for (i <- 1 until n; j <- 0 until i) yield {
      (i.toLong, j.toLong, sim(points(i), points(j)))
    }
    val model = new PowerIterationClustering()
      .setK(2)
      .setMaxIterations(40)
      .run(sc.parallelize(similarities, 2))
    val predictions = Array.fill(2)(mutable.Set.empty[Long])
    model.assignments.collect().foreach { a =>
      predictions(a.cluster) += a.id
    }
    // The two circles must be recovered exactly (cluster labels may swap).
    assert(predictions.toSet == Set((0 until n1).toSet, (n1 until n).toSet))
    // Same data with degree-vector initialization instead of random.
    val model2 = new PowerIterationClustering()
      .setK(2)
      .setMaxIterations(10)
      .setInitializationMode("degree")
      .run(sc.parallelize(similarities, 2))
    val predictions2 = Array.fill(2)(mutable.Set.empty[Long])
    model2.assignments.collect().foreach { a =>
      predictions2(a.cluster) += a.id
    }
    assert(predictions2.toSet == Set((0 until n1).toSet, (n1 until n).toSet))
  }
  test("power iteration clustering on graph") {
    // Generate two circles following the example in the PIC paper.
    val r1 = 1.0
    val n1 = 10
    val r2 = 4.0
    val n2 = 40
    val n = n1 + n2
    val points = genCircle(r1, n1) ++ genCircle(r2, n2)
    val similarities = for (i <- 1 until n; j <- 0 until i) yield {
      (i.toLong, j.toLong, sim(points(i), points(j)))
    }
    // Build an explicitly symmetric edge list for the GraphX entry point.
    val edges = similarities.flatMap { case (i, j, s) =>
      if (i != j) {
        Seq(Edge(i, j, s), Edge(j, i, s))
      } else {
        None
      }
    }
    val graph = Graph.fromEdges(sc.parallelize(edges, 2), 0.0)
    val model = new PowerIterationClustering()
      .setK(2)
      .setMaxIterations(40)
      .run(graph)
    val predictions = Array.fill(2)(mutable.Set.empty[Long])
    model.assignments.collect().foreach { a =>
      predictions(a.cluster) += a.id
    }
    assert(predictions.toSet == Set((0 until n1).toSet, (n1 until n).toSet))
    // NOTE(review): model2 runs on the raw `similarities` RDD rather than on
    // `graph` — looks like a copy-paste from the previous test; confirm whether
    // .run(graph) was intended here.
    val model2 = new PowerIterationClustering()
      .setK(2)
      .setMaxIterations(10)
      .setInitializationMode("degree")
      .run(sc.parallelize(similarities, 2))
    val predictions2 = Array.fill(2)(mutable.Set.empty[Long])
    model2.assignments.collect().foreach { a =>
      predictions2(a.cluster) += a.id
    }
    assert(predictions2.toSet == Set((0 until n1).toSet, (n1 until n).toSet))
  }
  test("normalize and powerIter") {
    /*
     Test normalize() with the following graph:
     0 - 3
     | \\ |
     1 - 2
     The affinity matrix (A) is
     0 1 1 1
     1 0 1 0
     1 1 0 1
     1 0 1 0
     D is diag(3, 2, 3, 2) and hence W is
     0 1/3 1/3 1/3
     1/2 0 1/2 0
     1/3 1/3 0 1/3
     1/2 0 1/2 0
     */
    val similarities = Seq[(Long, Long, Double)](
      (0, 1, 1.0), (0, 2, 1.0), (0, 3, 1.0), (1, 2, 1.0), (2, 3, 1.0))
    // scalastyle:off
    val expected = Array(
      Array(0.0, 1.0/3.0, 1.0/3.0, 1.0/3.0),
      Array(1.0/2.0, 0.0, 1.0/2.0, 0.0),
      Array(1.0/3.0, 1.0/3.0, 0.0, 1.0/3.0),
      Array(1.0/2.0, 0.0, 1.0/2.0, 0.0))
    // scalastyle:on
    val w = normalize(sc.parallelize(similarities, 2))
    w.edges.collect().foreach { case Edge(i, j, x) =>
      assert(x ~== expected(i.toInt)(j.toInt) absTol 1e-14)
    }
    // One power-iteration step from v0 should give W * v0, renormalized to
    // sum to 1 (expected value computed by hand below).
    val v0 = sc.parallelize(Seq[(Long, Double)]((0, 0.1), (1, 0.2), (2, 0.3), (3, 0.4)), 2)
    val w0 = Graph(v0, w.edges)
    val v1 = powerIter(w0, maxIterations = 1).collect()
    val u = Array(0.3, 0.2, 0.7/3.0, 0.2)
    val norm = u.sum
    val u1 = u.map(x => x / norm)
    v1.foreach { case (i, x) =>
      assert(x ~== u1(i.toInt) absTol 1e-14)
    }
  }
  test("model save/load") {
    // Round-trip a model through disk and verify assignments are preserved.
    val tempDir = Utils.createTempDir()
    val path = tempDir.toURI.toString
    val model = PowerIterationClusteringSuite.createModel(sc, 3, 10)
    try {
      model.save(sc, path)
      val sameModel = PowerIterationClusteringModel.load(sc, path)
      PowerIterationClusteringSuite.checkEqual(model, sameModel)
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
}
object PowerIterationClusteringSuite extends SparkFunSuite {
  /** Builds a model with `nPoints` ids assigned to random clusters in [0, k). */
  def createModel(sc: SparkContext, k: Int, nPoints: Int): PowerIterationClusteringModel = {
    val assignments = sc.parallelize(
      (0 until nPoints).map(p => PowerIterationClustering.Assignment(p, Random.nextInt(k))))
    new PowerIterationClusteringModel(k, assignments)
  }
  /** Asserts that two models have the same k and identical id→cluster maps. */
  def checkEqual(a: PowerIterationClusteringModel, b: PowerIterationClusteringModel): Unit = {
    assert(a.k === b.k)
    val aAssignments = a.assignments.map(x => (x.id, x.cluster))
    val bAssignments = b.assignments.map(x => (x.id, x.cluster))
    // Join on id and count disagreements; zero means the models match.
    val unequalElements = aAssignments.join(bAssignments).filter {
      case (id, (c1, c2)) => c1 != c2 }.count()
    assert(unequalElements === 0L)
  }
}
| gioenn/xSpark | mllib/src/test/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringSuite.scala | Scala | apache-2.0 | 6,712 |
object Test {
  import Macro.*
  def main(args: Array[String]): Unit = {
    // Feeds various chained filter/map/foreach pipelines to the `optimize`
    // macro so it can rewrite (fuse) them; defined in the companion Macro file.
    val ls = List(1, 2, 3)
    val ls2 = List('a', 'b', 'c')
    optimize(ls.filter(x => x < 3).filter(x => x > 1))
    optimize(ls2.filter(x => x < 'c').filter(x => x > 'a'))
    optimize(ls.filter(x => x < 3).filter(x => x > 1).filter(x => x == 2))
    optimize(ls.filter(x => x < 3).foreach(x => println(x)))
    optimize(List(1, 2, 3).map(a => a * 2).map(b => b.toString))
    optimize(List(55, 67, 87).map(a => a.toChar).map(b => b.toString))
  }
}
| dotty-staging/dotty | tests/run-macros/quote-matching-optimize-1/Test_2.scala | Scala | apache-2.0 | 529 |
// Author: Wizmann
// Project Euler #1: print the sum of all natural numbers below 1000 that are
// divisible by 3 or 5.
object PE_1 {
  def main(args: Array[String]) {
    println((0 until 1000).filter(x => x % 3 == 0 || x % 5 == 0).sum)
  }
} | neutronest/eulerproject-douby | e1/e1.scala | Scala | mit | 150 |
@main def Test = {
  // 25-element tuples exceed the arity-22 limit, exercising the TupleXXL
  // representation; equals/hashCode must match the small-tuple semantics.
  val t1: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)
  val t2: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25.0)
  val t3: Tuple = (1, 2, 3, 4, 5, 6, 7, 8.0, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)
  // Numeric cooperative equality: 25 == 25.0, so the tuples compare equal.
  assert((1, 2) == (1, 2.0))
  assert(t1 == t2)
  assert(t1 == t3)
  assert(t1.hashCode == t2.hashCode)
  assert(t1.hashCode == t3.hashCode)
  // Nested tuples: equality must recurse into the inner TupleXXL.
  val t4: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, t1)
  val t5: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, t2)
  assert((1, (1, 2)) == (1, (1, 2.0)))
  assert(t4 == t5)
  assert(t4.hashCode == t5.hashCode)
  // Arrays use reference equality, so otherwise-identical tuples differ.
  val t6: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, Array(1, 2, 3))
  val t7: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, Array(1, 2, 3))
  assert((1, 2, Array(3, 4)) != (1, 2, Array(3, 4)))
  assert(t6 != t7)
  assert(t6.hashCode != t7.hashCode)
  // Different arity must never compare equal.
  val t8: Tuple = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)
  assert(t1 != t8)
  assert(t1.hashCode != t8.hashCode)
}
| som-snytt/dotty | tests/run/i8314.scala | Scala | apache-2.0 | 1,339 |
/*
* Original implementation (C) 2009-2016 Lightbend Inc. (https://www.lightbend.com).
* Adapted and extended in 2017 by Eugene Yokota
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gigahorse
import com.typesafe.sslconfig.ssl._
import com.typesafe.sslconfig.util.NoopLogger
import javax.net.ssl._
import java.security.KeyStore
import java.security.cert.X509Certificate
private[gigahorse] object SSL {
  /**
   * Builds an SSLContext from the given settings. Returns the JVM default
   * context (and no trust manager) when `sslConfig.default` is set; otherwise
   * constructs a context via ssl-config's ConfigSSLContextBuilder.
   */
  def buildContext(sslConfig: SSLConfigSettings): (SSLContext, Option[TrustManager]) = {
    if (sslConfig.default) {
      (SSLContext.getDefault, None)
    } else {
      // break out the static methods as much as we can...
      val keyManagerFactory = buildKeyManagerFactory(sslConfig)
      val trustManagerFactory = buildTrustManagerFactory(sslConfig)
      // Initializing with a null KeyStore loads the platform default trust store.
      val tmf = TrustManagerFactory.getInstance(sslConfig.trustManagerConfig.algorithm)
      tmf.init(null.asInstanceOf[KeyStore])
      val trustManager: X509TrustManager = tmf.getTrustManagers()(0).asInstanceOf[X509TrustManager]
      val context = new ConfigSSLContextBuilder(NoopLogger.factory(), sslConfig, keyManagerFactory, trustManagerFactory).build()
      val trustManagerOpt = Option(trustManager)
      (context, trustManagerOpt)
    }
  }
  def buildKeyManagerFactory(ssl: SSLConfigSettings): KeyManagerFactoryWrapper =
    new DefaultKeyManagerFactoryWrapper(ssl.keyManagerConfig.algorithm)
  def buildTrustManagerFactory(ssl: SSLConfigSettings): TrustManagerFactoryWrapper =
    new DefaultTrustManagerFactoryWrapper(ssl.trustManagerConfig.algorithm)
  // SECURITY: accepts every certificate without validation. Must only be used
  // for explicitly-opted-in insecure/test configurations, never by default.
  lazy val insecureTrustManager: X509TrustManager = new X509TrustManager {
    def checkClientTrusted(certs: Array[X509Certificate], authType: String): Unit = ()
    def checkServerTrusted(certs: Array[X509Certificate], authType: String): Unit = ()
    def getAcceptedIssuers(): Array[X509Certificate] = Array()
  }
  // SECURITY: disables hostname verification entirely; same caveat as above.
  lazy val insecureHostnameVerifier: HostnameVerifier = new HostnameVerifier {
    def verify(hostname: String, session: SSLSession): Boolean = true
  }
}
| eed3si9n/gigahorse | core/src/main/scala/gigahorse/SSL.scala | Scala | apache-2.0 | 2,525 |
// Check that selecting a member from a `UncheckedNull`able union is unsound.
object Test {
  def main(args: Array[String]): Unit = {
    // Member selection on a String|UncheckedNull is permitted by the compiler…
    val s: String|UncheckedNull = "hello"
    assert(s.length == 5)
    // …which is unsound: selecting on an actual null must fail at runtime.
    val s2: String|UncheckedNull = null
    try {
      s2.length // should throw
      assert(false)
    } catch {
      case e: NullPointerException =>
        // ok: selecting on a UncheckedNull can throw
    }
  }
}
| som-snytt/dotty | tests/explicit-nulls/run/java-null.scala | Scala | apache-2.0 | 424 |
/*
* Copyright (c) 2015-2022 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Attribution Notice under the terms of the Apache License 2.0
*
* This work was created by the collective efforts of the openCypher community.
* Without limiting the terms of Section 6, any Derivative Work that is not
* approved by the public consensus process of the openCypher Implementers Group
* should not be described as “Cypher” (and Cypher® is a registered trademark of
* Neo4j Inc.) or as "openCypher". Extensions by implementers or prototypes or
* proposals for change that have been documented or implemented should only be
* described as "implementation extensions to Cypher" or as "proposed changes to
* Cypher that are not yet approved by the openCypher community".
*/
package org.opencypher.tools.tck
import org.opencypher.tools.tck.api.ExpectError
import org.opencypher.tools.tck.api.Scenario
import org.opencypher.tools.tck.constants.TCKTags
import org.scalatest.AppendedClues
import org.scalatest.Assertion
import org.scalatest.OptionValues
import org.scalatest.matchers.should.Matchers
/** Structural validation of a single TCK [[Scenario]]. */
trait ValidateScenario extends AppendedClues with Matchers with OptionValues with ValidateSteps {
  // Tracks (name, exampleIndex) pairs seen per (categories, featureName) so
  // duplicate scenario names within one feature can be rejected.
  // NOTE(review): shared mutable state — not safe if scenarios are validated
  // in parallel; confirm the test runner executes these sequentially.
  private val scenarioNamesByFeature = scala.collection.mutable.HashMap[(List[String], String), List[(String, Option[Int])]]()
  def validateScenario(scenario: Scenario): Assertion = {
    withClue("scenario has a number, greater than zero") {
      scenario.number.value should be > 0
    }
    withClue("scenario has a unique name in feature") {
      val featureSignature: (List[String], String) = (scenario.categories, scenario.featureName)
      val scenarioSignature = (scenario.name, scenario.exampleIndex)
      // Record this scenario, then assert it was not already present.
      val scenarioSignaturesBefore = scenarioNamesByFeature.getOrElseUpdate(featureSignature, List[(String, Option[Int])]())
      scenarioNamesByFeature.update(featureSignature, scenarioSignaturesBefore :+ scenarioSignature)
      scenarioSignaturesBefore should not contain scenarioSignature
    }
    withClue("scenario with an example name should have an example index") {
      (scenario.exampleName, scenario.exampleIndex) should matchPattern {
        case (Some(_), Some(_)) =>
        case (None, _) =>
        // (Some(_), None) is the not allowed case
      }
    }
    validateSteps(scenario.steps, scenario.tags)
  }
}
| opencypher/openCypher | tools/tck-integrity-tests/src/test/scala/org/opencypher/tools/tck/ValidateScenario.scala | Scala | apache-2.0 | 2,950 |
#!/bin/sh
exec scala "$0" "$@"
!#
/**
 * Renders the Scala source of an implicit `JsonCodec` instance for `TupleN`,
 * delegating each component to the component type's own codec.
 */
def generateJsonCodecTupleN(n: Int): String = {
  val indices = 1 to n
  // Renders one fragment per tuple component and joins them with `sep`.
  def joined(sep: String)(render: Int => String): String = indices.map(render).mkString(sep)
  val types: String = joined(", ")(i => s"T$i")
  val typesWithConstraints: String = joined(", ")(i => s"T$i: JsonCodec")
  val toJsonLines: String = joined(",\\n")(i => s"""    "_$i" -> json.toJson[T$i](t._$i)""")
  val fromJsonLines: String = joined("\\n")(i => s"""    _$i <- (j ~> "_$i").flatMap(json.fromJson[T$i])""")
  val components: String = joined(", ")(i => s"_$i")
  s"""/**
     | * Implements [[JsonCodec]] for `Tuple$n` whenever the component types
     | * have instances of [[JsonCodec]] in implicit scope.
     | */
     |implicit def jsonCodecTuple$n[$typesWithConstraints]:
     |JsonCodec[($types)] = new JsonCodec[($types)] {
     |  import JsonImplicits._
     |  def toJson(t: ($types)): Json = Json.obj(
     |$toJsonLines
     |  )
     |  def fromJson(j: Json): Option[($types)] = for {
     |$fromJsonLines
     |  } yield ($components)
     |}
     |""".stripMargin
}
// Emit codec instances for Tuple2 through Tuple7 to stdout.
(2 to 7).foreach { i => println(generateJsonCodecTupleN(i)) }
| cjdev/serialization | generate-codecs.scala | Scala | mit | 1,147 |
package io.vamp.container_driver
import io.vamp.model.artifact.{ HealthCheck, Port }
import io.vamp.model.reader.Time
import org.junit.runner.RunWith
import org.scalatest.{ FlatSpec, Matchers }
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class HealthCheckMergerSpec extends FlatSpec with Matchers with HealthCheckMerger {
  // Shared fixtures: two health checks on distinct path/port pairs, plus the
  // ports they reference.
  val testHealthCheck = Some(List(HealthCheck("/", "webport", Time(5), Time(5), Time(5), 5, "HTTP")))
  val testHealthCheckTwo = Some(List(HealthCheck("/two", "someport", Time(5), Time(5), Time(5), 5, "HTTP")))
  val testPort = List(Port("webport", None, None, 8080, Port.Type.Http))
  val testPortTwo = List(Port("someport", None, None, 9000, Port.Type.Http))
  // mergeHealthChecks arguments below are (breed, service, cluster, ports) —
  // TODO confirm against the HealthCheckMerger signature.
  "HealthCheckMerger" should "return no health checks if service level is an empty list" in {
    val mergeResult = mergeHealthChecks(testHealthCheck, Some(List()), testHealthCheck, testPort)
    mergeResult should equal(List())
  }
  it should "return only the service level health check if all paths are equal" in {
    val mergeResult = mergeHealthChecks(
      testHealthCheck,
      testHealthCheck.map(_.map(_.copy(failures = 10))),
      testHealthCheck,
      testPort)
    mergeResult should equal(List(HealthCheck("/", "webport", Time(5), Time(5), Time(5), 10, "HTTP")))
  }
  it should "return cluster if service is not defined" in {
    val mergeResult = mergeHealthChecks(
      testHealthCheck,
      None,
      testHealthCheckTwo, testPort ++ testPortTwo)
    mergeResult should equal(testHealthCheckTwo.get)
  }
  it should "return an empty list if all are not defined" in {
    val mergeResult = mergeHealthChecks(None, None, None, List())
    mergeResult should equal(List())
  }
  it should "give cluster precedence over breed on same path and port" in {
    val testHealthCheckFailDiff = testHealthCheck.map(_.map(_.copy(failures = 20)))
    val mergeResult = mergeHealthChecks(
      testHealthCheck,
      None,
      testHealthCheckFailDiff,
      testPort)
    mergeResult should equal(testHealthCheckFailDiff.get)
  }
  it should "merge service and cluster if they have different paths but ports are available for that particular service" in {
    val mergeResult = mergeHealthChecks(
      None,
      Some(List(HealthCheck("/", "webport", Time(5), Time(5), Time(5), 5, "HTTP"))),
      Some(List(HealthCheck("/2", "webport", Time(5), Time(5), Time(5), 5, "HTTP"))),
      testPort)
    mergeResult should equal(List(
      HealthCheck("/", "webport", Time(5), Time(5), Time(5), 5, "HTTP"),
      HealthCheck("/2", "webport", Time(5), Time(5), Time(5), 5, "HTTP")))
  }
  it should "merge cluster if cluster overrides service and breed" in {
    val mergeResult = mergeHealthChecks(
      testHealthCheck,
      testHealthCheck,
      testHealthCheck,
      testPort)
    mergeResult should equal(testHealthCheck.get)
  }
  it should "merge not the same path if ports are different" in {
    val testHealthCheckDiffPath = testHealthCheck.map(_.map(_.copy(port = "someport")))
    val mergeResult = mergeHealthChecks(None, testHealthCheckDiffPath, testHealthCheck, testPort ++ testPortTwo)
    mergeResult should equal(testHealthCheckDiffPath.get)
  }
  it should "merge same ports if path is different" in {
    val testHealthCheckDiffPath = testHealthCheck.map(_.map(_.copy(path = "/diff")))
    val mergeResult = mergeHealthChecks(None, testHealthCheckDiffPath, testHealthCheck, testPort)
    mergeResult should equal(testHealthCheckDiffPath.get ++ testHealthCheck.get)
  }
  it should "merge a larger list" in {
    // 30 checks split evenly across breed/service/cluster; the merge keeps the
    // service and cluster portions (last 20).
    val healthChecks = (1 to 30).map(i ⇒ HealthCheck(s"$i", "webport", Time(5), Time(5), Time(5), 5, "HTTP")).toList
    val mergeResult = mergeHealthChecks(
      Some(healthChecks.take(10)),
      Some(healthChecks.slice(10, 20)),
      Some(healthChecks.slice(20, 30)),
      testPort)
    mergeResult should equal(healthChecks.drop(10))
  }
  it should "return the breed level health checks is others are not defined" in {
    val mergeResult = mergeHealthChecks(testHealthCheck, None, None, testPort)
    mergeResult should equal(testHealthCheck.get)
  }
  // Two-argument overload: (workflow, breed) — workflow takes precedence.
  it should "merge workflow with precedence over breed level" in {
    mergeHealthChecks(testHealthCheck, testHealthCheckTwo) should equal(testHealthCheck.get)
  }
  it should "return an empty list if workflow is defined" in {
    mergeHealthChecks(Some(List()), testHealthCheck) should equal(List())
  }
  it should "return breed level if workflow level is not defined" in {
    mergeHealthChecks(None, testHealthCheck) should equal(testHealthCheck.get)
  }
  it should "return an empty list if nothing is defined for workflow and breed" in {
    mergeHealthChecks(None, None) should equal(List())
  }
  it should "return an empty list if nothing is defined for cluster and service" in {
    mergeHealthChecks(None, None, None, List()) should equal(List())
  }
}
| magneticio/vamp | container_driver/src/test/scala/io/vamp/container_driver/HealthCheckMergerSpec.scala | Scala | apache-2.0 | 4,911 |
package cobase.post
import javax.inject.Inject
import java.util.UUID
import cobase.user.User
import scala.concurrent.Future
/**
* Handles actions to posts.
*/
/**
 * Handles actions to posts.
 * Thin asynchronous service layer that delegates all persistence to [[PostDAO]].
 */
class PostService @Inject() (postDAO: PostDAO) {
  def findAll: Future[Seq[Post]] = postDAO.findAll
  def findById(postId: UUID): Future[Option[Post]] = postDAO.findById(postId)
  /**
   * Retrieves a post that matches the specified search phrase.
   */
  def findByPhrase(phrase: String): Future[Seq[Post]] = postDAO.findByPhrase(phrase)
  def findLatestPostsForGroup(groupId: UUID): Future[Seq[Post]] = postDAO.findLatestPostsForGroup(groupId)
  def add(post: Post): Future[Post] = postDAO.add(post)
  def update(post: Post): Future[Post] = postDAO.update(post)
  /**
   * Get posts found based on user subscriptions
   */
  def getDashboardPosts(user: User): Future[Seq[DashboardPost]] = postDAO.getDashboardPosts(user)
}
| Cobase/cobase-pro | app/cobase/post/PostService.scala | Scala | mit | 898 |
/*
* Copyright (c) 2012, TU Berlin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the TU Berlin nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL TU Berlin BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package de.tuberlin.uebb.sl2.modules
/**
* Alpha conversion for SL expressions.
*/
trait AlphaConversion {
  this: Syntax =>
  // Supplier of fresh variable names, used to rename binders that would
  // otherwise capture substituted variables.
  type NewName = Unit => String
  /**
   * Capture-avoiding substitution into a case alternative. If any variable
   * bound by the pattern collides with a substitution target, the binders are
   * first alpha-renamed to fresh names, then substitution is retried.
   */
  def substitute(fresh : NewName, subst : Map[VarName, VarName], a : Alternative) : Alternative = {
    val bound = vars(a.pattern)
    val rename = bound.intersect(subst.values.toSet)
    if (rename.isEmpty) {
      // Pattern binders shadow the substitution inside the alternative body.
      Alternative(a.pattern, substitute(fresh, subst -- vars(a.pattern), a.expr))
    } else {
      /* alpha conversion */
      val renameMap = Map() ++ (rename map (r => (r -> fresh())))
      substitute(fresh, subst, substitute(fresh, renameMap, a))
    }
  }
  /** Renames variables bound by a pattern according to `subst`. */
  def substitute(subst : Map[VarName, VarName], pattern : Pattern) : Pattern = pattern match {
    case PatternExpr(c, patterns, _) => PatternExpr(c, patterns map (substitute(subst, _)))
    case PatternVar(x, _) if subst.contains(x) => PatternVar(subst(x))
    case p@_ => p
  }
  /**
   * Capture-avoiding variable-for-variable substitution on expressions.
   * Binders (lambda patterns, let definitions, case patterns) shadow the
   * substitution and are alpha-renamed when they collide with its range.
   */
  def substitute(fresh : NewName, subst : Map[VarName, VarName], e : Expr) : Expr = e match {
    case Conditional(i, t, e, attr) => Conditional(substitute(fresh, subst, i),
                                                   substitute(fresh, subst, t),
                                                   substitute(fresh, subst, e), attr)
    case Lambda(pats, e, attr) => {
      val bound = pats.flatMap(vars).toSet
      val rename = bound.intersect(subst.values.toSet)
      if (rename.isEmpty) {
        Lambda(pats, substitute(fresh, subst -- bound, e), attr)
      } else {
        /* alpha conversion */
        val renameMap = Map() ++ (rename map (r => (r -> fresh())))
        substitute(fresh, subst, Lambda(pats map (substitute(renameMap, _)), substitute(fresh, renameMap, e)))
      }
    }
    case Case(e1, alts, attr) => {
      Case(substitute(fresh, subst, e1), alts map {a => substitute(fresh, subst, a)}, attr)
    }
    case Let(defs, rhs, attr) => {
      // NOTE(review): `subst -- bound` is applied to the definitions' right-hand
      // sides too, i.e. let is treated as recursive — confirm this matches the
      // language's let semantics.
      val bound = (defs map (_.lhs)).toSet
      val rename = bound.intersect(subst.values.toSet)
      if (rename.isEmpty) {
        Let(defs map {d => d.copy(rhs = substitute(fresh, subst -- bound, d.rhs))}, substitute(fresh, subst -- bound, rhs))
      } else {
        val renameMap = Map() ++ (rename map (r => (r -> fresh())))
        val l2 = Let(defs map {d => d.copy(rhs = substitute(fresh, renameMap, d.rhs))}, substitute(fresh, renameMap, rhs))
        substitute(fresh, subst, l2)
      }
    }
    case App(l, r, attr) => App(substitute(fresh, subst, l), substitute(fresh, subst, r), attr)
    // Only local, unqualified variables are renamed; unmapped names pass through.
    case ExVar(Syntax.Var(x,Syntax.LocalMod), attr) => ExVar(Syntax.Var(subst.get(x).getOrElse(x)), attr)
    case c@ExVar(_, _) => c
    case c@ExCon(_, _) => c
    case c@ConstInt(_, _) => c
    case c@ConstReal(_, _) => c
    case c@ConstChar(_, _) => c
    case c@ConstString(_, _) => c
    case c@JavaScript(_, _, _) => c
  }
}
| mzuber/simple-language | src/main/scala/modules/AlphaConversion.scala | Scala | bsd-3-clause | 4,387 |
package unfiltered.response
import org.specs2.mutable._
/** Runs the shared PassSpec behaviors against an embedded Jetty server. */
object PassSpecJetty
  extends Specification
  with unfiltered.specs2.jetty.Planned
  with PassSpec
/** Runs the shared PassSpec behaviors against an embedded Netty server. */
object PassSpecNetty
  extends Specification
  with unfiltered.specs2.netty.Planned
  with PassSpec
/**
 * Verifies unfiltered's Pass.onPass combinator: each request is first offered
 * to intent1 and, when that intent does not match, falls through to intent2.
 */
trait PassSpec extends Specification with unfiltered.specs2.Hosted {
  import unfiltered.request._
  import unfiltered.request.{Path => UFPath}

  // Composite plan: try intent1 first, fall back to intent2 on Pass.
  def intent[A,B]: unfiltered.Cycle.Intent[A,B] =
    Pass.onPass(intent1, intent2)

  // Matches only GET /intent1
  def intent1[A,B]: unfiltered.Cycle.Intent[A,B] = {
    case GET(UFPath(Seg("intent1"::Nil))) => ResponseString("intent1")
  }

  // Matches any other GET request
  def intent2[A,B]: unfiltered.Cycle.Intent[A,B] = {
    case GET(_) => ResponseString("intent2")
  }

  "Pass" should {
    "match in the first intent" in {
      val resp = http(host / "intent1").as_string
      resp must_== "intent1"
    }
    "match in the second intent" in {
      val resp = http(host / "whatever").as_string
      resp must_== "intent2"
    }
    // neither intent matches POST, so the request is expected to 404
    "not match with POST" in {
      val resp = httpx(req(host) << Map("what"-> "oh"))
      resp.code must_== 404
    }
  }
}
| hamnis/unfiltered | library/src/test/scala/PassSpec.scala | Scala | mit | 1,100 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
import cascading.tuple.{ Tuple => CTuple, Fields }
import cascading.pipe.joiner.{ Joiner => CJoiner, JoinerClosure }
import cascading.pipe.{ CoGroup, Pipe }
import com.twitter.scalding._
import scala.collection.JavaConverters._
import com.twitter.scalding.TupleConverter.tuple2Converter
import com.twitter.scalding.TupleSetter.tup2Setter
object CoGrouped {
  /**
   * Removes duplicates from `list`, where two elements are considered
   * duplicates when `fn` maps them to the same value. The first occurrence
   * wins and the original ordering is preserved (so a list that is already
   * unique comes back unchanged).
   */
  final def distinctBy[T, U](list: List[T])(fn: T => U): List[T] = {
    // Fold left-to-right, prepending unseen elements, then reverse once at
    // the end to restore the original order.
    val (reversedKeep, _) =
      list.foldLeft((List.empty[T], Set.empty[U])) {
        case ((kept, seen), item) =>
          val key = fn(item)
          if (seen.contains(key)) (kept, seen)
          else (item :: kept, seen + key)
      }
    reversedKeep.reverse
  }
}
object CoGroupable {
  /*
   * This is the default empty join function needed for CoGroupable and HashJoinable
   */
  // Unwraps the value stored at Grouped.ValuePosition from each cascading
  // tuple and casts it back to V. The right-most sides must be empty because
  // this function is only used when there is nothing to join against.
  def castingJoinFunction[V]: (Any, Iterator[CTuple], Seq[Iterable[CTuple]]) => Iterator[V] =
    { (k, iter, empties) =>
      assert(empties.isEmpty, "this join function should never be called with non-empty right-most")
      iter.map(_.getObject(Grouped.ValuePosition).asInstanceOf[V])
    }
}
/**
 * Represents something than can be CoGrouped with another CoGroupable
 */
trait CoGroupable[K, +R] extends HasReducers with HasDescription with java.io.Serializable {

  /**
   * This is the list of mapped pipes, just before the (reducing) joinFunction is applied
   */
  def inputs: List[TypedPipe[(K, Any)]]

  // Ordering used by the reducers to sort/group the keys
  def keyOrdering: Ordering[K]

  /**
   * This function is not type-safe for others to call, but it should
   * never have an error. By construction, we never call it with incorrect
   * types.
   * It would be preferable to have stronger type safety here, but unclear
   * how to achieve, and since it is an internal function, not clear it
   * would actually help anyone for it to be type-safe
   */
  protected def joinFunction: (K, Iterator[CTuple], Seq[Iterable[CTuple]]) => Iterator[R]

  /**
   * Smaller is about average values/key not total size (that does not matter, but is
   * clearly related).
   *
   * Note that from the type signature we see that the right side is iterated (or may be)
   * over and over, but the left side is not. That means that you want the side with
   * fewer values per key on the right. If both sides are similar, no need to worry.
   * If one side is a one-to-one mapping, that should be the "smaller" side.
   */
  def cogroup[R1, R2](smaller: CoGroupable[K, R1])(fn: (K, Iterator[R], Iterable[R1]) => Iterator[R2]): CoGrouped[K, R2] = {
    val self = this
    // number of non-leftmost streams this side contributes; used below to
    // split the joined streams between the two join functions
    val leftSeqCount = self.inputs.size - 1
    val jf = joinFunction // avoid capturing `this` in the closure below
    val smallerJf = smaller.joinFunction

    new CoGrouped[K, R2] {
      val inputs = self.inputs ++ smaller.inputs
      // combined reducer hint: the max of whichever sides specify one
      val reducers = (self.reducers.toIterable ++ smaller.reducers.toIterable).reduceOption(_ max _)
      val descriptions: Seq[String] = self.descriptions ++ smaller.descriptions
      def keyOrdering = smaller.keyOrdering

      /**
       * Avoid capturing anything below as it will need to be serialized and sent to
       * all the reducers.
       */
      def joinFunction = { (k: K, leftMost: Iterator[CTuple], joins: Seq[Iterable[CTuple]]) =>
        val (leftSeq, rightSeq) = joins.splitAt(leftSeqCount)
        val joinedLeft = jf(k, leftMost, leftSeq)

        // Only do this once, for all calls to iterator below
        val smallerHead = rightSeq.head
        val smallerTail = rightSeq.tail
        // TODO: it might make sense to cache this in memory as an IndexedSeq and not
        // recompute it on every value for the left if the smallerJf is non-trivial
        // we could see how long it is, and possible switch to a cached version the
        // second time through if it is small enough
        val joinedRight = new Iterable[R1] {
          def iterator = smallerJf(k, smallerHead.iterator, smallerTail)
        }

        fn(k, joinedLeft, joinedRight)
      }
    }
  }

  // Standard join flavors, each built on cogroup with the matching Joiner
  def join[W](smaller: CoGroupable[K, W]) =
    cogroup[W, (R, W)](smaller)(Joiner.inner2)
  def leftJoin[W](smaller: CoGroupable[K, W]) =
    cogroup[W, (R, Option[W])](smaller)(Joiner.left2)
  def rightJoin[W](smaller: CoGroupable[K, W]) =
    cogroup[W, (Option[R], W)](smaller)(Joiner.right2)
  def outerJoin[W](smaller: CoGroupable[K, W]) =
    cogroup[W, (Option[R], Option[W])](smaller)(Joiner.outer2)
  // TODO: implement blockJoin
}
/**
 * A fully-specified cogroup of one or more TypedPipes, ready to be planned
 * into a cascading CoGroup. All `with*`/`filterKeys`/`mapGroup` combinators
 * return new anonymous instances that delegate to this one; the anonymous
 * classes deliberately capture only local vals (not `this`) to stay
 * serializable.
 */
trait CoGrouped[K, +R] extends KeyedListLike[K, R, CoGrouped] with CoGroupable[K, R] with WithReducers[CoGrouped[K, R]] with WithDescription[CoGrouped[K, R]] {

  /** Returns a copy of this CoGrouped with an explicit reducer count. */
  override def withReducers(reds: Int) = {
    val self = this // the usual self => trick leads to serialization errors
    val joinF = joinFunction // can't access this on self, since it is protected
    new CoGrouped[K, R] {
      def inputs = self.inputs
      def reducers = Some(reds)
      def keyOrdering = self.keyOrdering
      def joinFunction = joinF
      def descriptions: Seq[String] = self.descriptions
    }
  }

  /** Returns a copy of this CoGrouped with an extra description appended. */
  override def withDescription(description: String) = {
    val self = this // the usual self => trick leads to serialization errors
    val joinF = joinFunction // can't access this on self, since it is protected
    new CoGrouped[K, R] {
      def inputs = self.inputs
      def reducers = self.reducers
      def keyOrdering = self.keyOrdering
      def joinFunction = joinF
      def descriptions: Seq[String] = self.descriptions :+ description
    }
  }

  /**
   * It seems complex to push a take up to the mappers before a general join.
   * For some cases (inner join), we could take at most n from each TypedPipe,
   * but it is not clear how to generalize that for general cogrouping functions.
   * For now, just do a normal take.
   */
  override def bufferedTake(n: Int): CoGrouped[K, R] =
    take(n)

  // Filter the keys before doing the join
  override def filterKeys(fn: K => Boolean): CoGrouped[K, R] = {
    val self = this // the usual self => trick leads to serialization errors
    val joinF = joinFunction // can't access this on self, since it is protected
    new CoGrouped[K, R] {
      // the filter is pushed into each input pipe, before the shuffle
      val inputs = self.inputs.map(_.filterKeys(fn))
      def reducers = self.reducers
      def descriptions: Seq[String] = self.descriptions
      def keyOrdering = self.keyOrdering
      def joinFunction = joinF
    }
  }

  override def mapGroup[R1](fn: (K, Iterator[R]) => Iterator[R1]): CoGrouped[K, R1] = {
    val self = this // the usual self => trick leads to serialization errors
    val joinF = joinFunction // can't access this on self, since it is protected
    new CoGrouped[K, R1] {
      def inputs = self.inputs
      def reducers = self.reducers
      def descriptions: Seq[String] = self.descriptions
      def keyOrdering = self.keyOrdering
      def joinFunction = { (k: K, leftMost: Iterator[CTuple], joins: Seq[Iterable[CTuple]]) =>
        val joined = joinF(k, leftMost, joins)
        /*
         * After the join, if the key has no values, don't present it to the mapGroup
         * function. Doing so would break the invariant:
         *
         * a.join(b).toTypedPipe.group.mapGroup(fn) == a.join(b).mapGroup(fn)
         */
        Grouped.addEmptyGuard(fn)(k, joined)
      }
    }
  }

  /** Plans this cogroup into a cascading CoGroup and wraps it as a TypedPipe. */
  override lazy val toTypedPipe: TypedPipe[(K, R)] = {
    // Cascading handles the first item in join differently, we have to see if it is repeated
    val firstCount = inputs.count(_ == inputs.head)

    import Dsl._
    import RichPipe.assignName

    /*
     * we only want key and value.
     * Cascading requires you have the same number coming in as out.
     * in the first case, we introduce (null0, null1), in the second
     * we have (key1, value1), but they are then discarded:
     */
    def outFields(inCount: Int): Fields =
      List("key", "value") ++ (0 until (2 * (inCount - 1))).map("null%d".format(_))

    // Make this stable so the compiler does not make a closure
    val ord = keyOrdering

    TypedPipeFactory({ (flowDef, mode) =>
      val newPipe = Grouped.maybeBox[K, Any](ord, flowDef) { (tupset, ordKeyField) =>
        if (firstCount == inputs.size) {
          /**
           * This is a self-join
           * Cascading handles this by sending the data only once, spilling to disk if
           * the groups don't fit in RAM, then doing the join on this one set of data.
           * This is fundamentally different than the case where the first item is
           * not repeated. That case is below
           */
          val NUM_OF_SELF_JOINS = firstCount - 1
          new CoGroup(assignName(inputs.head.toPipe[(K, Any)](("key", "value"))(flowDef, mode,
            tupset)),
            ordKeyField,
            NUM_OF_SELF_JOINS,
            outFields(firstCount),
            WrappedJoiner(new DistinctCoGroupJoiner(firstCount, Grouped.keyGetter(ord), joinFunction)))
        } else if (firstCount == 1) {

          def keyId(idx: Int): String = "key%d".format(idx)
          /**
           * As long as the first one appears only once, we can handle self joins on the others:
           * Cascading does this by maybe spilling all the streams other than the first item.
           * This is handled by a different CoGroup constructor than the above case.
           */
          def renamePipe(idx: Int, p: TypedPipe[(K, Any)]): Pipe =
            p.toPipe[(K, Any)](List(keyId(idx), "value%d".format(idx)))(flowDef, mode,
              tupset)

          // This is tested for the properties we need (non-reordering)
          val distincts = CoGrouped.distinctBy(inputs)(identity)
          val dsize = distincts.size
          val isize = inputs.size

          // per-stream key field carrying the boxed-key comparator
          def makeFields(id: Int): Fields = {
            val comp = ordKeyField.getComparators.apply(0)
            val fieldName = keyId(id)
            val f = new Fields(fieldName)
            f.setComparator(fieldName, comp)
            f
          }

          val groupFields: Array[Fields] = (0 until dsize)
            .map(makeFields)
            .toArray

          val pipes: Array[Pipe] = distincts
            .zipWithIndex
            .map { case (item, idx) => assignName(renamePipe(idx, item)) }
            .toArray

          val cjoiner = if (isize != dsize) {
            // avoid capturing anything other than the mapping ints:
            val mapping: Map[Int, Int] = inputs.zipWithIndex.map {
              case (item, idx) =>
                idx -> distincts.indexWhere(_ == item)
            }.toMap

            new CoGroupedJoiner(isize, Grouped.keyGetter(ord), joinFunction) {
              val distinctSize = dsize
              def distinctIndexOf(orig: Int) = mapping(orig)
            }
          } else {
            new DistinctCoGroupJoiner(isize, Grouped.keyGetter(ord), joinFunction)
          }

          new CoGroup(pipes, groupFields, outFields(dsize), WrappedJoiner(cjoiner))
        } else {
          /**
           * This is non-trivial to encode in the type system, so we throw this exception
           * at the planning phase.
           */
          sys.error("Except for self joins, where you are joining something with only itself,\\n" +
            "left-most pipe can only appear once. Firsts: " +
            inputs.collect { case x if x == inputs.head => x }.toString)
        }
      }
      /*
       * the CoGrouped only populates the first two fields, the second two
       * are null. We then project out at the end of the method.
       */
      val pipeWithRedAndDescriptions = {
        RichPipe.setReducers(newPipe, reducers.getOrElse(-1))
        RichPipe.setPipeDescriptions(newPipe, descriptions)
        newPipe.project('key, 'value)
      }

      //Construct the new TypedPipe
      TypedPipe.from[(K, R)](pipeWithRedAndDescriptions, ('key, 'value))(flowDef, mode, tuple2Converter)
    })
  }
}
/**
 * Cascading Joiner that adapts scalding's joinFunction to cascading's
 * JoinerClosure protocol. Subclasses specify how the `inputSize` original
 * streams map onto the `distinctSize` streams cascading actually materializes
 * (they differ when the same pipe participates more than once).
 */
abstract class CoGroupedJoiner[K](inputSize: Int, getter: TupleGetter[K], joinFunction: (K, Iterator[CTuple], Seq[Iterable[CTuple]]) => Iterator[Any]) extends CJoiner {
  // Number of distinct streams cascading sees (self-joins collapse duplicates)
  val distinctSize: Int
  // Maps an original input position to its position among the distinct streams
  def distinctIndexOf(originalPos: Int): Int

  // This never changes. Compute it once
  protected val restIndices: IndexedSeq[Int] = (1 until inputSize).map { idx =>
    val didx = distinctIndexOf(idx)
    assert(didx > 0, "the left most can only be iterated once")
    didx
  }

  override def getIterator(jc: JoinerClosure) = {
    val iters = (0 until distinctSize).map { jc.getIterator(_).asScala.buffered }
    // The grouping key can be read from any non-empty stream
    val keyTuple = iters
      .collectFirst { case iter if iter.nonEmpty => iter.head }
      .get // One of these must have a key
    val key = getter.get(keyTuple, 0)
    val leftMost = iters.head

    // Re-iterable view over a non-leftmost stream; cascading allows the
    // non-first streams to be iterated multiple times
    def toIterable(didx: Int) =
      new Iterable[CTuple] { def iterator = jc.getIterator(didx).asScala }

    val rest = restIndices.map(toIterable(_))
    joinFunction(key, leftMost, rest).map { rval =>
      // There always has to be the same number of resulting fields as input
      // or otherwise the flow planner will throw
      val res = CTuple.size(distinctSize)
      res.set(0, key)
      res.set(1, rval)
      res
    }.asJava
  }

  override def numJoins = distinctSize - 1
}
// If all the input pipes are unique, this works: every original stream
// position maps directly to the same distinct-stream position.
class DistinctCoGroupJoiner[K](count: Int,
  getter: TupleGetter[K],
  joinFunction: (K, Iterator[CTuple], Seq[Iterable[CTuple]]) => Iterator[Any])
  extends CoGroupedJoiner[K](count, getter, joinFunction) {
  val distinctSize = count
  def distinctIndexOf(idx: Int) = idx
}
| avp1984/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/CoGrouped.scala | Scala | apache-2.0 | 14,143 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.mkldnn
import com.intel.analytics.bigdl.nn.Utils
import com.intel.analytics.bigdl.nn.abstractnn.Activity
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.reflect.ClassTag
/**
 * Creates a module that takes a table as input and outputs the element at index `index`
 * (positive or negative). This can be either a table or a Tensor.
 * The gradients of the non-index elements are zeroed Tensors of the same size.
 * This is true regardless of the depth of the encapsulated Tensor as the function used
 * internally to do so is recursive.
 *
 * @param index the index to be selected; 1-based, and negative values count
 *              from the end (-1 selects the last element)
 */
@SerialVersionUID(- 7562114420457472987L)
class SelectTable(val index: Int)(implicit ev: TensorNumeric[Float]) extends MklDnnLayer {

  // Resolves a possibly negative user index against a 1-based table of
  // length `len`: -1 maps to `len`, -2 to `len - 1`, etc.
  private def resolveIndex(len: Int): Int =
    if (this.index < 0) len + this.index + 1 else this.index

  override def updateOutput(in: Activity): Activity = {
    val input = in.asInstanceOf[Table]
    // BUG FIX: negative indices previously resolved to `length + index` here
    // but `length + index + 1` in updateGradInput. Tables are 1-based, so the
    // +1 variant is the correct one; both paths now share resolveIndex.
    val index = resolveIndex(input.length())
    require(input.contains(index), "index does not exist in the input table")
    output = input[Activity](index)
    output
  }

  override def updateGradInput(in: Activity, gradOutput: Activity): Table = {
    val input = in.asInstanceOf[Table]
    gradInput = T()
    // Zero-fill gradInput with the same structure and sizes as the input.
    Utils.zeroTableCopy(gradInput.asInstanceOf[Table], input)
    val index = resolveIndex(input.length())
    // BUG FIX: validate BEFORE accessing the table; previously the require ran
    // after Table(index) had already been dereferenced, so an out-of-range
    // index failed with the wrong error.
    require(gradInput.asInstanceOf[Table].contains(index), "Index exceeds the size of input table")
    // Only the selected slot receives the incoming gradient; the rest stay zero.
    Utils.recursiveCopy(gradInput.asInstanceOf[Table](index), gradOutput)
    gradInput.asInstanceOf[Table]
  }

  override def toString: String = s"mkldnn.SelectTable($index)"

  override def canEqual(other: Any): Boolean = other.isInstanceOf[SelectTable]

  // Equality includes the superclass state plus the selected index.
  override def equals(other: Any): Boolean = other match {
    case that: SelectTable =>
      super.equals(that) &&
        (that canEqual this) &&
        index == that.index
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(super.hashCode(), index)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }

  // NOTE(review): `inputs`/`grad` are 0-based arrays while `index` is treated
  // as 1-based elsewhere; behavior preserved as-is — confirm with callers.
  override private[mkldnn] def initFwdPrimitives(inputs: Array[MemoryData], phase: Phase) = {
    _inputFormats = inputs
    _outputFormats = Array(inputs(index))
    (inputs, _outputFormats)
  }

  override private[mkldnn] def initBwdPrimitives(grad: Array[MemoryData], phase: Phase) = {
    _gradInputFormats = Array(grad(index))
    _gradOutputFormats = grad
    (grad, _gradInputFormats)
  }
}
object SelectTable {
  // NOTE(review): the parameter is named `dimension` but it is passed as the
  // table index to select; name kept for source compatibility.
  def apply(dimension: Int)(implicit ev: TensorNumeric[Float]) : SelectTable = {
    new SelectTable(dimension)
  }
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/mkldnn/SelectTable.scala | Scala | apache-2.0 | 3,311 |
package net.liftmodules.squerylauth
package model
import net.liftweb.record.{MetaRecord, Record}
import net.liftweb.squerylrecord.KeyedRecord
import net.liftweb.record.field.{OptionalStringField, OptionalLongField, StringField, LongField}
import net.liftweb.common.Loggable
import lib.SquerylMetaRecord
import net.liftweb.squerylrecord.RecordTypeMode._
import org.squeryl.annotations.Column
/**
 * Squeryl record for a single permission string that is attached either to a
 * role (via roleId) or directly to a user (via userId).
 */
class Permission extends Record[Permission] with KeyedRecord[Long] {
  def meta = Permission

  @Column("id")
  val idField = new LongField(this)

  /**
   * The owning role, if any. Empty for permissions attached directly to a user.
   */
  val roleId = new OptionalStringField(this, 32)

  /**
   * The owning user, if any. Empty for permissions attached to a role.
   */
  val userId = new OptionalLongField(this)

  // Serialized permission string; converted to/from APermission by the companion.
  val permission = new StringField(this, 1024)

  //ToDo lazy val users = DbSchema.usersToPermissions.right(this)
  // Many-to-many relation to the roles that carry this permission.
  lazy val roles = SquerylAuthSchema.roleToPermissions.right(this)
}
object Permission extends Permission with MetaRecord[Permission] with SquerylMetaRecord[Long, Permission] with Loggable {
  lazy val table = SquerylAuthSchema.permissions

  /** Builds (but does not persist) a permission record bound to the given user id. */
  def createUserPermission(uid: Long, aPerm: APermission) = {
    createRecord.userId(uid).permission(aPerm.toString)
  }

  /** Deletes every permission row attached directly to the given user id. */
  def removeAllUserPermissions(uid: Long) = {
    table.deleteWhere(_.userId === uid)
  }

  /** Converts a stored record to its APermission representation. */
  def toAPermission(perm: Permission) = APermission.fromString(perm.permission.get)

  /** Wraps an APermission in a new (unsaved) record. */
  def fromAPermission(aPerm: APermission): Permission = Permission.createRecord.permission(aPerm.toString)

  /** All permissions attached directly to the given user id. */
  def userPermissions(uid: Long): List[APermission] = SquerylAuthSchema.permissions.where(_.userId === uid).toList.map(toAPermission)
}
| gensosrl/squeryl-auth-module | src/main/scala/net/liftmodules/squerylauth/model/Permission.scala | Scala | apache-2.0 | 1,676 |
/**
*
* Copyright (C) 2017 University of Bamberg, Software Technologies Research Group
* <https://www.uni-bamberg.de/>, <http://www.swt-bamberg.de/>
*
* This file is part of the Data Structure Investigator (DSI) project, which received financial support by the
* German Research Foundation (DFG) under grant no. LU 1748/4-1, see
* <http://www.swt-bamberg.de/dsi/>.
*
* DSI is licensed under the GNU GENERAL PUBLIC LICENSE (Version 3), see
* the LICENSE file at the project's top-level directory for details or consult <http://www.gnu.org/licenses/>.
*
* DSI is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or any later version.
*
* DSI is a RESEARCH PROTOTYPE and distributed WITHOUT ANY
* WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* The following people contributed to the conception and realization of the present DSI distribution (in
* alphabetic order by surname):
*
* - Jan H. Boockmann
* - Gerald Lüttgen
* - Thomas Rupprecht
* - David H. White
*
*/
/**
* @author DSI
*
* DsOliCell.scala created on Oct 29, 2014
*
* Description: Representation of a cell, i.e.,
* subregion of memory
*/
package boxcalculation
import pointstograph.DsOliVertexMemory
import boxcalculation.DsOliCell._
import boxcalculation.DsOliBox._
import scala.collection.mutable.Set
import pointstograph.DsOliType
import extlogger.DsOliLogger
/**
 * @author DSI
 *
 * @constructor creates a cell
 * @param id unique cell id
 * @param bAddr the start address of this cell in memory
 * @param eAddr the end address of this cell in memory
 * @param cType the type associated with this cell
 * @param vertexId the vertex id to which this cell belongs
 */
class DsOliCell(val id: CellId, val bAddr: Long, val eAddr: Long, val cType: DsOliType, var vertexId: Long) {

  // Convenience constructor that allocates a fresh cell id
  def this(bAddr: Long, eAddr: Long, cType: DsOliType, vertexId: Long) = this(DsOliCell.getId(), bAddr, eAddr, cType, vertexId)

  /**
   * Create a deep copy of this cell, by carrying over
   * the id for the cell
   *
   * @return instance of the copied cell
   */
  def deepCopy(): DsOliCell = {
    // Important to carry over the id
    val copy = new DsOliCell(this.id, this.bAddr, this.eAddr, this.cType, this.vertexId)
    return copy
  }

  override def toString(): String = {
    "[" + this.getClass() + ": id = " + id + "," + "bAddr = " + bAddr.toHexString + "," + "eAddr = " + eAddr.toHexString + "," + "cType = " + cType + "," + "vertexRef = " + vertexId + "]"
  }

  /**
   * Cells are equal iff their memory region (bAddr, eAddr) and type match;
   * the id and vertexId are deliberately ignored.
   */
  override def equals(other: Any): Boolean = {
    return other match {
      case that: DsOliCell =>
        // NOTE: a typed pattern never matches null in Scala, so the previous
        // explicit `that == null` branch was unreachable and has been removed.
        DsOliLogger.debug("DsOliCell::equals: called on element " + id + " with " + that.id)
        DsOliLogger.debug("DsOliCell::equals: (this == that): " +
          this.bAddr.toHexString + " == " + that.bAddr.toHexString + " && " +
          this.eAddr.toHexString + " == " + that.eAddr.toHexString + " && " +
          this.cType + " == " + that.cType)
        this.bAddr == that.bAddr && this.eAddr == that.eAddr && this.cType == that.cType
      case _ =>
        DsOliLogger.debug("DsOliCell::equals: called on wrong element ")
        false
    }
  }

  // BUG FIX: equals was overridden without hashCode, which breaks the
  // equals/hashCode contract for hash-based collections. Hash on exactly the
  // fields compared in equals.
  override def hashCode(): Int = (bAddr, eAddr, cType).hashCode()
}
object DsOliCell {
  type CellId = Long
  type CycleId = Long

  // The counter starts at 1 because the value 0 is reserved to mean
  // "no reference".
  var id: Long = 1

  /**
   * Simple unique cell id generator
   * @return unique cell id
   */
  def getId(): Long = {
    val current = id
    id += 1
    current
  }
}
| uniba-swt/DSIsrc | src/boxcalculation/DsOliCell.scala | Scala | gpl-3.0 | 3,889 |
package se.gigurra.renderer.shapes
import se.gigurra.renderer.Model
import se.gigurra.renderer.Vertex
import se.gigurra.renderer.Vertices
import se.gigurra.renderer.Color
/** Factory for axis-aligned rectangle models, filled or outlined. */
object Rect {

  // Builds a filled rectangle from two triangles (six vertices);
  // (left, down) is the lower-left corner.
  private def fillImpl(width: Float, height: Float, left: Float, down: Float, color: Color): Model = {
    val right = left + width
    val up = down + height
    Model.triangles(
      Vertices(
        Vertex(right, up), Vertex(left, up),
        Vertex(left, down), Vertex(left, down),
        Vertex(right, down), Vertex(right, up)),
      color)
  }

  // Builds a rectangle outline from four line segments (vertex pairs);
  // (left, down) is the lower-left corner.
  private def lineImpl(width: Float, height: Float, left: Float, down: Float, color: Color): Model = {
    val right = left + width
    val up = down + height
    Model.lines(
      Vertices(
        Vertex(left, down), Vertex(right, down),
        Vertex(right, down), Vertex(right, up),
        Vertex(right, up), Vertex(left, up),
        Vertex(left, up), Vertex(left, down)),
      color)
  }

  // Rectangle centered on the origin
  def fillCentered(width: Float, height: Float, color: Color) = fillImpl(width, height, -width / 2, -height / 2, color)
  def lineCentered(width: Float, height: Float, color: Color) = lineImpl(width, height, -width / 2, -height / 2, color)

  // Rectangle whose bottom edge is centered on the origin (extends upwards)
  def fillUp(width: Float, height: Float, color: Color) = fillImpl(width, height, -width / 2, 0, color)
  def lineUp(width: Float, height: Float, color: Color) = lineImpl(width, height, -width / 2, 0, color)
}
| GiGurra/gigurra-scala-2drenderer | src/main/scala/se/gigurra/renderer/shapes/Rect.scala | Scala | mit | 1,395 |
package com.asto.dmp.jdlp.base
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{Logging, SparkConf, SparkContext}
/**
 * Lazily-initialized application-wide singletons for Spark's SparkContext,
 * SQLContext and HiveContext. Note: the accessors use unsynchronized
 * check-then-create, so they should be called from the driver thread only.
 */
object Contexts extends Logging {

  private var _sc: SparkContext = _
  private var _hiveContext: HiveContext = _
  private var _sqlContext: SQLContext = _

  /** Returns the shared HiveContext, creating it on first access. */
  def hiveContext: HiveContext = {
    if (_hiveContext == null) {
      logInfo("对HiveContext进行实例化")
      _hiveContext = new HiveContext(sparkContext)
    }
    _hiveContext
  }

  /** Returns the shared SQLContext, creating it on first access. */
  def sqlContext: SQLContext = {
    if (_sqlContext == null) {
      logInfo("对SQLContext进行实例化")
      _sqlContext = new SQLContext(sparkContext)
    }
    _sqlContext
  }

  /** Returns the shared SparkContext, creating it on first access. */
  def sparkContext: SparkContext = {
    if (_sc == null) {
      logInfo("对SparkContext进行实例化")
      _sc = initSparkContext()
    }
    _sc
  }

  /**
   * Creates the SparkContext. The master may come from the code (parameter)
   * or from the cluster configuration (spark.master); when both are present
   * the code-supplied value wins, and when neither is present we fall back
   * to "local".
   */
  def initSparkContext(master: String = null): SparkContext = {
    val conf = new SparkConf().setAppName(Constants.APP_NAME)
    val masterInCodes = Option(master)
    val masterInSparkConf = conf.getOption("spark.master")
    (masterInCodes, masterInSparkConf) match {
      case (None, None) =>
        logWarning(s"集群和程序代码中都没有设置Master参数,在${getClass.getName}的initSparkContext中对它设置成local")
        conf.setMaster("local")
      case (None, Some(_)) =>
        logInfo("程序代码中都没有设置Master参数,但是集群中设置了Master参数,使用集群设置的Master参数")
      case (Some(_), None) =>
        logInfo("集群中没有设置Master参数,但是程序代码中都设置了Master参数,使用程序代码的Master参数")
        conf.setMaster(masterInCodes.get)
      case (Some(_), Some(_)) =>
        logInfo("集群中设置了Master参数,程序代码中也设置了Master参数,程序代码的Master参数覆盖集群传入的Master参数")
        conf.setMaster(masterInCodes.get)
    }
    logInfo(s" Master = ${conf.get("spark.master")},conf = ${conf.get("spark.app.name")} ")
    this._sc = new SparkContext(conf)
    _sc
  }

  /** Stops the shared SparkContext if it was ever created. */
  def stopSparkContext() = {
    logInfo("关闭SparkContext")
    // BUG FIX: guard against stop() being called before the context exists;
    // previously this threw a NullPointerException.
    if (_sc != null) {
      _sc.stop()
    }
  }
}
| zj-lingxin/jdlp | src/main/scala/com/asto/dmp/jdlp/base/Contexts.scala | Scala | mit | 2,175 |
package com.pktippa
object ReverseAList {

  /** Entry point: reverses a fixed sample list and prints one element per line. */
  def main(args: Array[String]): Unit = {
    // Given input list in List[Int]
    val inputList = List(19, 22, 3, 28, 26, 17, 18, 4, 28, 0)
    // Print each element of the reversed list
    reverseList(inputList).foreach(println)
  }

  /**
   * Returns the elements of `inputList` in reverse order.
   *
   * The previous implementation appended inside a loop with `:+`, which copies
   * the whole list on every step (O(n^2) overall). Prepending via foldLeft —
   * equivalent to the standard library's `reverse` — is O(n).
   *
   * @param inputList the list to reverse
   * @return a new list with the elements in reverse order
   */
  def reverseList(inputList: List[Int]): List[Int] =
    inputList.foldLeft(List.empty[Int])((acc, elem) => elem :: acc)
}
// Copyright (C) 2019 MapRoulette contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.maproulette.controllers.api
import java.sql.Connection
import javax.inject.Inject
import org.locationtech.jts.geom.Envelope
import org.maproulette.Config
import org.maproulette.controllers.CRUDController
import org.maproulette.data._
import org.maproulette.models.dal.{DALManager, TagDAL, TaskDAL}
import org.maproulette.models._
import org.maproulette.exception.{
InvalidException,
LockedException,
NotFoundException,
StatusMessage
}
import org.maproulette.models.dal.mixin.TagDALMixin
import org.maproulette.session.{
SearchChallengeParameters,
SearchLocation,
SearchParameters,
SessionManager,
User
}
import org.maproulette.utils.Utils
import org.maproulette.services.osm._
import org.maproulette.provider.websockets.{WebSocketMessages, WebSocketProvider}
import org.wololo.geojson.{FeatureCollection, GeoJSONFactory}
import org.wololo.jts2geojson.GeoJSONReader
import play.api.libs.json._
import play.api.libs.ws.WSClient
import play.api.mvc._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.util.{Failure, Success}
/**
* The Task controller handles all operations for the Task objects.
* This includes CRUD operations and searching/listing.
* See {@link org.maproulette.controllers.CRUDController} for more details on CRUD object operations
*
* @author cuthbertm
*/
class TaskController @Inject() (
override val sessionManager: SessionManager,
override val actionManager: ActionManager,
override val dal: TaskDAL,
override val tagDAL: TagDAL,
dalManager: DALManager,
wsClient: WSClient,
webSocketProvider: WebSocketProvider,
config: Config,
components: ControllerComponents,
changeService: ChangesetProvider,
override val bodyParsers: PlayBodyParsers
) extends AbstractController(components)
with CRUDController[Task]
with TagsMixin[Task] {
  import scala.concurrent.ExecutionContext.Implicits.global

  // json reads for automatically reading Tasks from a posted json body
  override implicit val tReads: Reads[Task] = Task.TaskFormat
  // json writes for automatically writing Tasks to a json body response
  override implicit val tWrites: Writes[Task] = Task.TaskFormat
  // json writes for automatically writing Challenges to a json body response
  implicit val cWrites: Writes[Challenge] = Challenge.writes.challengeWrites
  // The type of object that this controller deals with.
  override implicit val itemType = TaskType()
  override implicit val tableName = this.dal.tableName
  // json reads for automatically reading Tags from a posted json body
  implicit val tagReads: Reads[Tag] = Tag.tagReads
  // json (de)serializers for task comments
  implicit val commentReads: Reads[Comment] = Comment.commentReads
  implicit val commentWrites: Writes[Comment] = Comment.commentWrites
  // json (de)serializers for OSM tag-change submissions and their results
  implicit val tagChangeReads = ChangeObjects.tagChangeReads
  implicit val tagChangeResultWrites = ChangeObjects.tagChangeResultWrites
  implicit val tagChangeSubmissionReads = ChangeObjects.tagChangeSubmissionReads
  // json deserializers for generic OSM changeset submissions
  implicit val changeReads = ChangeObjects.changeReads
  implicit val changeSubmissionReads = ChangeObjects.changeSubmissionReads
  // json serializers for task bundles and review points
  implicit val taskBundleWrites: Writes[TaskBundle] = TaskBundle.taskBundleWrites
  implicit val pointReviewWrites = ClusteredPoint.pointReviewWrites

  // Exposes the task DAL's tag capabilities to the TagsMixin
  override def dalWithTags: TagDALMixin[Task] = dal
  /**
   * This function allows sub classes to modify the body, primarily this would be used for inserting
   * default elements into the body that shouldn't have to be required to create an object.
   *
   * @param body The incoming body from the request
   * @param user The user making the request
   * @return The body with defaults inserted and geometry fields normalized
   */
  override def updateCreateBody(body: JsValue, user: User): JsValue = {
    // add a default priority, this will be updated later when the task is created if there are
    // priority rules defined in the challenge parent
    val updatedBody = Utils.insertIntoJson(body, "priority", Challenge.PRIORITY_HIGH)(IntWrites)
    // We need to update the geometries to make sure that we handle all the different types of
    // geometries that you can deal with like WKB or GeoJSON
    this.updateGeometryData(super.updateCreateBody(updatedBody, user))
  }
/**
* In the case where you need to update the update body, usually you would not update it, but
* just in case.
*
* @param body The request body
* @param user The user making the request
* @return The updated request body, with geometry fields normalized
*/
override def updateUpdateBody(body: JsValue, user: User): JsValue =
this.updateGeometryData(super.updateUpdateBody(body, user))
/**
 * Normalizes the "geometries", "location" and "suggestedFix" fields of the
 * request body so downstream case-class validation always sees them as JSON
 * strings (a JSON object value is stringified; string values pass through).
 *
 * Bug fix: the original computed the "location" normalization as a bare
 * statement and discarded its result, so an object-valued "location" was
 * never stringified. The three steps are now chained, each building on the
 * previous result.
 *
 * @param body The incoming request body
 * @return The body with the three fields stringified where necessary
 */
private def updateGeometryData(body: JsValue): JsValue = {
  // Step 1: geometries
  val withGeometries = (body \\ "geometries").asOpt[String] match {
    case Some(value) =>
      // if it is a string, then it is either GeoJSON or a WKB
      // just check to see if { is the first character and then we can assume it is GeoJSON
      if (value.charAt(0) != '{') {
        // TODO: handle WKB encoded geometries
        body
      } else {
        // already a GeoJSON string, which is what validation expects
        body
      }
    case None =>
      // not a string, so it could be a raw JSON object
      (body \\ "geometries").asOpt[JsValue] match {
        case Some(value) =>
          // need to convert to a string for the case class otherwise validation will fail
          Utils.insertIntoJson(body, "geometries", value.toString(), true)
        case None =>
          // if the geometries are not supplied then just leave it
          body
      }
  }
  // Step 2: location gets the same stringify-if-object treatment
  val withLocation = (withGeometries \\ "location").asOpt[String] match {
    case Some(_) => withGeometries
    case None =>
      (withGeometries \\ "location").asOpt[JsValue] match {
        case Some(value) =>
          Utils.insertIntoJson(withGeometries, "location", value.toString(), true)
        case None => withGeometries
      }
  }
  // Step 3: suggestedFix, likewise; this is the returned body
  (withLocation \\ "suggestedFix").asOpt[String] match {
    case Some(_) => withLocation
    case None =>
      (withLocation \\ "suggestedFix").asOpt[JsValue] match {
        case Some(value) =>
          Utils.insertIntoJson(withLocation, "suggestedFix", value.toString(), true)
        case None => withLocation
      }
  }
}
/**
* Function can be implemented to extract more information than just the default create data,
* to build other objects with the current object at the core. No data will be returned from this
* function, it purely does work in the background AFTER creating the current object
*
* @param body The Json body of data
* @param createdObject The object that was created by the create function
* @param user The user that is executing the function
*/
override def extractAndCreate(body: JsValue, createdObject: Task, user: User)(
implicit c: Option[Connection] = None
): Unit =
// tags are attached as the super user; the trailing `true` flag is passed to
// extractTags (NOTE(review): presumably "create tags if missing" -- confirm
// against the TagsMixin definition)
this.extractTags(body, createdObject, User.superUser, true)
/**
* Gets a json list of tags of the task
*
* @param id The id of the task containing the tags
* @return The html Result containing json array of tags
*/
def getTagsForTask(implicit id: Long): Action[AnyContent] = Action.async { implicit request =>
// userAwareRequest: the endpoint works for anonymous users as well
this.sessionManager.userAwareRequest { implicit user =>
Ok(Json.toJson(this.getTags(id)))
}
}
/**
* Start on task (lock it). An error will be returned if someone else has the lock.
*
* @param taskId Id of task that you wish to start
* @return The locked task as JSON
*/
def startOnTask(taskId: Long): Action[AnyContent] = Action.async { implicit request =>
this.sessionManager.authenticatedRequest { implicit user =>
val task = this.dal.retrieveById(taskId) match {
case Some(t) => t
case None => throw new NotFoundException(s"Task with $taskId not found, unable to lock.")
}
// lockItem returns a count; 0 indicates the lock was not acquired
val success = this.dal.lockItem(user, task)
if (success == 0) {
throw new IllegalAccessException(s"Current task [${taskId}] is locked by another user.")
}
// broadcast the claim so other connected clients can update their view
webSocketProvider.sendMessage(
WebSocketMessages.taskClaimed(task, Some(WebSocketMessages.userSummary(user)))
)
Ok(Json.toJson(task))
}
}
/**
 * Releases the task (unlock it). Unlock failures are logged and swallowed so
 * the task payload is still returned to the client.
 *
 * @param taskId Id of task that you wish to release
 * @return The task as JSON
 */
def releaseTask(taskId: Long): Action[AnyContent] = Action.async { implicit request =>
  this.sessionManager.authenticatedRequest { implicit user =>
    val task = this.dal.retrieveById(taskId) match {
      case Some(t) => t
      // Bug fix: the message previously said "unable to lock" on the release path.
      case None => throw new NotFoundException(s"Task with $taskId not found, unable to release.")
    }
    try {
      this.dal.unlockItem(user, task)
      // notify listeners that the task is available again
      webSocketProvider.sendMessage(
        WebSocketMessages.taskReleased(task, Some(WebSocketMessages.userSummary(user)))
      )
    } catch {
      // best-effort unlock: a failure (e.g. lock already expired) is not fatal
      case e: Exception => logger.warn(e.getMessage)
    }
    Ok(Json.toJson(task))
  }
}
/**
* Refresh the active lock on the task, extending its allowed duration
*
* @param taskId Id of the task on which the lock is to be refreshed
* @return The task as JSON if the lock was refreshed
*/
def refreshTaskLock(taskId: Long): Action[AnyContent] = Action.async { implicit request =>
this.sessionManager.authenticatedRequest { implicit user =>
this.dal.retrieveById(taskId) match {
case Some(t) =>
try {
this.dal.refreshItemLock(user, t)
Ok(Json.toJson(t))
} catch {
// surfaced as 403-style access error when the lock is held elsewhere
case e: LockedException => throw new IllegalAccessException(e.getMessage)
}
case None =>
throw new NotFoundException(s"Task with $taskId not found, unable to refresh lock.")
}
}
}
/**
 * Gets a random task(s) given the provided tags.
 *
 * @param projectSearch Filter on the name of the project
 * @param challengeSearch Filter on the name of the challenge (Survey included)
 * @param challengeTags Filter on the tags of the challenge
 * @param tags A comma separated list of tags to match against
 * @param taskSearch Filter based on the name of the task
 * @param limit The number of tasks to return
 * @param proximityId Id of task that you wish to find the next task based on the proximity of that task
 * @return JSON array of the selected tasks with tag (and optional Mapillary) data injected
 */
def getRandomTasks(
    projectSearch: String,
    challengeSearch: String,
    challengeTags: String,
    tags: String,
    taskSearch: String,
    limit: Int,
    proximityId: Long
): Action[AnyContent] = Action.async { implicit request =>
  this.sessionManager.userAwareRequest { implicit user =>
    val params = SearchParameters(
      projectSearch = Some(projectSearch),
      challengeParams = SearchChallengeParameters(
        challengeSearch = Some(challengeSearch),
        challengeTags = Some(challengeTags.split(",").toList)
      ),
      taskTags = Some(tags.split(",").toList),
      taskSearch = Some(taskSearch)
    )
    val result = this.dal.getRandomTasks(
      User.userOrMocked(user),
      params,
      limit,
      None,
      Utils.negativeToOption(proximityId)
    )
    // Bug fix: the mapped list (with the task-view action recorded and the
    // injected JSON per task) was previously discarded and the raw result
    // serialized instead; return the injected JSON.
    val injected = result.map { task =>
      this.actionManager.setAction(user, this.itemType.convertToItem(task.id), TaskViewed(), "")
      this.inject(task)
    }
    Ok(Json.toJson(injected))
  }
}
/**
* This injection method will make a call to Mapillary to pull in any matching images that
* might be useful. It always attaches the task's tag names to the serialized JSON.
*
* @param obj the object being sent in the response
* @return A Json representation of the object
*/
override def inject(obj: Task)(implicit request: Request[Any]): JsValue = {
var taskToReturn = obj
val serverInfo = config.getMapillaryServerInfo
// Mapillary lookup only runs when configured AND explicitly requested via
// the "mapillary=true" query parameter.
if (serverInfo.clientId.nonEmpty) {
if (request.getQueryString("mapillary").getOrElse("false").toBoolean) {
// build the envelope for the task geometries
val taskFeatureCollection =
GeoJSONFactory.create(obj.geometries).asInstanceOf[FeatureCollection]
val reader = new GeoJSONReader()
val envelope = new Envelope()
taskFeatureCollection.getFeatures.foreach(f => {
val current = reader.read(f.getGeometry)
envelope.expandToInclude(current.getEnvelopeInternal)
})
// user can provide border information in the query string, so check there first before using the default
val borderExpansionSize =
request.getQueryString("border").getOrElse(serverInfo.border.toString).toDouble
envelope.expandBy(borderExpansionSize)
val apiReq =
s"https://${serverInfo.host}/v3/images/?&bbox=${envelope.getMinX},${envelope.getMinY},${envelope.getMaxX},${envelope.getMaxY}&client_id=${serverInfo.clientId}"
logger.debug(s"Requesting Mapillary image information for: $apiReq")
val mapFuture = wsClient.url(apiReq).get()
// NOTE(review): blocks the request thread for up to 5 seconds waiting on
// Mapillary -- consider composing the Future instead of Await.
val response = Await.result(mapFuture, 5.seconds)
val featureCollection = response.json
val images = (featureCollection \\ "features")
.as[List[JsValue]]
.map(feature => {
val key = (feature \\ "properties" \\ "key").get.as[String]
val latlon = (feature \\ "geometry" \\ "coordinates").as[List[JsNumber]]
// coordinates are read as [lon, lat] (GeoJSON order): tail.head = lat, head = lon
MapillaryImage(
key,
latlon.tail.head.as[Double],
latlon.head.as[Double],
s"https://d1cuyjsrcm0gby.cloudfront.net/$key/thumb-320.jpg",
s"https://d1cuyjsrcm0gby.cloudfront.net/$key/thumb-640.jpg",
s"https://d1cuyjsrcm0gby.cloudfront.net/$key/thumb-1024.jpg",
s"https://d1cuyjsrcm0gby.cloudfront.net/$key/thumb-2048.jpg"
)
})
taskToReturn = obj.copy(mapillaryImages = Some(images))
}
}
// always embed the task's tag names under Tag.KEY
val tags = tagDAL.listByTask(taskToReturn.id)
Utils.insertIntoJson(Json.toJson(taskToReturn), Tag.KEY, Json.toJson(tags.map(_.name)))
}
/**
* Gets all the tasks within a bounding box
*
* @param left The minimum longitude (west edge) of the bounding box
* @param bottom The minimum latitude (south edge) of the bounding box
* @param right The maximum longitude (east edge) of the bounding box
* @param top The maximum latitude (north edge) of the bounding box
* @param limit Limit for the number of returned tasks
* @param offset The offset used for paging
* @param excludeLocked Whether to exclude tasks currently locked by users
* @param sort Column to sort by (empty for default ordering)
* @param order Sort direction, "ASC" or "DESC"
* @param includeTotal When true the response wraps the tasks with a total count
* @param includeGeometries When true the full task geometries are embedded
* @return JSON array of tasks (or {total, tasks} object when includeTotal)
*/
def getTasksInBoundingBox(
left: Double,
bottom: Double,
right: Double,
top: Double,
limit: Int,
offset: Int,
excludeLocked: Boolean,
sort: String = "",
order: String = "ASC",
includeTotal: Boolean = false,
includeGeometries: Boolean = false
): Action[AnyContent] = Action.async { implicit request =>
this.sessionManager.userAwareRequest { implicit user =>
SearchParameters.withSearch { p =>
val params = p.copy(location = Some(SearchLocation(left, bottom, right, top)))
val (count, result) = this.dalManager.taskCluster.getTasksInBoundingBox(
User.userOrMocked(user),
params,
limit,
offset,
excludeLocked,
sort,
order
)
// enrich with user names (and optionally geometries) before serializing
val resultJson = _insertExtraJSON(result, includeGeometries)
if (includeTotal) {
Ok(Json.obj("total" -> count, "tasks" -> resultJson))
} else {
Ok(resultJson)
}
}
}
}
/**
 * Generic status-setting endpoint used by the specific status functions: it
 * delegates to customTaskStatus with a TaskStatusSet action.
 * Must be authenticated to perform operation.
 *
 * @param id The id of the task
 * @param status The status id to set the task's status to
 * @param comment An optional comment to add to the task
 * @param tags Optional comma separated tags to add to the task
 * @return 400 BadRequest if status id is invalid or task with supplied id not found.
 *         If successful then 200 NoContent
 */
def setTaskStatus(
    id: Long,
    status: Int,
    comment: String = "",
    tags: String = ""
): Action[AnyContent] = Action.async { implicit request =>
  this.sessionManager.authenticatedRequest { implicit user =>
    // An explicit review request may ride along in the query string.
    val requestReview = request.getQueryString("requestReview").map(_.toBoolean)
    // Completion responses, if any, arrive in the (optional) JSON body.
    val completionResponses = request.body.asJson
    this.customTaskStatus(
      id,
      TaskStatusSet(status),
      user,
      comment,
      tags,
      requestReview,
      completionResponses
    )
    NoContent
  }
}
/**
 * Core implementation shared by the status-setting endpoints: validates the
 * requested status, applies it to the task, records the action, and attaches
 * any comment and tags supplied with the request.
 *
 * @param taskId the id of the task to update
 * @param actionType the action being performed (determines the status applied)
 * @param user the user executing the change
 * @param comment optional comment to attach to the task
 * @param tags optional comma separated tags to attach to the task
 * @param requestReview optional explicit review-request flag
 * @param completionResponses optional completion responses from the request body
 */
def customTaskStatus(
    taskId: Long,
    actionType: ActionType,
    user: User,
    comment: String = "",
    tags: String = "",
    requestReview: Option[Boolean] = None,
    completionResponses: Option[JsValue] = None
) = {
  // derive the concrete status from the action type
  val status = actionType match {
    case t: TaskStatusSet => t.status
    case q: QuestionAnswered => Task.STATUS_ANSWERED
    case _ => Task.STATUS_CREATED
  }
  if (!Task.isValidStatus(status)) {
    throw new InvalidException(s"Cannot set task [$taskId] to invalid status [$status]")
  }
  val task = this.dal.retrieveById(taskId) match {
    case Some(t) => t
    case None => throw new NotFoundException(s"Task with $taskId not found, can not set status.")
  }
  this.dal.setTaskStatus(List(task), status, user, requestReview, completionResponses)
  val action =
    this.actionManager.setAction(Some(user), new TaskItem(task.id), actionType, task.name)
  // add comment if any provided, linked to the recorded action when available
  if (comment.nonEmpty) {
    this.dalManager.comment.add(user, task, comment, action.map(_.id))
  }
  // Bug fix: "".split(",") yields List(""), so the original always entered the
  // branch below and created a tag with an empty name when no tags were
  // supplied. Blank entries are now dropped.
  val tagList = tags.split(",").toList.filter(_.trim.nonEmpty)
  if (tagList.nonEmpty) {
    this.addTagstoItem(taskId, tagList.map(new Tag(-1, _, tagType = this.dal.tableName)), user)
  }
}
/**
 * This function sets the task review status.
 * Must be authenticated to perform operation and marked as a reviewer.
 *
 * @param id The id of the task
 * @param reviewStatus The review status id to set the task's review status to
 * @param comment An optional comment to add to the task
 * @param tags Optional comma separated tags to add to the task
 * @return 400 BadRequest if task with supplied id not found.
 *         If successful then 200 NoContent
 */
def setTaskReviewStatus(
    id: Long,
    reviewStatus: Int,
    comment: String = "",
    tags: String = ""
): Action[AnyContent] = Action.async { implicit request =>
  this.sessionManager.authenticatedRequest { implicit user =>
    val task = this.dal.retrieveById(id) match {
      case Some(t) => t
      case None =>
        throw new NotFoundException(s"Task with $id not found, cannot set review status.")
    }
    // record the review action; its id (when present) links the comment to it
    val action = this.actionManager
      .setAction(Some(user), new TaskItem(task.id), TaskReviewStatusSet(reviewStatus), task.name)
    this.dalManager.taskReview
      .setTaskReviewStatus(task, reviewStatus, user, action.map(_.id), comment)
    // Bug fix: "".split(",") yields List(""), so a tag with an empty name was
    // created whenever no tags were supplied. Blank entries are now dropped.
    val tagList = tags.split(",").toList.filter(_.trim.nonEmpty)
    if (tagList.nonEmpty) {
      this.addTagstoItem(id, tagList.map(new Tag(-1, _, tagType = this.dal.tableName)), user)
    }
    NoContent
  }
}
/**
 * Changes the status on every task that matches the supplied search criteria
 * (SearchParameters). When no bounding box is supplied, the whole world is
 * searched.
 *
 * @param newStatus The status to change all the tasks to
 * @return The number of tasks changed.
 */
def bulkStatusChange(newStatus: Int): Action[AnyContent] = Action.async { implicit request =>
  this.sessionManager.authenticatedRequest { implicit user =>
    SearchParameters.withSearch { p =>
      // default to a world-spanning bounding box when none was supplied
      val params = p.location match {
        case Some(_) => p
        case None => p.copy(location = Some(SearchLocation(-180, -90, 180, 90)))
      }
      // limit of -1 means unbounded; the count component is not needed here
      val (_, tasks) = this.dalManager.taskCluster.getTasksInBoundingBox(user, params, -1)
      tasks.foreach { task =>
        this.dal.update(Json.obj("id" -> task.id, "status" -> newStatus), user)(task.id)
      }
      Ok(Json.toJson(tasks.length))
    }
  }
}
/**
* Matches the task to a OSM Changeset. (NOTE(review): the original doc sentence
* was truncated at "this will only" -- presumably it only matches changesets
* from the acting user; confirm against the DAL's matchToOSMChangeSet.)
*
* @param taskId the id for the task
* @return The new Task object
*/
def matchToOSMChangeSet(taskId: Long): Action[AnyContent] = Action.async { implicit request =>
this.sessionManager.authenticatedFutureRequest { implicit user =>
this.dal.retrieveById(taskId) match {
case Some(t) =>
// bridge the DAL future onto the response future via a promise
val promise = Promise[Result]
this.dal.matchToOSMChangeSet(t, user, false) onComplete {
case Success(response) => promise success Ok(Json.toJson(t))
case Failure(error) => promise failure error
}
promise.future
case None => throw new NotFoundException("Task not found to update taskId with")
}
}
}
/**
* Gets clusters of tasks for the challenge. Uses kmeans method in postgis.
*
* @param numberOfPoints Number of clustered points you wish to have returned
* @return A list of ClusteredPoint's that represent clusters of tasks
*/
def getTaskClusters(numberOfPoints: Int): Action[AnyContent] = Action.async { implicit request =>
this.sessionManager.userAwareRequest { implicit user =>
// search filters come from the request via SearchParameters.withSearch
SearchParameters.withSearch { implicit params =>
Ok(Json.toJson(this.dalManager.taskCluster.getTaskClusters(params, numberOfPoints)))
}
}
}
/**
* Gets the list of tasks that are contained within the single cluster
*
* @param clusterId The cluster id, when "getTaskClusters" is executed it will return single point clusters
* representing all the tasks in the cluster. Each cluster will contain an id, supplying
* that id to this method will allow you to retrieve all the tasks in the cluster
* @param numberOfPoints Number of clustered points that was originally used to get all the clusters
* @return A list of ClusteredPoint's that represent each of the tasks within a single cluster
*/
def getTasksInCluster(clusterId: Int, numberOfPoints: Int): Action[AnyContent] = Action.async {
implicit request =>
this.sessionManager.userAwareRequest { implicit user =>
SearchParameters.withSearch { implicit params =>
Ok(
Json.toJson(
this.dalManager.taskCluster.getTasksInCluster(clusterId, params, numberOfPoints)
)
)
}
}
}
/**
 * Applies a set of OSM tag changes as a "tag fix" for the given task. The
 * posted TagChangeSubmission is converted to an OSMChange; unless the server
 * is configured to skip changeset submission, the change is submitted to OSM
 * and, on success, the task is marked fixed.
 *
 * @param taskId the task the fix applies to
 * @param comment optional comment to attach when setting the status
 * @param tags optional comma separated tags to attach
 * @return 400 BadRequest on an invalid payload, otherwise the submission result
 */
def applyTagFix(taskId: Long, comment: String = "", tags: String = ""): Action[JsValue] =
  Action.async(bodyParsers.json) { implicit request =>
    this.sessionManager.authenticatedFutureRequest { implicit user =>
      val result = request.body.validate[TagChangeSubmission]
      result.fold(
        errors => {
          Future {
            BadRequest(Json.toJson(StatusMessage("KO", JsError.toJson(errors))))
          }
        },
        element => {
          val p = Promise[Result]
          val requestReview = request.getQueryString("requestReview").map(_.toBoolean)
          // Convert tag changes to OSMChange object
          val updates = element.changes.map(tagChange => {
            ElementUpdate(
              tagChange.osmId,
              tagChange.osmType,
              tagChange.version,
              ElementTagChange(tagChange.updates, tagChange.deletes)
            )
          })
          val change = OSMChange(None, Some(updates))
          config.skipOSMChangesetSubmission match {
            // If we are skipping the OSM submission then we don't actually do the tag change on OSM
            case true =>
              this.customTaskStatus(
                taskId,
                TaskStatusSet(Task.STATUS_FIXED),
                user,
                comment,
                tags,
                requestReview
              )
              p success Ok(Json.toJson(true))
            case _ =>
              // (bug fix: removed a stray dead `None` expression that preceded this call)
              changeService.submitOsmChange(
                change,
                element.comment,
                user.osmProfile.requestToken,
                Some(taskId)
              ) onComplete {
                case Success(res) => {
                  this.customTaskStatus(
                    taskId,
                    TaskStatusSet(Task.STATUS_FIXED),
                    user,
                    comment,
                    tags,
                    requestReview
                  )
                  p success Ok(res)
                }
                case Failure(f) => p failure f
              }
          }
          p.future
        }
      )
    }
  }
/**
 * Applies a suggested fix for the given task: the posted OSMChangeSubmission
 * carries a ready-made OSMChange. Unless the server is configured to skip
 * changeset submission, the change is submitted to OSM and, on success, the
 * task is marked fixed.
 *
 * @param taskId the task the fix applies to
 * @param comment optional comment to attach when setting the status
 * @param tags optional comma separated tags to attach
 * @return 400 BadRequest on an invalid payload, otherwise the submission result
 */
def applySuggestedFix(
    taskId: Long,
    comment: String = "",
    tags: String = ""
): Action[JsValue] = Action.async(bodyParsers.json) { implicit request =>
  this.sessionManager.authenticatedFutureRequest { implicit user =>
    val result = request.body.validate[OSMChangeSubmission]
    result.fold(
      errors => {
        Future {
          BadRequest(Json.toJson(StatusMessage("KO", JsError.toJson(errors))))
        }
      },
      element => {
        val p = Promise[Result]
        val requestReview = request.getQueryString("requestReview").map(_.toBoolean)
        config.skipOSMChangesetSubmission match {
          // If we are skipping the OSM submission then we don't actually do the tag change on OSM
          case true =>
            this.customTaskStatus(
              taskId,
              TaskStatusSet(Task.STATUS_FIXED),
              user,
              comment,
              tags,
              requestReview
            )
            p success Ok(Json.toJson(true))
          case _ =>
            // (bug fix: removed a stray dead `None` expression that preceded this call)
            changeService.submitOsmChange(
              element.changes,
              element.comment,
              user.osmProfile.requestToken,
              Some(taskId)
            ) onComplete {
              case Success(res) => {
                this.customTaskStatus(
                  taskId,
                  TaskStatusSet(Task.STATUS_FIXED),
                  user,
                  comment,
                  tags,
                  requestReview
                )
                p success Ok(res)
              }
              case Failure(f) => p failure f
            }
        }
        p.future
      }
    )
  }
}
/**
 * Fetches and inserts usernames for 'completedBy', 'reviewRequestedBy' and
 * 'reviewedBy' into each serialized ClusteredPoint, optionally embedding the
 * full task geometries.
 *
 * @param tasks the clustered points to enrich
 * @param includeGeometries when true the task geometries are looked up and embedded
 * @return a JSON array of the enriched points
 */
private def _insertExtraJSON(
    tasks: List[ClusteredPoint],
    includeGeometries: Boolean = false
): JsValue = {
  if (tasks.isEmpty) {
    Json.toJson(List[JsValue]())
  } else {
    // id -> {username, id} JSON for each class of user referenced by the
    // points. Lookups below are guarded by the != 0 checks. (NOTE(review): a
    // referenced user missing from the user table would still throw on the
    // map apply -- pre-existing behaviour.)
    def userJson(ids: List[Long]): Map[Long, JsObject] =
      this.dalManager.user
        .retrieveListById(-1, 0)(ids)
        .map(u => u.id -> Json.obj("username" -> u.name, "id" -> u.id))
        .toMap
    val mappers = userJson(tasks.map(t => t.completedBy.getOrElse(0L)))
    val reviewRequesters = userJson(tasks.map(t => t.pointReview.reviewRequestedBy.getOrElse(0L)))
    val reviewers = userJson(tasks.map(t => t.pointReview.reviewedBy.getOrElse(0L)))
    // Replaced the original `null` placeholder with an empty map; it is only
    // read when includeGeometries is true, but an empty map is safer.
    val taskDetailsMap: Map[Long, Task] =
      if (includeGeometries) {
        this.dalManager.task
          .retrieveListById()(tasks.map(t => t.id))
          .map(t => t.id -> t)
          .toMap
      } else {
        Map.empty
      }
    val jsonList = tasks.map { task =>
      var updated = Json.toJson(task)
      var reviewPointJson = Json.toJson(task.pointReview).as[JsObject]
      if (task.completedBy.getOrElse(0) != 0) {
        val mappersJson = Json.toJson(mappers(task.completedBy.get)).as[JsObject]
        updated = Utils.insertIntoJson(updated, "completedBy", mappersJson, true)
      }
      if (task.pointReview.reviewRequestedBy.getOrElse(0) != 0) {
        val reviewRequestersJson =
          Json.toJson(reviewRequesters(task.pointReview.reviewRequestedBy.get)).as[JsObject]
        reviewPointJson = Utils
          .insertIntoJson(reviewPointJson, "reviewRequestedBy", reviewRequestersJson, true)
          .as[JsObject]
        updated = Utils.insertIntoJson(updated, "pointReview", reviewPointJson, true)
      }
      if (task.pointReview.reviewedBy.getOrElse(0) != 0) {
        val reviewerJson =
          Json.toJson(reviewers(task.pointReview.reviewedBy.get)).as[JsObject]
        reviewPointJson =
          Utils.insertIntoJson(reviewPointJson, "reviewedBy", reviewerJson, true).as[JsObject]
        updated = Utils.insertIntoJson(updated, "pointReview", reviewPointJson, true)
      }
      if (includeGeometries) {
        val geometries = Json.parse(taskDetailsMap(task.id).geometries)
        updated = Utils.insertIntoJson(updated, "geometries", geometries, true)
      }
      updated
    }
    Json.toJson(jsonList)
  }
}
}
| Crashfreak/maproulette2 | app/org/maproulette/controllers/api/TaskController.scala | Scala | apache-2.0 | 30,243 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort
import org.apache.spark._
import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.{IndexShuffleBlockResolver, ShuffleWriter, BaseShuffleHandle}
import org.apache.spark.storage.ShuffleBlockId
import org.apache.spark.util.collection.ExternalSorter
// Sort-based shuffle writer: buffers map output through an ExternalSorter
// (optionally combining map-side) or, for small partition counts with no
// aggregation/ordering, a BypassMergeSortShuffleWriter, then writes one data
// file plus an index file for the reducers.
private[spark] class SortShuffleWriter[K, V, C](
shuffleBlockResolver: IndexShuffleBlockResolver,
handle: BaseShuffleHandle[K, V, C],
mapId: Int,
context: TaskContext)
extends ShuffleWriter[K, V] with Logging {
private val dep = handle.dependency
private val blockManager = SparkEnv.get.blockManager
// chosen lazily in write() based on map-side combine / bypass conditions
private var sorter: SortShuffleFileWriter[K, V] = null
// Are we in the process of stopping? Because map tasks can call stop() with success = true
// and then call stop() with success = false if they get an exception, we want to make sure
// we don't try deleting files, etc twice.
private var stopping = false
// populated at the end of write(); returned to the scheduler via stop(true)
private var mapStatus: MapStatus = null
private val writeMetrics = new ShuffleWriteMetrics()
context.taskMetrics.shuffleWriteMetrics = Some(writeMetrics)
/** Write a bunch of records to this task's output */
override def write(records: Iterator[Product2[K, V]]): Unit = {
sorter = if (dep.mapSideCombine) {
require(dep.aggregator.isDefined, "Map-side combine without Aggregator specified!")
new ExternalSorter[K, V, C](
dep.aggregator, Some(dep.partitioner), dep.keyOrdering, dep.serializer)
} else if (SortShuffleWriter.shouldBypassMergeSort(
SparkEnv.get.conf, dep.partitioner.numPartitions, aggregator = None, keyOrdering = None)) {
// If there are fewer than spark.shuffle.sort.bypassMergeThreshold partitions and we don't
// need local aggregation and sorting, write numPartitions files directly and just concatenate
// them at the end. This avoids doing serialization and deserialization twice to merge
// together the spilled files, which would happen with the normal code path. The downside is
// having multiple files open at a time and thus more memory allocated to buffers.
new BypassMergeSortShuffleWriter[K, V](SparkEnv.get.conf, blockManager, dep.partitioner,
writeMetrics, Serializer.getSerializer(dep.serializer))
} else {
// In this case we pass neither an aggregator nor an ordering to the sorter, because we don't
// care whether the keys get sorted in each partition; that will be done on the reduce side
// if the operation being run is sortByKey.
new ExternalSorter[K, V, V](
aggregator = None, Some(dep.partitioner), ordering = None, dep.serializer)
}
sorter.insertAll(records)
// Don't bother including the time to open the merged output file in the shuffle write time,
// because it just opens a single file, so is typically too fast to measure accurately
// (see SPARK-3570).
val outputFile = shuffleBlockResolver.getDataFile(dep.shuffleId, mapId)
val blockId = ShuffleBlockId(dep.shuffleId, mapId, IndexShuffleBlockResolver.NOOP_REDUCE_ID)
val partitionLengths = sorter.writePartitionedFile(blockId, context, outputFile)
shuffleBlockResolver.writeIndexFile(dep.shuffleId, mapId, partitionLengths)
mapStatus = MapStatus(blockManager.shuffleServerId, partitionLengths)
}
/** Close this writer, passing along whether the map completed */
override def stop(success: Boolean): Option[MapStatus] = {
try {
if (stopping) {
return None
}
stopping = true
if (success) {
return Option(mapStatus)
} else {
// The map task failed, so delete our output data.
shuffleBlockResolver.removeDataByMap(dep.shuffleId, mapId)
return None
}
} finally {
// Clean up our sorter, which may have its own intermediate files
if (sorter != null) {
val startTime = System.nanoTime()
sorter.stop()
// account the cleanup time as shuffle write time
context.taskMetrics.shuffleWriteMetrics.foreach(
_.incShuffleWriteTime(System.nanoTime - startTime))
sorter = null
}
}
}
}
private[spark] object SortShuffleWriter {
  /**
   * Decide whether the bypass-merge-sort path can be used: it requires no
   * map-side aggregation, no key ordering, and a partition count at or below
   * spark.shuffle.sort.bypassMergeThreshold (default 200).
   */
  def shouldBypassMergeSort(
      conf: SparkConf,
      numPartitions: Int,
      aggregator: Option[Aggregator[_, _, _]],
      keyOrdering: Option[Ordering[_]]): Boolean = {
    val threshold = conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
    aggregator.isEmpty && keyOrdering.isEmpty && numPartitions <= threshold
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala | Scala | apache-2.0 | 5,409 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle.util
import annotation.tailrec
import java.io.InputStream
import org.saddle.vec
/**
* The Random class provides methods to generate pseudo-random numbers via a plug-in
* PRNG, which is simply any function that generates a Long primitive.
*/
class Random private (rng64: () => Long) {
/**
* Generate a new integer (taking the 32 low order bits of the
* 64 bit state)
*/
def nextInt: Int = rng64().asInstanceOf[Int]
/**
* Generate a new long
*/
def nextLong: Long = rng64()
/**
* Generate a new float. NOTE(review): since nextInt spans the full Int range
* and Int.MinValue.abs > Int.MaxValue, the result can be slightly below -1.0.
*/
def nextFloat: Float = nextInt.asInstanceOf[Float] / Int.MaxValue
/**
* Generate a new double. NOTE(review): same caveat as nextFloat -- the value
* can be slightly below -1.0 when nextLong is Long.MinValue.
*/
def nextDouble: Double = nextLong.asInstanceOf[Double] / Long.MaxValue
/**
* Generate a new non-negative integer (rejection sampling: redraw negatives)
*/
@tailrec final def nextNonNegInt: Int = {
val tmp = nextInt
if (tmp >= 0) tmp else nextNonNegInt
}
/**
* Generate a new non-negative long
*/
@tailrec final def nextNonNegLong: Long = {
val tmp = nextLong
if (tmp >= 0) tmp else nextNonNegLong
}
/**
* Generate a new non-negative float
*/
@tailrec final def nextNonNegFloat: Float = {
val tmp = nextFloat
if (tmp >= 0) tmp else nextNonNegFloat
}
/**
* Generate a new non-negative double
*/
@tailrec final def nextNonNegDouble: Double = {
val tmp = nextDouble
if (tmp >= 0) tmp else nextNonNegDouble
}
// Cached second gaussian from the polar method; NaN acts as the "empty"
// sentinel because NaN != NaN.
private var next = Double.NaN
/**
* Generate a new Gaussian (normally distributed) number
*
* This is based on Apache Commons Math's nextGaussian, which in turn is based
* on the Polar Method of Box, Muller, & Marsiglia as described in Knuth 3.4.1C
*/
@tailrec final def nextGaussian: Double = {
// next == next is true only when a cached (non-NaN) value is available
if (next == next) {
val tmp = next
next = Double.NaN
tmp
}
else {
// draw a point uniformly in [-1, 1]^2 and reject outside the unit circle
val u1 = 2.0 * nextDouble - 1.0
val u2 = 2.0 * nextDouble - 1.0
val s = u1 * u1 + u2 * u2
if (s >= 1) nextGaussian
else {
// polar transform; produces two gaussians, one cached for the next call
val bm = if (s != 0) { math.sqrt(-2.0 * math.log(s) / s) } else s
next = u1 * bm
u2 * bm
}
}
}
}
object Random {
  /**
   * Create Random instance seeded from system entropy
   */
  def apply(): Random = {
    val seed = new java.util.Random().nextLong
    new Random(XorShift(seed))
  }
  /**
   * Create Random instance from provided seed; the seed is first mixed
   * through java.util.Random before seeding the XorShift generator
   */
  def apply(seed: Long): Random = {
    val mixed = new java.util.Random(seed).nextLong
    new Random(XorShift(mixed))
  }
  /**
   * Create Random instance from custom RNG function
   */
  def apply(rng: () => Long): Random = new Random(rng)
}
/**
 * Marsaglia XorShift PRNG
 *
 * A 64-bit xorshift generator: each call scrambles the state with three
 * shift/xor steps and returns it.
 * See [[http://www.jstatsoft.org/v08/i14/ Marsaglia]]
 */
object XorShift {
  /** Seed from system entropy. */
  def apply(): () => Long = apply(new java.util.Random().nextLong)

  /** Deterministic construction with the standard (13, 7, 17) shift triple. */
  def apply(seed: Long): () => Long = makeRNG((13, 7, 17), seed)

  /**
   * Build the generator closure. `tup` holds the three shift amounts; the
   * returned function advances the captured state and returns it. The lambda
   * body is braced explicitly (the original relied on Scala extending an
   * unbraced function body to the end of the enclosing block).
   */
  def makeRNG(tup: (Int, Int, Int), seed: Long): () => Long = {
    val (a, b, c) = tup
    var state = seed
    () => {
      state ^= state << a
      state ^= state >> b
      state ^= state << c
      state
    }
  }
}
/**
 * Marsaglia Lagged Fibonacci PRNG (LFIB4)
 *
 * Maintains a table of 256 64-bit words; each step folds three lagged entries
 * into the current slot and returns it.
 * See [[https://groups.google.com/forum/?fromgroups=#!msg/sci.crypt/yoaCpGWKEk0/UXCxgufdTesJ]]
 */
object LFib4 {
  /** Seed from system entropy. */
  def apply(): () => Long = apply(new java.util.Random().nextLong)

  /** Deterministic construction from a seed. */
  def apply(seed: Long): () => Long = makeRNG(seed)

  /** Build the generator; the table is filled from java.util.Random(seed). */
  def makeRNG(seed: Long): () => Long = {
    val jrand = new java.util.Random(seed)
    // 256 longs = 2K of memory; Array.fill evaluates left-to-right, matching
    // a sequential draw of 256 values from jrand.
    val table = Array.fill(256)(jrand.nextLong)
    var cursor = 0
    () => {
      cursor = (cursor + 1) & 0xFF
      table(cursor) += table((cursor + 58) & 0xFF) +
        table((cursor + 119) & 0xFF) +
        table((cursor + 178) & 0xFF)
      table(cursor)
    }
  }
}
/**
* Ziff 4-tap shift-register-sequence
*
* http://arxiv.org/pdf/cond-mat/9710104v1.pdf
* http://www.aip.org/cip/pdf/vol_12/iss_4/385_1.pdf
*/
object Ziff98 {
def apply(): () => Long = apply(new java.util.Random().nextLong)
def apply(seed: Long): () => Long = makeRNG(seed)
def makeRNG(seed: Long): () => Long = {
// lags a,b,c,d and mask m (m = 2^14 - 1, so `& m` yields indices 0..m)
val (a, b, c, d, m) = (471, 1586, 6988, 9689, 16383)
val jrand = new java.util.Random(seed)
val state = Array.ofDim[Long](m + 1) // 128K of memory
var nd = 0
// NOTE(review): `0 until m` leaves state(m) at its default 0L even though
// index m is reachable through the `& m` masking below -- likely an
// off-by-one in the original; confirm before changing the stream.
for (i <- 0 until m) state(i) = jrand.nextLong
() => {
nd += 1
// current slot a1 and the four lagged taps, all wrapped with the mask
val (a1, b1, c1, d1, e1) = (nd & m, (nd - a) & m, (nd - b) & m, (nd - c) & m, (nd - d) & m)
state(a1) = state(b1) ^ state(c1) ^ state(d1) ^ state(e1)
state(a1)
}
}
}
/**
* Create a random InputStream of bytes from a PRNG. Useful for testing, e.g.,
* for feeding into dieharder battery of tests via stdin.
*/
case class RandomStream(rng: () => Long) extends InputStream {
// c cycles 1..8, selecting which byte of the current long `r` to emit
var c = 0
var r = rng()
def read(): Int = {
c += 1
// extract the c-th lowest byte of r; on the 8th byte, reset the cycle and
// draw the next long (the match is exhaustive at runtime since c is 1..8)
val byte = c match {
case 1 => r
case 2 => r >>> 8
case 3 => r >>> 16
case 4 => r >>> 24
case 5 => r >>> 32
case 6 => r >>> 40
case 7 => r >>> 48
case 8 => c = 0; val tmp = (r >>> 56); r = rng(); tmp
}
// mask to an unsigned byte value (0..255), per the InputStream contract
(byte & 0xFF).asInstanceOf[Int]
}
}
| jyt109/saddle | saddle-core/src/main/scala/org/saddle/util/Random.scala | Scala | apache-2.0 | 5,569 |
import play.api.GlobalSettings
import play.api.mvc.WithFilters
import play.filters.gzip.GzipFilter
// Play global settings wired with a GzipFilter so HTTP responses are gzip
// compressed for clients that advertise support via Accept-Encoding.
object Global extends WithFilters(new GzipFilter) with GlobalSettings {
} | betygen/web | app/Global.scala | Scala | mit | 173 |
package com.aristocrat.mandrill.requests.Messages
import com.aristocrat.mandrill.requests.MandrillRequest
// Mandrill "Messages" request payload: `key` is the account API key and
// `rawMessage` the full raw MIME document to parse (NOTE(review): presumably
// maps to the /messages/parse.json endpoint -- confirm against the client).
case class Parse(key: String, rawMessage: String) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Messages/Parse.scala | Scala | mit | 182 |
package com.skn.common.view.model
import com.skn.api.view.jsonapi.JsonApiModel.ObjectKey
import com.skn.api.view.jsonapi.JsonApiValueModel.JsonApiNumber
import com.skn.api.view.model.ViewItem
import scala.language.experimental.macros
/**
* View model for a "person" resource: `key` marks the object type as "person"
* with an optional numeric id (None until the item has an id assigned).
*
* Created by Sergey on 04.10.2016.
*/
case class PersonView(name: String, test: Number, id: Option[Long] = None)
extends ViewItem { val key = ObjectKey("person", id.map(JsonApiNumber(_))) }
| AlexeyIvanov8/json-api-mapper | src/test/scala/com/skn/common/view/model/PersonView.scala | Scala | gpl-3.0 | 440 |
package recfun
import scala.annotation.tailrec
object Main {

  /** Prints the first eleven rows of Pascal's triangle to stdout. */
  def main(args: Array[String]): Unit = {  // `: Unit =` replaces deprecated procedure syntax
    println("Pascal's Triangle")
    for (row <- 0 to 10) {
      for (col <- 0 to row)
        print(pascal(col, row) + " ")
      println()
    }
  }

  /**
   * Exercise 1: value at column `c` of row `r` (both 0-based) of Pascal's
   * triangle. Edge cells are 1; every inner cell is the sum of the two
   * cells above it.
   *
   * Note: plain double recursion without memoization, i.e. exponential in
   * `r` — fine for the small rows printed by [[main]].
   */
  def pascal(c: Int, r: Int): Int =
    if (c == 0 || c == r) 1
    else pascal(c - 1, r - 1) + pascal(c, r - 1)

  /**
   * Exercise 2: whether the parentheses in `chars` are balanced.
   * All characters other than '(' and ')' are ignored.
   */
  def balance(chars: List[Char]): Boolean = {
    val onlyBrackets = chars.filter(c => c == '(' || c == ')')
    // Walks the brackets keeping the count of currently-open parens; the
    // count must never dip below zero and must finish at exactly zero.
    @tailrec
    def isBalanced(open: Int, rest: List[Char]): Boolean =
      if (rest.isEmpty) open == 0
      else open >= 0 && isBalanced(if (rest.head == ')') open - 1 else open + 1, rest.tail)
    // Odd bracket counts can never balance; short-circuit before the walk.
    onlyBrackets.size % 2 == 0 && isBalanced(0, onlyBrackets)
  }

  /**
   * Exercise 3: number of distinct ways `money` can be assembled from the
   * given coin denominations, each usable any number of times.
   * Counts by either spending the first coin or dropping it entirely.
   */
  def countChange(money: Int, coins: List[Int]): Int =
    if (coins.isEmpty) 0
    else if (money == 0) 1
    else if (money < 0) 0
    else countChange(money - coins.head, coins) + countChange(money, coins.tail)
}
| iTsFILIPOficial/coursera-scala | Week 1/src/main/scala/recfun/Main.scala | Scala | mit | 1,107 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.wrappers
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.util.Identifiable
import org.apache.spark.sql.types.StructType
import org.apache.spark.{ml, sql}
import io.deepsense.deeplang.ExecutionContext
import io.deepsense.deeplang.doperables.{Transformer, Estimator}
import io.deepsense.deeplang.doperables.dataframe.DataFrame
import io.deepsense.deeplang.params.wrappers.deeplang.ParamWrapper
import io.deepsense.sparkutils.ML
/**
 * Adapts a deeplang [[Estimator]] to the Spark ML estimator interface so it
 * can be fitted on Spark DataFrames. Fitting delegates to the wrapped
 * estimator's `_fit` and wraps the resulting deeplang [[Transformer]] in a
 * [[TransformerWrapper]].
 *
 * @param executionContext deeplang execution context threaded through
 *                         fitting and into the produced transformer
 * @param estimator        the wrapped deeplang estimator
 */
class EstimatorWrapper(
    executionContext: ExecutionContext,
    estimator: Estimator[Transformer])
  extends ML.Estimator[TransformerWrapper] {

  /** Fits the wrapped estimator on `dataset` and returns the fitted
   *  transformer, wrapped for Spark ML consumption. */
  override def fitDF(dataset: sql.DataFrame): TransformerWrapper = {
    new TransformerWrapper(
      executionContext,
      estimator._fit(executionContext, DataFrame.fromSparkDataFrame(dataset.toDF())))
  }

  /** Copies this wrapper by replicating the underlying estimator and
   *  applying the Spark params translated into deeplang params. */
  override def copy(extra: ParamMap): EstimatorWrapper = {
    val params = ParamTransformer.transform(extra)
    val estimatorCopy = estimator.replicate().set(params: _*)
    new EstimatorWrapper(executionContext, estimatorCopy)
  }

  // Schema is passed through unchanged; no schema transformation is
  // performed by this wrapper itself.
  override def transformSchema(schema: StructType): StructType = {
    schema
  }

  // Exposes the deeplang estimator's params as Spark ML params via
  // ParamWrapper, keyed by this wrapper's uid.
  override lazy val params: Array[ml.param.Param[_]] = {
    estimator.params.map(new ParamWrapper(uid, _))
  }

  override val uid: String = Identifiable.randomUID("EstimatorWrapper")
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperables/wrappers/EstimatorWrapper.scala | Scala | apache-2.0 | 1,956 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom
import akka.actor.ActorSystem
import com.datastax.driver.core.{Session, Statement}
import com.outworkers.phantom.builder.batch.BatchType
import com.outworkers.phantom.builder.query.prepared.ExecutablePreparedSelectQuery
import com.outworkers.phantom.builder.query.{RootSelectBlock, SelectQuery}
import com.outworkers.phantom.builder.{ConsistencyBound, LimitBound, OrderBound, WhereBound}
import com.outworkers.phantom.connectors.KeySpace
import com.outworkers.phantom.dsl.{context => _, _}
import com.outworkers.phantom.streams.iteratee.{Enumerator, Iteratee => PhantomIteratee}
import com.outworkers.phantom.streams.lib.EnumeratorPublisher
import org.reactivestreams.Publisher
import play.api.libs.iteratee.{Enumeratee, Enumerator => PlayEnumerator}
import shapeless.HList
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
/**
* Just a wrapper module for enhancing phantom [[CassandraTable]]
* with reactive streams features.
*
* In order to be used, please be sured to import the implicits
* into the scope.
*
* {{{
* import ReactiveCassandra._
* val subscriber = CassandraTableInstance.subscriber()
* }}}
*
* @see [[http://www.reactive-streams.org/]]
* @see [[https://github.com/websudos/phantom]]
*/
package object streams {
/**
 * Adapt an Enumerator to a Publisher. Each Subscriber will be
 * adapted to an Iteratee and applied to the Enumerator. Input of
 * type Input.El will result in calls to onNext.
 *
 * Either onError or onComplete will always be invoked as the
 * last call to the subscriber, the former happening if the
 * enumerator fails with an error, the latter happening when
 * the first of either Input.EOF is fed, or the enumerator
 * completes.
 *
 * If emptyElement is None then Input of type Input.Empty will
 * be ignored. If it is set to Some(x) then it will call onNext
 * with the value x.
 *
 * @param enum the Play enumerator to expose as a Reactive Streams publisher
 * @param emptyElement optional value to emit in place of Input.Empty
 * @return a publisher that drives each subscriber from the enumerator
 */
def enumeratorToPublisher[T](enum: PlayEnumerator[T], emptyElement: Option[T] = None): Publisher[T] = {
  new EnumeratorPublisher(enum, emptyElement)
}

// Default number of batch statements executed concurrently by a subscriber.
private[this] final val DEFAULT_CONCURRENT_REQUESTS = 5

// Default number of statements bundled into one Cassandra batch.
private[this] final val DEFAULT_BATCH_SIZE = 100
/**
 * Enriches a phantom table with streaming entry points: a batching
 * Reactive Streams subscriber for writes and a publisher for reads.
 *
 * @tparam CT the concrete type inheriting from [[CassandraTable]]
 * @tparam T the type of the streamed element
 */
implicit class StreamedCassandraTable[CT <: CassandraTable[CT, T], T](
  val ct: CassandraTable[CT, T]
) extends AnyVal {

  /**
   * Gets a reactive streams [[org.reactivestreams.Subscriber]] with
   * batching capabilities for some phantom [[CassandraTable]]. This
   * subscriber is able to work for both finite short-lived streams
   * and never-ending long-lived streams. For the latter, a flushInterval
   * parameter can be used.
   *
   * @param batchSize the number of elements to include in the Cassandra batch
   * @param concurrentRequests the number of concurrent batch operations
   * @param batchType the type of the batch.
   * @see See [[http://docs.datastax.com/en/cql/3.1/cql/cql_reference/batch_r.html]] for further
   *      explanation.
   * @param flushInterval used to schedule periodic batch execution even though the number of statements hasn't
   *                      been reached yet. Useful in never-ending streams that will never been completed.
   * @param completionFn a function that will be invoked when the stream is completed
   * @param errorFn a function that will be invoked when an error occurs
   * @param builder an implicitly resolved [[RequestBuilder]] that wraps a phantom [[com.outworkers.phantom.builder.query.execution.ExecutableCqlQuery]].
   *                Every T element that gets into the stream from the upstream is turned into a ExecutableStatement
   *                by means of this builder.
   * @param system the underlying [[ActorSystem]]. This [[org.reactivestreams.Subscriber]] implementation uses Akka
   *               actors, but is not restricted to be used in the context of Akka Streams.
   * @param session the Cassandra [[com.datastax.driver.core.Session]]
   * @param space the Cassandra [[KeySpace]]
   * @param ev an evidence to get the T type removed by erasure
   * @return the [[org.reactivestreams.Subscriber]] to be connected to a reactive stream typically initiated by
   *         a [[org.reactivestreams.Publisher]]
   */
  def subscriber(
    batchSize: Int = DEFAULT_BATCH_SIZE,
    concurrentRequests: Int = DEFAULT_CONCURRENT_REQUESTS,
    batchType: BatchType = BatchType.Unlogged,
    flushInterval: Option[FiniteDuration] = None,
    completionFn: () => Unit = () => (),
    errorFn: Throwable => Unit = _ => ()
  )(implicit builder: RequestBuilder[CT, T],
    system: ActorSystem,
    session: Session,
    space: KeySpace,
    ev: Manifest[T]
  ): BatchSubscriber[CT, T] = {
    new BatchSubscriber[CT, T](
      // Cast assumed safe because of the F-bounded signature
      // (CT <: CassandraTable[CT, T]): `ct` declares itself as a CT.
      ct.asInstanceOf[CT],
      builder,
      batchSize,
      concurrentRequests,
      batchType,
      flushInterval,
      completionFn,
      errorFn
    )
  }

  /**
   * Creates a stream publisher based on the default ReactiveStreams implementation.
   * This will use the underlying Play enumerator model to convert.
   * Publishes every record of the table (a full `select.all()`).
   *
   * @param session The Cassandra session to execute the enumeration within.
   * @param keySpace The target keyspace.
   * @return A publisher of records, publishing one record at a time.
   */
  def publisher()(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): Publisher[T] = {
    enumeratorToPublisher(ct.select.all().fetchEnumerator())
  }
}
/** Adds a `.publisher` conversion from a Play enumerator to a
 *  Reactive Streams [[Publisher]] (no empty-element substitution). */
implicit class PublisherConverter[T](val enumerator: PlayEnumerator[T]) extends AnyVal {
  def publisher: Publisher[T] = enumeratorToPublisher(enumerator)
}
/**
 * Returns the product of the arguments, saturating at `Long.MaxValue`
 * when the exact result would overflow a [[scala.Long]].
 *
 * NOTE: despite the name (borrowed from `java.lang.Math.multiplyExact`),
 * this method never throws on overflow — the original scaladoc's
 * `@throws ArithmeticException` claim did not match the implementation.
 * Saturating is deliberate: it matches Reactive Streams demand semantics
 * (spec rule 3.17), where accumulated demand beyond `Long.MaxValue` is
 * treated as "effectively unbounded".
 *
 * @param x the first value
 * @param y the second value
 * @return `x * y`, or `Long.MaxValue` if the exact product overflows a long
 */
def multiplyExact(x: Long, y: Long): Long = {
  val r: Long = x * y
  val ax: Long = Math.abs(x)
  val ay: Long = Math.abs(y)
  // Fast path: if both magnitudes fit in 31 bits the product fits in 62
  // bits and cannot overflow, so r is exact.
  if (((ax | ay) >> 31) == 0) {
    r
  } else if (((y != 0) && (r / y != x)) || (x == Long.MinValue && y == -1)) {
    // Overflow detected: either dividing the wrapped product back by y does
    // not recover x, or we hit the one special case Long.MinValue * -1,
    // whose true product (2^63) is unrepresentable.
    Long.MaxValue
  } else {
    r
  }
}
final val Iteratee = PhantomIteratee
/**
 * Enriches a [[SelectQuery]] with conversions to Play enumerators and
 * Reactive Streams publishers, so query results can be streamed instead
 * of collected.
 */
implicit class ExecutableQueryStreamsAugmenter[
  T <: CassandraTable[T, R],
  R,
  Limit <: LimitBound,
  Order <: OrderBound,
  Status <: ConsistencyBound,
  Chain <: WhereBound,
  PS <: HList
](val query: SelectQuery[T, R, Limit, Order, Status, Chain, PS]) extends AnyVal {

  /**
   * Produces an Enumerator for [R]ows
   * This enumerator can be consumed afterwards with an Iteratee
   *
   * @param session The Cassandra session in use.
   * @param ctx The Execution Context.
   * @return A play enumerator containing the results of the query.
   */
  def fetchEnumerator()(
    implicit session: Session,
    ctx: ExecutionContext
  ): PlayEnumerator[R] = {
    PlayEnumerator.flatten {
      query.future() map { res =>
        Enumerator.enumerator(res) through Enumeratee.map(query.fromRow)
      }
    }
  }

  /**
   * Produces an Enumerator for [R]ows
   * This enumerator can be consumed afterwards with an Iteratee
   * @param mod A modifier to apply to a statement.
   * @param session The Cassandra session in use.
   * @param ctx The Execution Context.
   * @return A play enumerator containing the results of the query.
   */
  def fetchEnumerator(mod: Statement => Statement)(
    implicit session: Session,
    ctx: ExecutionContext
  ): PlayEnumerator[R] = {
    PlayEnumerator.flatten {
      query.future(mod) map { res =>
        Enumerator.enumerator(res) through Enumeratee.map(query.fromRow)
      }
    }
  }

  /**
   * Creates a Reactive Streams publisher from a root select block.
   * This will create a reactive streams publisher containing all the records found by this [[SelectQuery]].
   * @param session The database session in which to execute this.
   * @param ctx The execution context to use.
   * @return A streams publisher interface clients can subscribe to.
   */
  def publisher()(
    implicit session: Session,
    ctx: ExecutionContext
  ): Publisher[R] = enumeratorToPublisher(query.fetchEnumerator())

  /**
   * Creates a Reactive Streams publisher from a root select block but also
   * allows passing through a query modifier.
   * This will create a reactive streams publisher containing all the records found by this [[SelectQuery]].
   * @param session The database session in which to execute this.
   * @param ctx The execution context to use.
   * @return A streams publisher interface clients can subscribe to.
   */
  def publisher(modifier: Statement => Statement)(
    implicit session: Session,
    ctx: ExecutionContext
  ): Publisher[R] = {
    enumeratorToPublisher(query.fetchEnumerator(modifier))
  }
}
/**
 * Enriches a [[RootSelectBlock]] with streaming conversions. All methods
 * execute a full `select.all()` over the table and stream the rows.
 */
implicit class RootSelectBlockEnumerator[
  T <: CassandraTable[T, _],
  R
](val block: RootSelectBlock[T, R]) extends AnyVal {

  /**
   * Produces an Enumerator for [R]ows
   * This enumerator can be consumed afterwards with an Iteratee
   *
   * @param session The Cassandra session in use.
   * @param ctx The Execution Context.
   * @return A play enumerator containing the results of the query.
   */
  def fetchEnumerator()(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): PlayEnumerator[R] = {
    PlayEnumerator.flatten {
      block.all().future() map { res =>
        Enumerator.enumerator(res) through Enumeratee.map(block.rowFunc)
      }
    }
  }

  /**
   * Produces an Enumerator for [R]ows
   * This enumerator can be consumed afterwards with an Iteratee
   *
   * @param modifier A modifier to apply to the underlying statement.
   * @param session The Cassandra session in use.
   * @param ctx The Execution Context.
   * @return A play enumerator containing the results of the query.
   */
  def fetchEnumerator(modifier: Statement => Statement)(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): PlayEnumerator[R] = {
    PlayEnumerator.flatten {
      block.all().future(modifier) map { res =>
        Enumerator.enumerator(res) through Enumeratee.map(block.rowFunc)
      }
    }
  }

  /**
   * Creates a Reactive Streams publisher from a root select block.
   * Because this is a [[RootSelectBlock]], the default execution profile
   * of this method will be to select all records in a table and stream them.
   * @param session The database session in which to execute this.
   * @param keySpace The keyspace in which to execute the query.
   * @param ctx The execution context to use.
   * @return A streams publisher interface clients can subscribe to.
   */
  def publisher()(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): Publisher[R] = enumeratorToPublisher(block.fetchEnumerator())

  /**
   * Creates a Reactive Streams publisher from a root select block but also
   * allows passing through a query modifier.
   * Because this is a [[RootSelectBlock]], the default execution profile
   * of this method will be to select all records in a table and stream them.
   * @param session The database session in which to execute this.
   * @param keySpace The keyspace in which to execute the query.
   * @param ctx The execution context to use.
   * @return A streams publisher interface clients can subscribe to.
   */
  def publisher(modifier: Statement => Statement)(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): Publisher[R] = {
    enumeratorToPublisher(block.fetchEnumerator(modifier))
  }
}
/**
 * Enriches an [[ExecutablePreparedSelectQuery]] with streaming conversions,
 * so the rows produced by an already-prepared select can be consumed as a
 * Play enumerator or a Reactive Streams publisher.
 */
implicit class PreparedSelectQueryStream[
  T <: CassandraTable[T, _],
  R,
  Limit <: LimitBound
](val block: ExecutablePreparedSelectQuery[T, R, Limit]) extends AnyVal {

  /**
   * Produces an Enumerator for [R]ows
   * This enumerator can be consumed afterwards with an Iteratee
   *
   * @param session The Cassandra session in use.
   * @param ctx The Execution Context.
   * @return A play enumerator containing the results of the query.
   */
  def fetchEnumerator()(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): PlayEnumerator[R] = {
    PlayEnumerator.flatten {
      block.future() map { res =>
        Enumerator.enumerator(res) through Enumeratee.map(block.fromRow)
      }
    }
  }

  /**
   * Produces an Enumerator for [R]ows
   * This enumerator can be consumed afterwards with an Iteratee
   *
   * @param modifier A modifier to apply to the underlying statement.
   * @param session The Cassandra session in use.
   * @param ctx The Execution Context.
   * @return A play enumerator containing the results of the query.
   */
  def fetchEnumerator(modifier: Statement => Statement)(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): PlayEnumerator[R] = {
    PlayEnumerator.flatten {
      block.future(modifier) map { res =>
        Enumerator.enumerator(res) through Enumeratee.map(block.fromRow)
      }
    }
  }

  /**
   * Creates a Reactive Streams publisher from a prepared select query,
   * streaming the rows produced by the underlying query.
   * (The previous doc wrongly referred to [[RootSelectBlock]] — copy-paste.)
   * @param session The database session in which to execute this.
   * @param keySpace The keyspace in which to execute the query.
   * @param ctx The execution context to use.
   * @return A streams publisher interface clients can subscribe to.
   */
  def publisher()(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): Publisher[R] = enumeratorToPublisher(block.fetchEnumerator())

  /**
   * Creates a Reactive Streams publisher from a prepared select query but
   * also allows passing through a query modifier, streaming the rows
   * produced by the underlying query.
   * @param session The database session in which to execute this.
   * @param keySpace The keyspace in which to execute the query.
   * @param ctx The execution context to use.
   * @return A streams publisher interface clients can subscribe to.
   */
  def publisher(modifier: Statement => Statement)(
    implicit session: Session,
    keySpace: KeySpace,
    ctx: ExecutionContext
  ): Publisher[R] = {
    enumeratorToPublisher(block.fetchEnumerator(modifier))
  }
}
}
| outworkers/phantom | phantom-streams/src/main/scala/com/outworkers/phantom/streams/package.scala | Scala | apache-2.0 | 15,605 |
package io.udash.bootstrap.utils
import io.udash.bindings.modifiers.Binding
import io.udash.component.Component
import io.udash.wrappers.jquery._
import org.scalajs.dom.Element
/** Base trait for Bootstrap components. */
trait UdashBootstrapComponent extends Component {
  /** The root DOM element rendered by this component. */
  override val render: Element

  /**
   * Binding that registers a jQuery event handler on creation and
   * unregisters the same handler when the binding is killed, so the
   * handler does not outlive the component (prevents listener leaks).
   */
  protected class JQueryOnBinding(selector: JQuery, event: EventName, callback: JQueryCallback) extends Binding {
    // Registration happens eagerly at construction time.
    selector.on(event, callback)

    override def kill(): Unit = {
      super.kill()
      // Detach exactly the handler registered above.
      selector.off(event, callback)
    }

    // Nothing to do per-element: the jQuery registration above is the
    // whole effect of this binding.
    override def applyTo(t: Element): Unit = ()
  }
}
| UdashFramework/udash-core | bootstrap4/.js/src/main/scala/io/udash/bootstrap/utils/UdashBootstrapComponent.scala | Scala | apache-2.0 | 603 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.IOException
import java.net.URI
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.{Failure, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.filter2.predicate.FilterApi
import org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GROUPS
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.ParquetOutputFormat.JobSummaryLevel
import org.apache.parquet.hadoop.codec.CodecConfig
import org.apache.parquet.hadoop.util.ContextUtil
import org.apache.parquet.schema.MessageType
import org.apache.spark.{SparkException, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.parser.LegacyTypeStringParser
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.vectorized.{OffHeapColumnVector, OnHeapColumnVector}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.{SerializableConfiguration, ThreadUtils}
class ParquetFileFormat
extends FileFormat
with DataSourceRegister
with Logging
with Serializable {
// Hold a reference to the (serializable) singleton instance of ParquetLogRedirector. This
// ensures the ParquetLogRedirector class is initialized whether an instance of ParquetFileFormat
// is constructed or deserialized. Do not heed the Scala compiler's warning about an unused field
// here.
private val parquetLogRedirector = ParquetLogRedirector.INSTANCE

// Short name used in DataFrameReader/Writer `.format("parquet")`.
override def shortName(): String = "parquet"

override def toString: String = "Parquet"

// The format is stateless: all instances compare equal and hash by class,
// matching the class-based `equals` below.
override def hashCode(): Int = getClass.hashCode()

override def equals(other: Any): Boolean = other.isInstanceOf[ParquetFileFormat]
/**
 * Prepares a write job: configures the Hadoop `Job` with the Parquet output
 * committer, the Spark `ParquetWriteSupport`, the data schema, timestamp and
 * legacy-format flags, the compression codec and the job-summary level, then
 * returns a factory producing one [[ParquetOutputWriter]] per write task.
 *
 * @param sparkSession active session (source of SQL conf values)
 * @param job          Hadoop job whose configuration is mutated here
 * @param options      per-write options (e.g. compression)
 * @param dataSchema   schema of the rows being written
 * @return factory creating task-local output writers
 */
override def prepareWrite(
    sparkSession: SparkSession,
    job: Job,
    options: Map[String, String],
    dataSchema: StructType): OutputWriterFactory = {
  val parquetOptions = new ParquetOptions(options, sparkSession.sessionState.conf)

  val conf = ContextUtil.getConfiguration(job)

  val committerClass =
    conf.getClass(
      SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key,
      classOf[ParquetOutputCommitter],
      classOf[OutputCommitter])

  if (conf.get(SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key) == null) {
    logInfo("Using default output committer for Parquet: " +
      classOf[ParquetOutputCommitter].getCanonicalName)
  } else {
    logInfo("Using user defined output committer for Parquet: " + committerClass.getCanonicalName)
  }

  conf.setClass(
    SQLConf.OUTPUT_COMMITTER_CLASS.key,
    committerClass,
    classOf[OutputCommitter])

  // We're not really using `ParquetOutputFormat[Row]` for writing data here, because we override
  // it in `ParquetOutputWriter` to support appending and dynamic partitioning. The reason why
  // we set it here is to setup the output committer class to `ParquetOutputCommitter`, which is
  // bundled with `ParquetOutputFormat[Row]`.
  job.setOutputFormatClass(classOf[ParquetOutputFormat[Row]])

  ParquetOutputFormat.setWriteSupportClass(job, classOf[ParquetWriteSupport])

  // This metadata is useful for keeping UDTs like Vector/Matrix.
  ParquetWriteSupport.setSchema(dataSchema, conf)

  // Sets flags for `ParquetWriteSupport`, which converts Catalyst schema to Parquet
  // schema and writes actual rows to Parquet files.
  conf.set(
    SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key,
    sparkSession.sessionState.conf.writeLegacyParquetFormat.toString)

  conf.set(
    SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key,
    sparkSession.sessionState.conf.parquetOutputTimestampType.toString)

  // Sets compression scheme
  conf.set(ParquetOutputFormat.COMPRESSION, parquetOptions.compressionCodecClassName)

  // SPARK-15719: Disables writing Parquet summary files by default.
  if (conf.get(ParquetOutputFormat.JOB_SUMMARY_LEVEL) == null
    && conf.get(ParquetOutputFormat.ENABLE_JOB_SUMMARY) == null) {
    conf.setEnum(ParquetOutputFormat.JOB_SUMMARY_LEVEL, JobSummaryLevel.NONE)
  }

  if (ParquetOutputFormat.getJobSummaryLevel(conf) == JobSummaryLevel.NONE
    && !classOf[ParquetOutputCommitter].isAssignableFrom(committerClass)) {
    // output summary is requested, but the class is not a Parquet Committer
    logWarning(s"Committer $committerClass is not a ParquetOutputCommitter and cannot" +
      s" create job summaries. " +
      s"Set Parquet option ${ParquetOutputFormat.JOB_SUMMARY_LEVEL} to NONE.")
  }

  new OutputWriterFactory {
    // This OutputWriterFactory instance is deserialized when writing Parquet files on the
    // executor side without constructing or deserializing ParquetFileFormat. Therefore, we hold
    // another reference to ParquetLogRedirector.INSTANCE here to ensure the latter class is
    // initialized.
    private val parquetLogRedirector = ParquetLogRedirector.INSTANCE

    override def newInstance(
        path: String,
        dataSchema: StructType,
        context: TaskAttemptContext): OutputWriter = {
      new ParquetOutputWriter(path, context)
    }

    override def getFileExtension(context: TaskAttemptContext): String = {
      CodecConfig.from(context).getCodec.getExtension + ".parquet"
    }
  }
}
/**
 * Infers the table schema from the given Parquet files. When schema merging
 * is enabled this may touch (potentially) all part-files plus summary files;
 * otherwise it trusts a single summary file, falling back to the first
 * part-file. Returns None when no schema can be determined.
 */
override def inferSchema(
    sparkSession: SparkSession,
    parameters: Map[String, String],
    files: Seq[FileStatus]): Option[StructType] = {
  val parquetOptions = new ParquetOptions(parameters, sparkSession.sessionState.conf)

  // Should we merge schemas from all Parquet part-files?
  val shouldMergeSchemas = parquetOptions.mergeSchema

  val mergeRespectSummaries = sparkSession.sessionState.conf.isParquetSchemaRespectSummaries

  val filesByType = splitFiles(files)

  // Sees which file(s) we need to touch in order to figure out the schema.
  //
  // Always tries the summary files first if users don't require a merged schema. In this case,
  // "_common_metadata" is more preferable than "_metadata" because it doesn't contain row
  // groups information, and could be much smaller for large Parquet files with lots of row
  // groups. If no summary file is available, falls back to some random part-file.
  //
  // NOTE: Metadata stored in the summary files are merged from all part-files. However, for
  // user defined key-value metadata (in which we store Spark SQL schema), Parquet doesn't know
  // how to merge them correctly if some key is associated with different values in different
  // part-files. When this happens, Parquet simply gives up generating the summary file. This
  // implies that if a summary file presents, then:
  //
  //   1. Either all part-files have exactly the same Spark SQL schema, or
  //   2. Some part-files don't contain Spark SQL schema in the key-value metadata at all (thus
  //      their schemas may differ from each other).
  //
  // Here we tend to be pessimistic and take the second case into account. Basically this means
  // we can't trust the summary files if users require a merged schema, and must touch all part-
  // files to do the merge.
  val filesToTouch =
    if (shouldMergeSchemas) {
      // Also includes summary files, 'cause there might be empty partition directories.

      // If mergeRespectSummaries config is true, we assume that all part-files are the same for
      // their schema with summary files, so we ignore them when merging schema.
      // If the config is disabled, which is the default setting, we merge all part-files.
      // In this mode, we only need to merge schemas contained in all those summary files.
      // You should enable this configuration only if you are very sure that for the parquet
      // part-files to read there are corresponding summary files containing correct schema.

      // As filed in SPARK-11500, the order of files to touch is a matter, which might affect
      // the ordering of the output columns. There are several things to mention here.
      //
      //  1. If mergeRespectSummaries config is false, then it merges schemas by reducing from
      //     the first part-file so that the columns of the lexicographically first file show
      //     first.
      //
      //  2. If mergeRespectSummaries config is true, then there should be, at least,
      //     "_metadata"s for all given files, so that we can ensure the columns of
      //     the lexicographically first file show first.
      //
      //  3. If shouldMergeSchemas is false, but when multiple files are given, there is
      //     no guarantee of the output order, since there might not be a summary file for the
      //     lexicographically first file, which ends up putting ahead the columns of
      //     the other files. However, this should be okay since not enabling
      //     shouldMergeSchemas means (assumes) all the files have the same schemas.

      val needMerged: Seq[FileStatus] =
        if (mergeRespectSummaries) {
          Seq.empty
        } else {
          filesByType.data
        }
      needMerged ++ filesByType.metadata ++ filesByType.commonMetadata
    } else {
      // Tries any "_common_metadata" first. Parquet files written by old versions or Parquet
      // don't have this.
      filesByType.commonMetadata.headOption
        // Falls back to "_metadata"
        .orElse(filesByType.metadata.headOption)
        // Summary file(s) not found, the Parquet file is either corrupted, or different part-
        // files contain conflicting user defined metadata (two or more values are associated
        // with a same key in different files). In either case, we fall back to any of the
        // first part-file, and just assume all schemas are consistent.
        .orElse(filesByType.data.headOption)
        .toSeq
    }
  ParquetFileFormat.mergeSchemasInParallel(filesToTouch, sparkSession)
}
/**
 * Grouping of input files by role, as produced by [[splitFiles]].
 *
 * @param data           regular part-files containing row data
 * @param metadata       "_metadata" summary files
 * @param commonMetadata "_common_metadata" summary files (schema only, no
 *                       row-group information)
 */
case class FileTypes(
    data: Seq[FileStatus],
    metadata: Seq[FileStatus],
    commonMetadata: Seq[FileStatus])
/**
 * Partitions `allFiles` into data part-files, "_metadata" summaries and
 * "_common_metadata" summaries. Files are sorted by full path first so the
 * resulting sequences have a deterministic, lexicographic order.
 */
private def splitFiles(allFiles: Seq[FileStatus]): FileTypes = {
  val sorted = allFiles.sortBy(_.getPath.toString)
  val (summaries, dataFiles) = sorted.partition(f => isSummaryFile(f.getPath))
  FileTypes(
    data = dataFiles,
    metadata =
      summaries.filter(_.getPath.getName == ParquetFileWriter.PARQUET_METADATA_FILE),
    commonMetadata =
      summaries.filter(_.getPath.getName == ParquetFileWriter.PARQUET_COMMON_METADATA_FILE))
}
/** True iff `file` is a Parquet summary file ("_metadata" or "_common_metadata"). */
private def isSummaryFile(file: Path): Boolean = {
  val name = file.getName
  name == ParquetFileWriter.PARQUET_METADATA_FILE ||
    name == ParquetFileWriter.PARQUET_COMMON_METADATA_FILE
}
/**
 * Returns whether the reader will return the rows as batch or not.
 */
override def supportBatch(sparkSession: SparkSession, schema: StructType): Boolean = {
  val conf = sparkSession.sessionState.conf
  // Batching requires: the vectorized Parquet reader, whole-stage codegen,
  // a column count within the codegen limit, and only atomic (non-nested)
  // column types.
  conf.parquetVectorizedReaderEnabled && conf.wholeStageEnabled &&
    schema.length <= conf.wholeStageMaxNumFields &&
    schema.forall(_.dataType.isInstanceOf[AtomicType])
}
/**
 * Names the ColumnVector implementation used for each output column:
 * off-heap when `spark.sql.columnVector.offheap.enabled` is set, on-heap
 * otherwise. One entry per data column plus one per partition column.
 */
override def vectorTypes(
    requiredSchema: StructType,
    partitionSchema: StructType,
    sqlConf: SQLConf): Option[Seq[String]] = {
  val vectorClass =
    if (sqlConf.offHeapColumnVectorEnabled) {
      classOf[OffHeapColumnVector].getName
    } else {
      classOf[OnHeapColumnVector].getName
    }
  val totalColumns = requiredSchema.fields.length + partitionSchema.fields.length
  Some(Seq.fill(totalColumns)(vectorClass))
}
// Parquet input is always reported as splittable, regardless of path or
// options (splits are presumably aligned to row groups downstream —
// NOTE(review): confirm against the reader's split handling).
override def isSplitable(
    sparkSession: SparkSession,
    options: Map[String, String],
    path: Path): Boolean = {
  true
}
override def buildReaderWithPartitionValues(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
hadoopConf.set(ParquetInputFormat.READ_SUPPORT_CLASS, classOf[ParquetReadSupport].getName)
hadoopConf.set(
ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA,
requiredSchema.json)
hadoopConf.set(
ParquetWriteSupport.SPARK_ROW_SCHEMA,
requiredSchema.json)
hadoopConf.set(
SQLConf.SESSION_LOCAL_TIMEZONE.key,
sparkSession.sessionState.conf.sessionLocalTimeZone)
ParquetWriteSupport.setSchema(requiredSchema, hadoopConf)
// Sets flags for `ParquetToSparkSchemaConverter`
hadoopConf.setBoolean(
SQLConf.PARQUET_BINARY_AS_STRING.key,
sparkSession.sessionState.conf.isParquetBinaryAsString)
hadoopConf.setBoolean(
SQLConf.PARQUET_INT96_AS_TIMESTAMP.key,
sparkSession.sessionState.conf.isParquetINT96AsTimestamp)
val broadcastedHadoopConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
// TODO: if you move this into the closure it reverts to the default values.
// If true, enable using the custom RecordReader for parquet. This only works for
// a subset of the types (no complex types).
val resultSchema = StructType(partitionSchema.fields ++ requiredSchema.fields)
val sqlConf = sparkSession.sessionState.conf
val enableOffHeapColumnVector = sqlConf.offHeapColumnVectorEnabled
val enableVectorizedReader: Boolean =
sqlConf.parquetVectorizedReaderEnabled &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
val enableRecordFilter: Boolean = sqlConf.parquetRecordFilterEnabled
val timestampConversion: Boolean = sqlConf.isParquetINT96TimestampConversion
val capacity = sqlConf.parquetVectorizedReaderBatchSize
val enableParquetFilterPushDown: Boolean = sqlConf.parquetFilterPushDown
// Whole stage codegen (PhysicalRDD) is able to deal with batches directly
val returningBatch = supportBatch(sparkSession, resultSchema)
val pushDownDate = sqlConf.parquetFilterPushDownDate
val pushDownTimestamp = sqlConf.parquetFilterPushDownTimestamp
val pushDownDecimal = sqlConf.parquetFilterPushDownDecimal
val pushDownStringStartWith = sqlConf.parquetFilterPushDownStringStartWith
val pushDownInFilterThreshold = sqlConf.parquetFilterPushDownInFilterThreshold
(file: PartitionedFile) => {
assert(file.partitionValues.numFields == partitionSchema.size)
val fileSplit =
new FileSplit(new Path(new URI(file.filePath)), file.start, file.length, Array.empty)
val filePath = fileSplit.getPath
val split =
new org.apache.parquet.hadoop.ParquetInputSplit(
filePath,
fileSplit.getStart,
fileSplit.getStart + fileSplit.getLength,
fileSplit.getLength,
fileSplit.getLocations,
null)
val sharedConf = broadcastedHadoopConf.value.value
lazy val footerFileMetaData =
ParquetFileReader.readFooter(sharedConf, filePath, SKIP_ROW_GROUPS).getFileMetaData
// Try to push down filters when filter push-down is enabled.
val pushed = if (enableParquetFilterPushDown) {
val parquetSchema = footerFileMetaData.getSchema
val parquetFilters = new ParquetFilters(pushDownDate, pushDownTimestamp, pushDownDecimal,
pushDownStringStartWith, pushDownInFilterThreshold)
filters
// Collects all converted Parquet filter predicates. Notice that not all predicates can be
// converted (`ParquetFilters.createFilter` returns an `Option`). That's why a `flatMap`
// is used here.
.flatMap(parquetFilters.createFilter(parquetSchema, _))
.reduceOption(FilterApi.and)
} else {
None
}
// PARQUET_INT96_TIMESTAMP_CONVERSION says to apply timezone conversions to int96 timestamps'
// *only* if the file was created by something other than "parquet-mr", so check the actual
// writer here for this file. We have to do this per-file, as each file in the table may
// have different writers.
// Define isCreatedByParquetMr as function to avoid unnecessary parquet footer reads.
def isCreatedByParquetMr: Boolean =
footerFileMetaData.getCreatedBy().startsWith("parquet-mr")
val convertTz =
if (timestampConversion && !isCreatedByParquetMr) {
Some(DateTimeUtils.getTimeZone(sharedConf.get(SQLConf.SESSION_LOCAL_TIMEZONE.key)))
} else {
None
}
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext =
new TaskAttemptContextImpl(broadcastedHadoopConf.value.value, attemptId)
// Try to push down filters when filter push-down is enabled.
// Notice: This push-down is RowGroups level, not individual records.
if (pushed.isDefined) {
ParquetInputFormat.setFilterPredicate(hadoopAttemptContext.getConfiguration, pushed.get)
}
val taskContext = Option(TaskContext.get())
if (enableVectorizedReader) {
val vectorizedReader = new VectorizedParquetRecordReader(
convertTz.orNull, enableOffHeapColumnVector && taskContext.isDefined, capacity)
val iter = new RecordReaderIterator(vectorizedReader)
// SPARK-23457 Register a task completion lister before `initialization`.
taskContext.foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
vectorizedReader.initialize(split, hadoopAttemptContext)
logDebug(s"Appending $partitionSchema ${file.partitionValues}")
vectorizedReader.initBatch(partitionSchema, file.partitionValues)
if (returningBatch) {
vectorizedReader.enableReturningBatches()
}
// UnsafeRowParquetRecordReader appends the columns internally to avoid another copy.
iter.asInstanceOf[Iterator[InternalRow]]
} else {
logDebug(s"Falling back to parquet-mr")
// ParquetRecordReader returns UnsafeRow
val reader = if (pushed.isDefined && enableRecordFilter) {
val parquetFilter = FilterCompat.get(pushed.get, null)
new ParquetRecordReader[UnsafeRow](new ParquetReadSupport(convertTz), parquetFilter)
} else {
new ParquetRecordReader[UnsafeRow](new ParquetReadSupport(convertTz))
}
val iter = new RecordReaderIterator(reader)
// SPARK-23457 Register a task completion lister before `initialization`.
taskContext.foreach(_.addTaskCompletionListener[Unit](_ => iter.close()))
reader.initialize(split, hadoopAttemptContext)
val fullSchema = requiredSchema.toAttributes ++ partitionSchema.toAttributes
val joinedRow = new JoinedRow()
val appendPartitionColumns = GenerateUnsafeProjection.generate(fullSchema, fullSchema)
// This is a horrible erasure hack... if we type the iterator above, then it actually check
// the type in next() and we get a class cast exception. If we make that function return
// Object, then we can defer the cast until later!
if (partitionSchema.length == 0) {
// There is no partition columns
iter.asInstanceOf[Iterator[InternalRow]]
} else {
iter.asInstanceOf[Iterator[InternalRow]]
.map(d => appendPartitionColumns(joinedRow(d, file.partitionValues)))
}
}
}
}
override def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean = dataType match {
case _: AtomicType => true
case st: StructType => st.forall { f => supportDataType(f.dataType, isReadPath) }
case ArrayType(elementType, _) => supportDataType(elementType, isReadPath)
case MapType(keyType, valueType, _) =>
supportDataType(keyType, isReadPath) && supportDataType(valueType, isReadPath)
case udt: UserDefinedType[_] => supportDataType(udt.sqlType, isReadPath)
case _ => false
}
}
/**
 * Companion helpers for the Parquet data source: reading Parquet footers and
 * discovering/merging schemas across many part-files, optionally as a
 * distributed Spark job.
 */
object ParquetFileFormat extends Logging {
  /**
   * Merges the schemas carried by the given Parquet footers into a single
   * [[StructType]]. A serialized Spark SQL schema stored under
   * `ParquetReadSupport.SPARK_METADATA_KEY` in a footer's key-value metadata
   * takes precedence over the raw Parquet schema; parsing failures fall back
   * to converting the Parquet schema. Returns None when `footers` is empty.
   */
  private[parquet] def readSchema(
      footers: Seq[Footer], sparkSession: SparkSession): Option[StructType] = {
    val converter = new ParquetToSparkSchemaConverter(
      sparkSession.sessionState.conf.isParquetBinaryAsString,
      sparkSession.sessionState.conf.isParquetINT96AsTimestamp)
    // Serialized schema strings already processed; identical strings are parsed only once.
    val seen = mutable.HashSet[String]()
    val finalSchemas: Seq[StructType] = footers.flatMap { footer =>
      val metadata = footer.getParquetMetadata.getFileMetaData
      val serializedSchema = metadata
        .getKeyValueMetaData
        .asScala.toMap
        .get(ParquetReadSupport.SPARK_METADATA_KEY)
      if (serializedSchema.isEmpty) {
        // Falls back to Parquet schema if no Spark SQL schema found.
        Some(converter.convert(metadata.getSchema))
      } else if (!seen.contains(serializedSchema.get)) {
        seen += serializedSchema.get
        // Don't throw even if we failed to parse the serialized Spark schema. Just fallback to
        // whatever is available.
        Some(Try(DataType.fromJson(serializedSchema.get))
          .recover { case _: Throwable =>
            logInfo(
              "Serialized Spark schema in Parquet key-value metadata is not in JSON format, " +
                "falling back to the deprecated DataType.fromCaseClassString parser.")
            LegacyTypeStringParser.parse(serializedSchema.get)
          }
          .recover { case cause: Throwable =>
            logWarning(
              s"""Failed to parse serialized Spark schema in Parquet key-value metadata:
                 |\t$serializedSchema
               """.stripMargin,
              cause)
          }
          .map(_.asInstanceOf[StructType])
          .getOrElse {
            // Falls back to Parquet schema if Spark SQL schema can't be parsed.
            converter.convert(metadata.getSchema)
          })
      } else {
        // Schema string already merged via an earlier footer; skip it.
        None
      }
    }
    // Pairwise-merge per-file schemas; incompatibilities surface as SparkException.
    finalSchemas.reduceOption { (left, right) =>
      try left.merge(right) catch { case e: Throwable =>
        throw new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
      }
    }
  }
  /**
   * Reads Parquet footers in multi-threaded manner.
   * If the config "spark.sql.files.ignoreCorruptFiles" is set to true, we will ignore the corrupted
   * files when reading footers.
   */
  private[parquet] def readParquetFootersInParallel(
      conf: Configuration,
      partFiles: Seq[FileStatus],
      ignoreCorruptFiles: Boolean): Seq[Footer] = {
    // Up to 8 footers are read concurrently per call.
    ThreadUtils.parmap(partFiles, "readingParquetFooters", 8) { currentFile =>
      try {
        // Skips row group information since we only need the schema.
        // ParquetFileReader.readFooter throws RuntimeException, instead of IOException,
        // when it can't read the footer.
        Some(new Footer(currentFile.getPath(),
          ParquetFileReader.readFooter(
            conf, currentFile, SKIP_ROW_GROUPS)))
      } catch { case e: RuntimeException =>
        if (ignoreCorruptFiles) {
          logWarning(s"Skipped the footer in the corrupted file: $currentFile", e)
          None
        } else {
          throw new IOException(s"Could not read footer for file: $currentFile", e)
        }
      }
    }.flatten
  }
  /**
   * Figures out a merged Parquet schema with a distributed Spark job.
   *
   * Note that locality is not taken into consideration here because:
   *
   * 1. For a single Parquet part-file, in most cases the footer only resides in the last block of
   *    that file. Thus we only need to retrieve the location of the last block. However, Hadoop
   *    `FileSystem` only provides API to retrieve locations of all blocks, which can be
   *    potentially expensive.
   *
   * 2. This optimization is mainly useful for S3, where file metadata operations can be pretty
   *    slow. And basically locality is not available when using S3 (you can't run computation on
   *    S3 nodes).
   */
  def mergeSchemasInParallel(
      filesToTouch: Seq[FileStatus],
      sparkSession: SparkSession): Option[StructType] = {
    val assumeBinaryIsString = sparkSession.sessionState.conf.isParquetBinaryAsString
    val assumeInt96IsTimestamp = sparkSession.sessionState.conf.isParquetINT96AsTimestamp
    val serializedConf = new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())
    // !! HACK ALERT !!
    //
    // Parquet requires `FileStatus`es to read footers.  Here we try to send cached `FileStatus`es
    // to executor side to avoid fetching them again.  However, `FileStatus` is not `Serializable`
    // but only `Writable`.  What makes it worse, for some reason, `FileStatus` doesn't play well
    // with `SerializableWritable[T]` and always causes a weird `IllegalStateException`.  These
    // facts virtually prevents us to serialize `FileStatus`es.
    //
    // Since Parquet only relies on path and length information of those `FileStatus`es to read
    // footers, here we just extract them (which can be easily serialized), send them to executor
    // side, and resemble fake `FileStatus`es there.
    val partialFileStatusInfo = filesToTouch.map(f => (f.getPath.toString, f.getLen))
    // Set the number of partitions to prevent following schema reads from generating many tasks
    // in case of a small number of parquet files.
    val numParallelism = Math.min(Math.max(partialFileStatusInfo.size, 1),
      sparkSession.sparkContext.defaultParallelism)
    val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
    // Issues a Spark job to read Parquet schema in parallel.
    val partiallyMergedSchemas =
      sparkSession
        .sparkContext
        .parallelize(partialFileStatusInfo, numParallelism)
        .mapPartitions { iterator =>
          // Resembles fake `FileStatus`es with serialized path and length information.
          val fakeFileStatuses = iterator.map { case (path, length) =>
            new FileStatus(length, false, 0, 0, 0, 0, null, null, null, new Path(path))
          }.toSeq
          // Reads footers in multi-threaded manner within each task
          val footers =
            ParquetFileFormat.readParquetFootersInParallel(
              serializedConf.value, fakeFileStatuses, ignoreCorruptFiles)
          // Converter used to convert Parquet `MessageType` to Spark SQL `StructType`
          val converter = new ParquetToSparkSchemaConverter(
            assumeBinaryIsString = assumeBinaryIsString,
            assumeInt96IsTimestamp = assumeInt96IsTimestamp)
          if (footers.isEmpty) {
            Iterator.empty
          } else {
            // Fold the partition's footer schemas into one; a merge failure names the file.
            var mergedSchema = ParquetFileFormat.readSchemaFromFooter(footers.head, converter)
            footers.tail.foreach { footer =>
              val schema = ParquetFileFormat.readSchemaFromFooter(footer, converter)
              try {
                mergedSchema = mergedSchema.merge(schema)
              } catch { case cause: SparkException =>
                throw new SparkException(
                  s"Failed merging schema of file ${footer.getFile}:\n${schema.treeString}", cause)
              }
            }
            Iterator.single(mergedSchema)
          }
        }.collect()
    if (partiallyMergedSchemas.isEmpty) {
      None
    } else {
      // Driver-side merge of the per-partition results.
      var finalSchema = partiallyMergedSchemas.head
      partiallyMergedSchemas.tail.foreach { schema =>
        try {
          finalSchema = finalSchema.merge(schema)
        } catch { case cause: SparkException =>
          throw new SparkException(
            s"Failed merging schema:\n${schema.treeString}", cause)
        }
      }
      Some(finalSchema)
    }
  }
  /**
   * Reads Spark SQL schema from a Parquet footer.  If a valid serialized Spark SQL schema string
   * can be found in the file metadata, returns the deserialized [[StructType]], otherwise, returns
   * a [[StructType]] converted from the [[MessageType]] stored in this footer.
   */
  def readSchemaFromFooter(
      footer: Footer, converter: ParquetToSparkSchemaConverter): StructType = {
    val fileMetaData = footer.getParquetMetadata.getFileMetaData
    fileMetaData
      .getKeyValueMetaData
      .asScala.toMap
      .get(ParquetReadSupport.SPARK_METADATA_KEY)
      .flatMap(deserializeSchemaString)
      .getOrElse(converter.convert(fileMetaData.getSchema))
  }
  /**
   * Deserializes a schema string stored in Parquet key-value metadata into a
   * [[StructType]]. Returns None (after logging a warning) when the string is
   * neither valid JSON nor a legacy case-class-style schema string.
   */
  private def deserializeSchemaString(schemaString: String): Option[StructType] = {
    // Tries to deserialize the schema string as JSON first, then falls back to the case class
    // string parser (data generated by older versions of Spark SQL uses this format).
    Try(DataType.fromJson(schemaString).asInstanceOf[StructType]).recover {
      case _: Throwable =>
        logInfo(
          "Serialized Spark schema in Parquet key-value metadata is not in JSON format, " +
            "falling back to the deprecated DataType.fromCaseClassString parser.")
        LegacyTypeStringParser.parse(schemaString).asInstanceOf[StructType]
    }.recoverWith {
      case cause: Throwable =>
        logWarning(
          "Failed to parse and ignored serialized Spark schema in " +
            s"Parquet key-value metadata:\n\t$schemaString", cause)
        Failure(cause)
    }.toOption
  }
}
| eyalfa/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala | Scala | apache-2.0 | 30,637 |
package lr
import utilities._
object Functions {
def copy(from: Array[Float], to: Array[Float]) = {
assert(from.length == to.length)
System.arraycopy(from, 0, to, 0, from.length)
}
def exp(value: Float) = {
if (value < -10) 4.54e-5
else if (value > 10) 22026
else math.exp(value)
}
def sigmoid(value: Float) = {
if (value < -10) 4.5398e-05f
else if (value > 10) 1-1e-05f
else 1/(1+math.exp(-value).toFloat)
}
def sigmoid(value : Double): Double = {
if (value < -10) 4.5398e-05
else if (value > 10) 1-1e-05f
else 1 / (1 + math.exp(-value))
}
def getBinPred(features: SparseVector, w: Array[Float], numBins: Int): Vector = {
val prob = sigmoid(features.dot(Vector(w)))
val inc = 1.0/numBins
Vector(Array.tabulate(numBins)(i => if (prob > (i-1)*inc) 1.0f else 0.0f ))
}
def l1Prox(nume: Float, deno: Float, lambda: Float) = {
if (nume > lambda) (nume - lambda)/deno
else if (nume < -lambda) (nume + lambda)/deno
else 0f
}
def l2Prox(nume: Float, deno: Float, lambda: Float) = {
nume/(deno + lambda)
}
  /**
   * Computes the logistic-regression gradient over all data into `gradient`
   * (one entry per feature) and returns the negative log-likelihood.
   * `features` is traversed feature-major via its CSR-style arrays
   * (row_ptr/col_idx/value_r); a null value array marks binary features.
   * Side effects: overwrites `gradient` and reuses `ywtx` in place.
   */
  def getGrad(responses: Array[Boolean], features: SparseMatrix, w: Array[Float],
    gradient: Array[Float])= {
    val numData = responses.length
    val numFeatures = w.length
    val ywtx = new Array[Float](numData)
    // Fills ywtx with the per-example margins y_n * (w·x_n) — see Model.getYWTX.
    Model.getYWTX(responses, features, w, ywtx)
    val ptr = features.row_ptr
    val idx = features.col_idx
    val value = features.value_r
    // A null value array means all stored entries are implicitly 1 (binary features).
    val isBinary = value == null
    var n = 0; var obj = 0.0
    while (n < numData) {
      val exp = Functions.exp(-ywtx(n)).toFloat
      // Accumulate log(1 + exp(-margin)) with linear/zero approximations
      // outside (-10, 10), matching the clamping in Functions.exp.
      if (ywtx(n) < -10) obj -= ywtx(n)
      else if (ywtx(n) > -10 && ywtx(n) < 10) obj += math.log(1 + exp)
      // Reuse ywtx to hold sigmoid(margin) for the gradient pass below.
      ywtx(n) = 1/(1+exp)
      n += 1
    }
    var p = 0
    while (p < numFeatures) {
      var i = ptr(p)
      gradient(p) = 0
      // Sum each example's contribution +/- (1 - sigmoid(margin)) * x_np,
      // signed by the example's label.
      while (i < ptr(p+1)) {
        val n = idx(i)
        if (responses(n) && isBinary) gradient(p) += (1 - ywtx(n))
        else if (responses(n) && !isBinary) gradient(p) += (1 - ywtx(n))*value(i)
        else if (!responses(n) && isBinary) gradient(p) -= (1 - ywtx(n))
        else gradient(p) -= (1 - ywtx(n))*value(i)
        i += 1
      }
      p += 1
    }
    // Returns the accumulated objective value.
    obj
  }
  /**
   * Computes the scalar curvature u'Hu of the logistic loss along direction `u`,
   * i.e. sum_n sigmoid(w·x_n)(1-sigmoid(w·x_n)) * (u·x_n)^2, with the per-example
   * weight floored at 1e-5 for numerical stability.
   * NOTE(review): numCols is treated as the number of examples and numRows as the
   * number of features, i.e. the matrix is stored feature-major — confirm against
   * SparseMatrix's definition.
   */
  def getHessian(features: SparseMatrix, w: Array[Float], u: Array[Float]) = {
    val numData = features.numCols
    val numFeatures = features.numRows
    val ptr = features.row_ptr
    val idx = features.col_idx
    val value = features.value_r
    // A null value array means all stored entries are implicitly 1 (binary features).
    val isBinary = value == null
    val wtx = new Array[Float](numData)
    val utx = new Array[Float](numData)
    var p = 0
    // First pass: accumulate the per-example inner products w·x_n and u·x_n.
    while (p < numFeatures) {
      var i = ptr(p)
      while (i < ptr(p+1)) {
        val n = idx(i)
        if (isBinary) {
          wtx(n) += w(p)
          utx(n) += u(p)
        }
        else {
          wtx(n) += w(p)*value(i)
          utx(n) += u(p)*value(i)
        }
        i += 1
      }
      p += 1
    }
    var n = 0
    var hessian = 0f
    // Second pass: weight (u·x_n)^2 by the sigmoid curvature, floored at 1e-5.
    while (n < numData) {
      val sigmoid = Functions.sigmoid(wtx(n))
      val alpha = sigmoid*(1-sigmoid)
      if (alpha > 1e-5f) hessian += alpha*utx(n)*utx(n)
      else hessian += 1e-5f*utx(n)*utx(n)
      n += 1
    }
    hessian
  }
def getAUC(tpr: Array[Float], fpr: Array[Float]): Float = {
assert(tpr.length == fpr.length)
var tpr_prev = 0.0f
var fpr_prev = 0.0f
var auc = 0.0f
for (i <- tpr.length-1 to 0 by -1) {
auc += 0.5f*(tpr(i)+tpr_prev)*(fpr(i)-fpr_prev)
tpr_prev = tpr(i)
fpr_prev = fpr(i)
}
auc
}
def getLLH(data: Pair[Boolean, SparseVector], w: SparseVector): Double = {
val features = data._2
val response =
if (data._1) 1
else -1
val yxw = response*features.dot(w)
if (yxw > -10) -math.log(1 + math.exp(-yxw))
else yxw
}
def getLLH(data : (Boolean, SparseVector), w : Vector) : Double = {
val features = data._2
val response =
if (data._1) 1
else -1
val yxw = response*features.dot(w)
if (yxw > -10) -math.log(1 + math.exp(-yxw))
else yxw
}
def getLLH(response: Int, feature: SparseVector, w : Array[Float]) : Double = {
val yxw = response*feature.dot(w)
if (yxw > -10) -math.log(1 + math.exp(-yxw))
else yxw
}
def getLLH(data : (Boolean, SparseVector), w : Array[Float]) : Double = {
val features = data._2
val response =
if (data._1) 1
else -1
val yxw = response*features.dot(Vector(w))
if (yxw > -10) -math.log(1 + math.exp(-yxw))
else yxw
}
def getGradient(data : Pair[Boolean, SparseVector], w : Vector) : SparseVector = {
val features = data._2
val response =
if (data._1) 1
else -1
features*response*(1-sigmoid(response * features.dot(w)))
}
def getGradient(data : Pair[Boolean, SparseVector], w : SparseVector)
: SparseVector = {
val features = data._2
val response =
if (data._1) 1
else -1
features*response*(1-sigmoid(response * features.dot(w)))
}
def getHessian(data : Pair[Boolean, SparseVector], w : Vector, u : Vector) : Float = {
val features = data._2
val sigma = sigmoid(features.dot(w))
val ux = features.dot(u)
return (sigma*(1 - sigma))*ux*ux
}
def getHessian(data : Pair[Boolean, SparseVector], w : SparseVector, u : SparseVector) : Float = {
val features = data._2
val sigma = sigmoid(features.dot(w))
val ux = features.dot(u)
return (sigma*(1 - sigma))*ux*ux
}
  /**
   * Local conjugate-gradient solver for L2-regularized logistic regression on
   * one partition's data, started from (and regularized toward) the global
   * weights `w`. Runs exactly `max_iter` CG steps. When `bayes` is set, the
   * regularization weight gamma is re-estimated from the squared distance of
   * the solution to `w`; returns (wi * gamma_updated as a raw array, gamma_updated).
   */
  def dist_cg(data : Array[(Boolean, SparseVector)], w: Vector, gamma: Float,
    max_iter: Int, bayes: Boolean) : (Array[Float], Float) = {
    var wi = w
    val gamma_old = gamma
    val P = w.length
    var delta_w = Vector.ones(P)
    var u = Vector.ones(P)
    var g_old = Vector.ones(P)
    var iter = 0
    while (iter < max_iter) {
      iter += 1
      // Gradient of the penalized objective: data gradient minus gamma*(wi - w).
      val g = data.par.map(pair => getGradient(pair, wi)).reduce(_+_) - (wi - w)*gamma_old
      if (iter > 1) {
        // Conjugate direction update (beta from current/previous gradients).
        val delta_g = g - g_old
        val beta = g.dot(delta_g)/u.dot(delta_g)
        u = g - u*beta
      }
      else u = g
      g_old = g
      // Directional curvature of the data term along u.
      val h = data.par.map(pair => getHessian(pair, wi, u)).reduce(_+_)
      // Exact line search along u for a quadratic model: step = g·u / u'Hu.
      delta_w = g.dot(u)/(gamma_old*u.dot(u) + h)*u
      wi = wi + delta_w
    }
    // Bayesian re-estimate of the precision from the deviation to the global w.
    val gamma_updated = if (bayes)
      (0.5f + 0.5f*P)/(0.5f + 0.5f*wi.squaredDist(w))
    else
      gamma
    ((wi*gamma_updated).elements, gamma_updated)
  }
  /**
   * One ADMM local update for logistic regression, solved with conjugate
   * gradient (dense-weight variant; "stable version for the non-sparse features").
   * `wyr_dist_i` carries this partition's (weights, dual variable, gamma);
   * `w0` is the current global consensus. Returns the updated
   * (weights, dual, gamma, local objective, CG iterations used).
   */
  def ADMM_CG(data : Array[(Boolean, SparseVector)],
    wyr_dist_i: Tuple3[Array[Float], Array[Float], Float], w0: Array[Float],
    gamma0: Float, max_iter: Int, bayes: Boolean, warmStart : Boolean, obj_th: Double)
    : Tuple5[Array[Float], Array[Float], Float, Float, Int] = {
    // stable version for the non-sparse features
    var wi = Vector(wyr_dist_i._1)
    val ui_old = Vector(wyr_dist_i._2)
    val gamma_old = if (bayes) wyr_dist_i._3 else gamma0
    val w_global = Vector(w0)
    // Warm start: standard ADMM dual update u += wi - w_global (non-Bayes only).
    val ui_new = if (!bayes && warmStart) ui_old + wi - w_global else ui_old
    val P = w_global.length
    var delta_w = Vector.ones(P)
    var u = Vector.ones(P)
    var g_old = Vector.ones(P)
    // Proximal center for this partition's subproblem.
    val prior = w_global-ui_new
    var iter = 0
    var obj = -Double.MaxValue
    var obj_old = Double.NegativeInfinity
    var l2diff = 0f
    // Stop on iteration cap, objective stall, or a (near-)vanishing gradient.
    while (iter < max_iter && obj-obj_old > obj_th && g_old.squaredL2Norm > 1e-3) {
      iter += 1
      obj_old = obj
      val g = data.par.map(pair => getGradient(pair, wi)).reduce(_+_) - (wi - prior)*gamma_old
      if (iter > 1) {
        val delta_g = g - g_old
        var beta = g.dot(delta_g)/u.dot(delta_g)
        // Clip beta to [-10, 10] to keep the conjugate direction bounded.
        beta = if (beta>10) 10 else if (beta < -10) -10 else beta
        u = g - u*beta
      }
      else u = g
      g_old = g
      // 1e-5f guards against division by a zero curvature.
      val h = data.par.map(pair => getHessian(pair, wi, u)).reduce(_+_) + gamma_old*u.dot(u) + 1e-5f
      delta_w = g.dot(u)/h*u
      wi = wi + delta_w
      l2diff = wi.squaredDist(prior)
      obj = data.par.map(pair => getLLH(pair, wi)).reduce(_+_) - gamma_old/2*l2diff
    }
    // Strip the old penalty before re-applying it with the updated gamma below.
    obj += gamma_old/2*l2diff
    val gamma_new =
      if (bayes) {
        // Gamma is only allowed to grow across iterations.
        val gamma = (0.5f + 0.5f*P)/(0.5f + 0.5f*(l2diff+gamma0))
        math.max(gamma, gamma_old)
      }
      else gamma_old
    if (bayes)
      obj -= gamma_new/2*l2diff
    else
      obj -= ui_new.dot(wi-w_global)*gamma_new
    (wi.elements, ui_new.elements, gamma_new, obj.toFloat, iter)
  }
  /**
   * One ADMM local update for logistic regression, solved with conjugate
   * gradient — sparse variant that only touches the feature indices present in
   * this partition (`keyArray`). Returns (keys, weights, dual, gamma,
   * local objective, CG iterations used).
   * NOTE(review): the local `beta` inside the loop shadows the `beta` parameter
   * (the gamma-prior rate) — confirm that is intentional.
   */
  def ADMM_CG(data : Array[(Boolean, SparseVector)],
    wyr_dist_i: Tuple4[Array[Int], Array[Float], Array[Float], Float], w0: Array[Float], gamma: Float,
    alpha: Float, beta: Float, max_iter: Int, bayes: Boolean, warmStart : Boolean, obj_th: Double)
    : Tuple6[Array[Int], Array[Float], Array[Float], Float, Float, Int] = {
    //the stable version, exploits the sparse structure in each partition
    val keyArray = wyr_dist_i._1
    var wi = SparseVector(keyArray, wyr_dist_i._2)
    val ui = SparseVector(keyArray, wyr_dist_i._3)
    val w_global = SparseVector(keyArray, Vector(w0))
    val gamma_old = if (bayes) wyr_dist_i._4 else gamma
    // Warm start: standard ADMM dual update u += wi - w_global (non-Bayes only).
    val ui_new = if (!bayes && warmStart) ui + wi - w_global else ui
    // Proximal center for this partition's subproblem.
    val prior = w_global-ui_new
    var delta_w = SparseVector(keyArray)
    var u = SparseVector(keyArray)
    var g_old = SparseVector(keyArray)
    var iter = 0
    var obj = -Double.MaxValue
    var obj_old = Double.NegativeInfinity
    var l2diff = 0f
    // Stop on iteration cap, objective stall, or a (near-)vanishing gradient.
    while (iter < max_iter && math.abs(obj-obj_old) > obj_th && g_old.squaredL2Norm > 1e-3) {
      iter += 1
      obj_old = obj
      val g = data.par.map(pair => getGradient(pair, wi)).reduce(_+_) - (wi - prior)*gamma_old
      if (iter > 1) {
        val delta_g = g - g_old
        // 1e-5f terms guard the ratio against 0/0; note this `beta` shadows the parameter.
        val beta = (g.dot(delta_g) + 1e-5f)/(u.dot(delta_g) + 1e-5f)
        u = g - u*beta
      }
      else u = g
      g_old = g
      // 1e-5f guards against division by a zero curvature.
      val h = data.par.map(pair => getHessian(pair, wi, u)).reduce(_+_) + gamma_old*u.dot(u) + 1e-5f
      delta_w = g.dot(u)/h*u
      wi = wi + delta_w
      l2diff = wi.squaredL2Dist(prior)
      obj = data.par.map(pair => getLLH(pair, wi)).reduce(_+_) - gamma_old/2*l2diff
    }
    // Strip the old penalty before re-applying it with the updated gamma below.
    obj += gamma_old/2*l2diff
    val gamma_new =
      if (bayes)
        (alpha + 0.5f*wi.size)/(beta + 0.5f*(l2diff+gamma))
      else
        gamma
    if (bayes)
      obj -= gamma_new/2*l2diff
    else
      obj -= ui_new.dot(wi-w_global)*gamma_new
    (keyArray, wi.getValues, ui_new.getValues, gamma_new, obj.toFloat, iter)
  }
  /**
   * Local conjugate-gradient update where the partition weights carry a
   * heavy-tailed Student-t prior centered at the global weights `w0` (Bayes
   * mode), or plain L2 regularization otherwise. Partitions whose current
   * gamma falls below `filter_th` are returned unchanged (short-circuit).
   * Returns (keys, weights, dual, updated gamma, squared residual, iterations).
   */
  def dist_cg_t(data : Array[(Boolean, SparseVector)],
    wy_dist_i: Tuple3[Array[Int], Array[Float], Array[Float]], w0: Vector, gamma: Float,
    alpha: Float, beta: Float, max_iter: Int, obj_th: Double, bayes: Boolean, filter_th : Double)
    : Tuple6[Array[Int], Array[Float], Array[Float], Float, Float, Int] = {
    // prior on w_i is the heavy-tail t-distribution
    val keyArray = wy_dist_i._1
    var wi = SparseVector(keyArray, wy_dist_i._2)
    val yi = SparseVector(keyArray, wy_dist_i._3)
    val w = SparseVector(keyArray, w0)
    // Dual update in the non-Bayes (ADMM-style) path.
    val yi_new = if (!bayes) yi + wi - w else yi
    // val prior = if (bayes) w else w-yi_new
    val prior = w
    val P = keyArray.size
    var residual = (wi-prior).squaredL2Norm
    // Skip low-precision partitions entirely; gamma is pinned at filter_th.
    if (bayes && gamma < filter_th)
      return (keyArray, wy_dist_i._2, wy_dist_i._3, filter_th.toFloat, residual, 0)
    var u = SparseVector(keyArray)
    var g_old = SparseVector(keyArray)
    var iter = 0
    var obj = -Double.MaxValue
    var obj_old = Double.NegativeInfinity
    while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
      iter += 1
      obj_old = obj
      val diff = wi - w
      // Effective precision: posterior-mean estimate under the t prior, or fixed gamma.
      val weight = if (bayes) (alpha+P*0.5f)/(beta + 0.5f*residual) else gamma
      val g = data.par.map(pair => getGradient(pair, wi)).reduce(_+_) - weight*diff
      if (iter > 1) {
        val delta_g = g - g_old
        // NOTE(review): this local `beta` shadows the `beta` parameter.
        val beta = g.dot(delta_g)/u.dot(delta_g)
        u = g - u*beta
      }
      else u = g
      g_old = g
      // Curvature of the regularizer; the Bayes branch includes the t-prior correction term.
      val h_reg = if (bayes) {
        val diff_u = u.dot(diff)
        - weight*(u.dot(u)-diff_u*diff_u/(beta + 0.5f*residual))
      }
      else
        - gamma*u.dot(u)
      val h = - data.par.map(pair => getHessian(pair, wi, u)).reduce(_+_) + h_reg
      val delta_w = g.dot(u)/h*u
      wi = wi - delta_w
      residual = (wi-prior).squaredL2Norm
      // Log-penalty of the t prior in Bayes mode, quadratic penalty otherwise.
      val obj_reg = if (bayes) (alpha+P*0.5f)*math.log(1+residual/(2*beta)) else gamma/2*residual
      obj = data.par.map(pair => getLLH(pair, wi)).reduce(_+_) - obj_reg
    }
    val gamma_updated = if (bayes) (alpha + 0.5f*P)/(beta + 0.5f*residual) else gamma
    (keyArray, wi.getValues, yi_new.getValues, gamma_updated, residual, iter)
  }
  /**
   * Local conjugate-gradient solver, sparse-key variant: `data._1` lists the
   * feature indices active in this partition; `wi` is initialized from the
   * global weights `w0` restricted to those keys. Returns (keys, weights,
   * possibly re-estimated gamma).
   * NOTE(review): unlike the other variants, `obj` here *adds* the quadratic
   * penalty term instead of subtracting it — confirm the sign is intentional.
   */
  def dist_cg(data : (Array[Int], Array[(Boolean, SparseVector)]), w0: Vector, gamma: Float,
    max_iter: Int, th: Double, bayes: Boolean): Tuple3[Array[Int], Array[Float], Float] = {
    // initialization of each partition-specific w_i is based on the global weight w
    val keyArray = data._1
    var wi = SparseVector(keyArray, w0)
    val w = SparseVector(keyArray, w0)
    var delta_w = SparseVector(keyArray)
    var u = SparseVector(keyArray)
    var g_old = SparseVector(keyArray)
    var iter = 0
    var obj = 0.0
    var obj_old = Double.NegativeInfinity
    while (iter < max_iter && math.abs(obj-obj_old) > th) {
      iter += 1
      obj_old = obj
      val g = data._2.par.map(pair => getGradient(pair, wi)).reduce(_+_) - (wi - w)*gamma
      if (iter > 1) {
        // Conjugate direction update (beta from current/previous gradients).
        val delta_g = g - g_old
        val beta = g.dot(delta_g)/u.dot(delta_g)
        u = g - u*beta
      }
      else u = g
      g_old = g
      val h = data._2.par.map(pair => getHessian(pair, wi, u)).reduce(_+_)
      delta_w = g.dot(u)/(gamma*u.dot(u) + h)*u
      wi = wi + delta_w
      obj = data._2.par.map(pair => getLLH(pair, wi)).reduce(_+_) + gamma/2*(wi - w).squaredL2Norm
    }
    // Bayesian re-estimate of the precision from the deviation to the global w.
    val gamma_updated = if (bayes)
      (0.5f + 0.5f*wi.size)/(0.5f + 0.5f*wi.squaredL2Dist(w))
    else
      gamma
    (keyArray, wi.getValues, gamma_updated)
  }
  /**
   * Coordinate-descent solver for elastic-net logistic regression over a
   * column-view of the data: `data_colView._1` holds labels, `data_colView._2`
   * is one (globalFeatureIndex, column) pair per local feature. `w_dist` is this
   * partition's (feature indices, weights) and is updated in place; `w0` is the
   * global consensus, `eta` the per-feature L1 weights, `gamma` the L2/prox
   * weight. Returns (indices, updated weights, possibly ARD-updated gamma,
   * iterations run).
   */
  def EN_CD(data_colView : Pair[Array[Boolean], Array[(Int, SparseVector)]],
    w_dist: (Array[Int], Array[Float]), w0: Array[Float], eta: Array[Float], gamma: Float,
    max_iter: Int, obj_th: Double, l1: Boolean, l2: Boolean, ard: Boolean, warmStart: Boolean)
    : Tuple4[Array[Int], Array[Float], Float, Int] = {
    //coordinate descent for elastic net
    val y = data_colView._1
    val x_colView = data_colView._2
    val numData = y.length
    val w_indices = w_dist._1
    val w_values = w_dist._2
    // Snapshot of the weights from the previous sweep, used for the delta check below.
    val w_values_old = Array.tabulate(w_values.length)(i => w_values(i))
    val numLocalFeatures = w_indices.length
    assert(numLocalFeatures == x_colView.length)
    // sigma_wx(n) holds sigmoid(w·x_n); residual(n) is a per-example working term.
    val sigma_wx = new Array[Float](numData)
    val residual = new Array[Float](numData)
    // Note: SparseVector presumably aliases w_indices/w_values, so w_updated
    // tracks in-place updates to w_values — TODO confirm.
    val w_updated = SparseVector(w_indices, w_values)
    val w_global = SparseVector(w_indices, Vector(w0))
    var iter = 0
    var obj = -Double.MaxValue
    var obj_old = Double.NegativeInfinity
    if (warmStart) {
      // calculating the weight using previous w
      var i = 0
      while (i < numLocalFeatures) {
        // p is the global feature index, i is the local feature index
        val p = x_colView(i)._1
        val isBinary = x_colView(i)._2.isBinary
        assert(p==w_indices(i), "feature index mismatch")
        val x_p_indices = x_colView(i)._2.getIndices
        var j = 0
        while (j < x_p_indices.length) {
          val n = x_p_indices(j)
          val x_pn = if (isBinary) 1 else x_colView(i)._2.getValues(j)
          sigma_wx(n) += x_pn*w_values(i)
          j += 1
        }
        i += 1
      }
    }
    var n = 0
    // Initialize sigmoids (0.5 = sigmoid(0) for a cold start) and clear residuals.
    while (n < numData) {
      sigma_wx(n) = if (warmStart) sigmoid(sigma_wx(n)) else 0.5f
      residual(n) = 0
      n += 1
    }
    // Outer sweeps until the objective stalls or max_iter is hit.
    while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
      var innerIter = 0
      var i = 0
      // One full coordinate sweep over the local features.
      while (i < numLocalFeatures) {
        val p = x_colView(i)._1
        val isBinary = x_colView(i)._2.isBinary
        assert(p==w_indices(i), "feature index mismatch")
        val x_p_indices = x_colView(i)._2.getIndices
        // nume/deno accumulate the Newton-style numerator and curvature for coordinate i.
        var nume = 0f
        var deno = 0f
        var j = 0
        while (j < x_p_indices.length) {
          val n = x_p_indices(j)
          val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
          // Temporarily add this coordinate's contribution back into the residual.
          residual(n) += w_values(i)*x_np
          if (y(n)) nume += (1-sigma_wx(n))*x_np*(1+sigma_wx(n)*residual(n))
          else nume += sigma_wx(n)*x_np*(-1+(1-sigma_wx(n))*residual(n))
          // Floor the curvature weight when the sigmoid saturates.
          if (sigma_wx(n) < 1e-5 || sigma_wx(n) > 1-1e-5) deno += 1e-5f*x_np*x_np
          else deno += sigma_wx(n)*(1-sigma_wx(n))*x_np*x_np
          residual(n) -= w_values(i)*x_np
          j += 1
        }
        if (l2) {
          nume += gamma*w0(p)
          deno += gamma
        }
        else deno += gamma
        // L1 branch: soft-thresholding centered at the global weight w0(p).
        w_values(i) =
          if (l1) {
            if ((nume - eta(p))/deno > w0(p))
              (nume - eta(p))/deno
            else if ((nume + eta(p))/deno < w0(p))
              (nume + eta(p))/deno
            else
              w0(p)
          }
          else nume/deno
        // nume += gamma*prior_values(i)
        // deno += gamma
        //
        // val w_values_old = w_values(i)
        // w_values(i) = nume/deno
        // Propagate a materially changed coordinate into the residuals.
        if (math.abs(w_values_old(i)-w_values(i)) > 1e-3) {
          var j = 0
          while (j < x_p_indices.length) {
            val n = x_p_indices(j)
            val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
            residual(n) += (w_values_old(i)-w_values(i))*x_np
            j += 1
          }
        }
        i += 1
      }
      // Reset residuals before recomputing the margins from scratch.
      n = 0; while (n<numData) {residual(n) = 0; n += 1}
      // Regularization value for this sweep (computed but currently not folded
      // into obj — the `obj -= reg` below is commented out).
      var reg = if (l2) gamma/2*(w_updated.squaredL2Dist(w_global)) else 0
      if (l1) {
        var i = 0
        while (i < numLocalFeatures) {
          val p = x_colView(i)._1
          reg += eta(p)*math.abs(w_values(i) - w0(p))
          i += 1
        }
      }
      // Recompute w·x_n for every example with the updated weights.
      n = 0; while (n < numData) { sigma_wx(n) = 0; n += 1 }
      i = 0
      while (i < numLocalFeatures) {
        // p is the global feature index, i is the local feature index
        val p = x_colView(i)._1
        assert(p==w_indices(i), "feature index mismatch")
        val x_p_indices = x_colView(i)._2.getIndices
        val isBinary = x_colView(i)._2.isBinary
        var j = 0
        while (j < x_p_indices.length) {
          val n = x_p_indices(j)
          val x_pn = if (isBinary) 1 else x_colView(i)._2.getValues(j)
          sigma_wx(n) += x_pn*w_values(i)
          j += 1
        }
        // Roll the snapshot forward for the next sweep's delta check.
        w_values_old(i) = w_values(i)
        i += 1
      }
      // Evaluate the log-likelihood (linearized below a margin of -5) and
      // convert margins to sigmoids for the next sweep.
      obj = 0
      n = 0
      while (n < numData) {
        val y_n = if (y(n)) 1 else -1
        //calculate the objective function
        if (y_n*sigma_wx(n) > -5) obj += -math.log(1 + math.exp(-y_n*sigma_wx(n)))
        else obj += y_n*sigma_wx(n)
        sigma_wx(n) = sigmoid(sigma_wx(n))
        n += 1
      }
      // obj -= reg
      iter += 1
      // println("cd iter: " + iter + " new obj: " + obj)
    }
    // if (iter == max_iter)
    //   println("this one didn't converge! " + "obj: " + obj + " \t old obj: " + obj_old + "\n")
    // else println("this one converged! Iter: " + iter + "\n")
    // ARD: re-estimate gamma from the deviation to the global weights.
    val gamma_new =
      if (ard)
        (1 + 0.5f*numLocalFeatures)/(1 + 0.5f*w_updated.squaredL2Dist(w_global))
      else
        gamma
    (w_indices, w_values, gamma_new, iter)
  }
  /**
   * Consensus update for one global weight coordinate given the per-partition
   * values `w_p` (partitionId -> local weight). Minimizes
   * sum_k [ eta_p*|w - w_pk| (if l1) + gamma(k)/2*(w - w_pk)^2 (if l2) ] + lambda/2*w^2.
   * Without L1 the minimizer is closed-form (nume/deno); with L1 the kink
   * structure makes the solution of the form (nume + eta_p*d)/deno for an
   * integer d in [-numPartitions, numPartitions], found by binary search on
   * the (convex) objective. Returns the minimizing w.
   */
  def updateW_BS(w_p: Seq[(Int, Float)], eta_p: Float, gamma: Array[Float], lambda: Float,
    l1: Boolean, l2: Boolean) : Float = {
    //a binary search based solution to update global weight parameter w
    val numPartitions = w_p.length
    def getObj(w: Float) : Float = {
      // calculate the objective function
      var sum = 0f
      var i = 0
      while (i < numPartitions) {
        val k = w_p(i)._1
        val w_pk = w_p(i)._2
        if (l1) sum += eta_p*math.abs(w-w_pk)
        if (l2) sum += gamma(k)/2*(w-w_pk)*(w-w_pk)
        i += 1
      }
      sum + lambda/2*w*w
    }
    // Closed-form pieces of the quadratic part: w* = nume/deno absent L1.
    var nume = 0f
    var deno = 0f
    var i = 0
    if (l2) {
      while (i < numPartitions) {
        val k = w_p(i)._1
        val w_pk = w_p(i)._2
        nume += w_pk*gamma(k)
        deno += gamma(k)
      i += 1
      }
    }
    deno += lambda
    if (l1) {
      //binary search to find the optimal w
      // d counts the net number of subgradient terms of the L1 part; search the
      // integer shift d whose candidate (nume + eta_p*d)/deno minimizes the objective.
      var d_max = numPartitions
      var d_min = -numPartitions
      var i = 0
      while (d_max > d_min) {
        val d_mid = if (d_min+1==d_max) d_min else (d_max+d_min)/2
        val obj_mid = getObj((nume+eta_p*d_mid)/deno)
        val obj_right = getObj((nume+eta_p*(d_mid+1))/deno)
        // If stepping right does not improve, the minimum is at or left of d_mid.
        if(obj_right >= obj_mid) d_max = d_mid
        else d_min = d_mid+1
        i += 1
        // assert(i <= math.log(numPartitions*2)/math.log(2)+2,
        //   "binary search reached " + i + " iterations! d_min: " + d_min + " d_max: " + d_max +
        //   " numPartitions: " + numPartitions)
      }
      assert(d_min == d_max)
      (nume+eta_p*d_min)/deno
    }
    else {
      nume/deno
    }
  }
  // TODO: rewrite the logistic regression code using the column-sparse format.
def ADMM_CD(data_colView : Pair[Array[Boolean], Array[(Int, SparseVector)]],
wug_dist: (Array[Int], Array[Float], Array[Float], Float), w0: Array[Float], gamma: Float,
alpha: Float, beta: Float, max_iter: Int, bayes: Boolean, warmStart : Boolean, obj_th: Double)
: Tuple6[Array[Int], Array[Float], Array[Float], Float, Float, Int] = {
//coordinate descent for (B)-ADMM, sparse version
val y = data_colView._1
val x_colView = data_colView._2
val numData = y.length
val key_indices = wug_dist._1
val w_values = wug_dist._2
val w_updated = SparseVector(key_indices, w_values)
val u_old = SparseVector(key_indices, wug_dist._3)
val w_global = SparseVector(key_indices, Vector(w0))
val gamma_old = if (bayes) wug_dist._4 else gamma
val u_new = if (!bayes && warmStart) u_old + w_updated - w_global else u_old
val prior = w_global-u_new
val prior_values = prior.getValues
val numLocalFeatures = key_indices.length
assert(numLocalFeatures == x_colView.length)
val sigma_wx = new Array[Float](numData)
val residual = new Array[Float](numData)
var iter = 0
var obj = -Double.MaxValue
var obj_old = Double.NegativeInfinity
var reg = 0f
if (warmStart) {
// calculating the weight using previous w
var i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
assert(p==key_indices(i), "feature index mismatch")
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_pn = if (isBinary) 1 else x_colView(i)._2.getValues(j)
sigma_wx(n) += x_pn*w_values(i)
j += 1
}
i += 1
}
}
var n = 0
while (n < numData) {
sigma_wx(n) = if (warmStart) sigmoid(sigma_wx(n)) else 0.5f
//why set the residual to 0 at the beginning?
residual(n) = 0
n += 1
}
while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
obj_old = obj
var i = 0
while (i < numLocalFeatures) {
val p = x_colView(i)._1
assert(p==key_indices(i), "feature index mismatch")
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var nume = 0f
var deno = 0f
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
residual(n) += w_values(i)*x_np
if (y(n)) nume += (1-sigma_wx(n))*x_np*(1+sigma_wx(n)*residual(n))
else nume += sigma_wx(n)*x_np*(-1+(1-sigma_wx(n))*residual(n))
// if (y(n)) nume += (1-sigma_wx(n))*sigma_wx(n)*x_np*(1/sigma_wx(n)+residual(n))
// else nume += (1-sigma_wx(n))*sigma_wx(n)*x_np*(-1/(1-sigma_wx(n))+residual(n))
if (sigma_wx(n) < 1e-5 || sigma_wx(n) > 1-1e-5) deno += 1e-5f*x_np*x_np
else deno += sigma_wx(n)*(1-sigma_wx(n))*x_np*x_np
residual(n) -= w_values(i)*x_np
j += 1
}
nume += gamma_old*prior_values(i)
deno += gamma_old
val w_values_old = w_values(i)
w_values(i) = nume/deno
if (math.abs(w_values_old-w_values(i)) > 1e-5) {
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
residual(n) += (w_values_old-w_values(i))*x_np
j += 1
}
}
i += 1
}
reg = gamma_old/2*(w_updated.squaredL2Dist(prior))
//why set the residual to 0 before each iteration starts?
n = 0; while (n < numData) { sigma_wx(n) = 0; residual(n) = 0; n += 1 }
i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_pn = if (isBinary) 1 else x_colView(i)._2.getValues(j)
sigma_wx(n) += x_pn*w_values(i)
j += 1
}
i += 1
}
obj = 0
n = 0
while (n < numData) {
val y_n = if (y(n)) 1 else -1
//calculate the objective function
if (y_n*sigma_wx(n) > -5) obj += -math.log(1 + math.exp(-y_n*sigma_wx(n)))
else obj += y_n*sigma_wx(n)
sigma_wx(n) = sigmoid(sigma_wx(n))
n += 1
}
obj -= reg
iter += 1
}
val gamma_new =
if (bayes)
(alpha + 0.5f*numLocalFeatures)/(beta + 0.5f*(w_updated.squaredL2Dist(prior)+gamma))
else
gamma
obj += reg
if (bayes) obj -= gamma_new/2*(w_updated.squaredL2Dist(w_global))
else obj -= u_new.dot(w_updated - w_global)*gamma_new
(key_indices, w_values, u_new.getValues, gamma_new, obj.toFloat, iter)
}
/**
 * Coordinate descent for the local subproblem of (Bayesian) ADMM logistic
 * regression, dense-weight version: the weight array is addressed directly by
 * the global feature index `p`.
 *
 * @param data_colView (labels, column view of the design matrix); each column
 *                     entry is (globalFeatureIndex, sparse column of values)
 * @param wug_dist     (w, u, gamma) carried over from the previous ADMM round
 * @param w0           global (consensus) weights
 * @param gamma        fixed penalty strength, used when `bayes` is false
 * @param alpha        shape hyper-parameter of the Gamma prior (Bayesian mode)
 * @param beta         rate hyper-parameter of the Gamma prior (Bayesian mode)
 * @param max_iter     maximum number of coordinate-descent sweeps
 * @param bayes        if true, gamma is re-estimated at the end from the posterior
 * @param warmStart    if true, initialize predictions from the incoming w and
 *                     (non-Bayesian mode) pre-apply the dual update to u
 * @param obj_th       stop when the objective changes by less than this amount
 * @return (w, u, gamma, objective, iterations run)
 */
def ADMM_CD(data_colView : Pair[Array[Boolean], Array[(Int, SparseVector)]],
wug_dist: (Array[Float], Array[Float], Float), w0: Array[Float], gamma: Float,
alpha: Float, beta: Float, max_iter: Int, bayes: Boolean, warmStart : Boolean, obj_th: Double)
: Tuple5[Array[Float], Array[Float], Float, Float, Int] = {
//coordinate descent for (B)-ADMM, non-sparse version
val y = data_colView._1
val x_colView = data_colView._2
val numData = y.length
val w_values = wug_dist._1
val w_updated = Vector(w_values) // NOTE(review): presumably wraps (not copies) w_values so it sees the in-place updates below - verify Vector semantics
val u_old = Vector(wug_dist._2)
val w_global = Vector(w0)
val gamma_old = if (bayes) wug_dist._3 else gamma
// Non-Bayesian warm start folds the dual update u <- u + w - w0 in up-front.
val u_new = if (!bayes && warmStart) u_old + w_updated - w_global else u_old
val prior = w_global-u_new // target that the quadratic penalty pulls w towards
val prior_values = prior.elements
val numLocalFeatures = x_colView.length
val sigma_wx = new Array[Float](numData) // per-example predicted probability, sigmoid(w.x)
val residual = new Array[Float](numData) // scratch accumulator used during each coordinate update
var iter = 0
var obj = -Double.MaxValue
var obj_old = Double.NegativeInfinity
var reg = 0f // quadratic penalty term of the objective
if (warmStart) {
// calculating the weight using previous w
var i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
var j = 0
val isBinary = x_colView(i)._2.isBinary // binary columns contribute 1 per stored entry
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
sigma_wx(n) += x_np*w_values(p)
j += 1
}
i += 1
}
}
var n = 0
while (n < numData) {
// Cold start uses sigmoid(0) = 0.5 for every example.
sigma_wx(n) = if (warmStart) sigmoid(sigma_wx(n)) else 0.5f
residual(n) = 0
n += 1
}
// Outer loop: one full sweep over the local features per iteration, until the
// objective stabilizes or max_iter is reached.
while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
obj_old = obj
var i = 0
while (i < numLocalFeatures) {
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var nume = 0f // numerator of the closed-form single-coordinate update
var deno = 0f // denominator (curvature-like term) of the update
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
residual(n) += w_values(p)*x_np // temporarily include this coordinate's contribution
if (y(n)) nume += (1-sigma_wx(n))*x_np*(1+sigma_wx(n)*residual(n))
else nume += sigma_wx(n)*x_np*(-1+(1-sigma_wx(n))*residual(n))
// Clamp the curvature away from zero when the prediction saturates.
if (sigma_wx(n) < 1e-5 || sigma_wx(n) > 1-1e-5) deno += 1e-5f*x_np*x_np
else deno += sigma_wx(n)*(1-sigma_wx(n))*x_np*x_np
residual(n) -= w_values(p)*x_np // undo the temporary contribution
j += 1
}
// Pull towards prior = w0 - u with strength gamma_old.
nume += gamma_old*prior_values(p)
deno += gamma_old
val w_values_old = w_values(p)
w_values(p) = nume/deno
// Propagate the weight change into the residuals only when non-negligible.
if (math.abs(w_values_old-w_values(p)) > 1e-5) {
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
residual(n) += (w_values_old-w_values(p))*x_np
j += 1
}
}
i += 1
}
reg = gamma_old/2*(w_updated.squaredDist(prior))
// Recompute w.x from scratch, avoiding drift from the incremental updates above.
n = 0; while (n < numData) { sigma_wx(n) = 0; residual(n) = 0; n += 1 }
i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
sigma_wx(n) += x_np*w_values(p)
j += 1
}
i += 1
}
obj = 0
n = 0
while (n < numData) {
val y_n = if (y(n)) 1 else -1
//calculate the objective function
// Log-likelihood; for strongly negative margins use the linear approximation
// to avoid overflow in exp().
if (y_n*sigma_wx(n) > -5) obj += -math.log(1 + math.exp(-y_n*sigma_wx(n)))
else obj += y_n*sigma_wx(n)
sigma_wx(n) = sigmoid(sigma_wx(n))
n += 1
}
obj -= reg
assert(!obj.isInfinity && !obj.isNaN(), "obj is inf or nan")
iter += 1
}
// Bayesian mode: posterior-mean precision under a Gamma(alpha, beta) prior.
val gamma_new =
if (bayes)
(alpha + 0.5f*numLocalFeatures)/(beta + 0.5f*(w_updated.squaredDist(prior)+gamma))
else
gamma
// Swap the penalty term used during optimization for the mode-specific
// coupling term in the reported objective.
obj += reg
if (bayes) obj -= gamma_new/2*(w_updated.squaredDist(w_global))
else obj -= u_new.dot(w_updated - w_global)*gamma_new
(w_values, u_new.elements, gamma_new, obj.toFloat, iter)
}
/**
 * Coordinate descent for Bayesian Distributed Learning (BDL), sparse version:
 * weights, gammas and columns are indexed locally, with `key_indices` mapping
 * local index i to the global feature index.
 *
 * @param data_colView (labels, column view); each entry is
 *                     (globalFeatureIndex, sparse column of values)
 * @param wg_dist      (global feature keys, local w, local per-feature gamma)
 * @param w0           global weights, addressed by global feature index
 * @param variance     per-global-feature posterior variance, used in the
 *                     Bayesian gamma update
 * @param alpha        shape hyper-parameter of the Gamma prior
 * @param beta         rate hyper-parameter of the Gamma prior
 * @param max_iter     maximum number of coordinate-descent sweeps
 * @param gamma0       initial per-feature precision when not warm-starting
 * @param bayes        if true, re-estimate each gamma(i) at the end
 * @param warmStart    if true, reuse carried-over gammas and initialize
 *                     predictions from the incoming w
 * @param obj_th       stop when the objective changes by less than this amount
 * @return (keys, w, gamma, objective, iterations run)
 */
def BDL_CD(data_colView : Pair[Array[Boolean], Array[(Int, SparseVector)]],
wg_dist: (Array[Int], Array[Float], Array[Float]), w0: Array[Float], variance: Array[Float],
alpha: Float, beta: Float, max_iter: Int, gamma0: Float,
bayes: Boolean, warmStart : Boolean, obj_th: Double)
: Tuple5[Array[Int], Array[Float], Array[Float], Float, Int] = {
//coordinate descent for BDL, sparse version
val y = data_colView._1
val x_colView = data_colView._2
val numData = y.length
val key_indices = wg_dist._1
val numLocalFeatures = key_indices.length
assert(numLocalFeatures == x_colView.length)
val w_values = wg_dist._2
// Per-feature precision: carried over only when both Bayesian and warm-starting.
val gamma = if (bayes && warmStart) wg_dist._3 else Array.tabulate(numLocalFeatures)(_=>gamma0)
val w_updated = SparseVector(key_indices, w_values)
val sigma_wx = new Array[Float](numData) // per-example predicted probability, sigmoid(w.x)
val residual = new Array[Float](numData) // scratch accumulator used during each coordinate update
var iter = 0
var obj = -Double.MaxValue
var obj_old = Double.NegativeInfinity
if (warmStart) {
// calculating the weight using previous w
var i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
assert(p==key_indices(i), "feature index mismatch")
val x_p_indices = x_colView(i)._2.getIndices
val x_p_values = x_colView(i)._2.getValues
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_pn = x_p_values(j)
sigma_wx(n) += x_pn*w_values(i)
j += 1
}
i += 1
}
}
var n = 0
while (n < numData) {
// Cold start uses sigmoid(0) = 0.5 for every example.
sigma_wx(n) = if (warmStart) sigmoid(sigma_wx(n)) else 0.5f
residual(n) = 0
n += 1
}
// Outer loop: one full sweep over the local features per iteration.
while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
obj_old = obj
var i = 0
while (i < numLocalFeatures) {
val p = x_colView(i)._1
assert(p==key_indices(i), "feature index mismatch")
val x_p_indices = x_colView(i)._2.getIndices
val x_p_values = x_colView(i)._2.getValues
var nume = 0f // numerator of the closed-form single-coordinate update
var deno = 0f // denominator (curvature-like term) of the update
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = x_p_values(j)
residual(n) += w_values(i)*x_np // temporarily include this coordinate's contribution
if (y(n)) nume += (1-sigma_wx(n))*x_np*(1+sigma_wx(n)*residual(n))
else nume += sigma_wx(n)*x_np*(-1+(1-sigma_wx(n))*residual(n))
// Clamp the curvature away from zero when the prediction saturates.
if (sigma_wx(n) < 1e-5 || sigma_wx(n) > 1-1e-5) deno += 1e-5f*x_np*x_np
else deno += sigma_wx(n)*(1-sigma_wx(n))*x_np*x_np
residual(n) -= w_values(i)*x_np // undo the temporary contribution
j += 1
}
// Gaussian prior centered on the global weight, with per-feature precision.
nume += gamma(i)*w0(key_indices(i))
deno += gamma(i)
val w_values_old = w_values(i)
w_values(i) = nume/deno
// Looser threshold (1e-3) than in ADMM_CD before propagating the change.
if (math.abs(w_values_old-w_values(i)) > 1e-3) {
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = x_p_values(j)
residual(n) += (w_values_old-w_values(i))*x_np
j += 1
}
}
i += 1
}
obj = 0
// Recompute w.x from scratch, avoiding drift from the incremental updates.
n = 0; while (n < numData) { sigma_wx(n) = 0; residual(n) = 0; n += 1 }
i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val x_p_values = x_colView(i)._2.getValues
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_pn = x_p_values(j)
sigma_wx(n) += x_pn*w_values(i)
j += 1
}
// Accumulate the per-feature prior penalty into the objective.
val res = w_values(i) - w0(key_indices(i))
obj -= gamma(i)/2*(res*res)
i += 1
}
n = 0
while (n < numData) {
val y_n = if (y(n)) 1 else -1
//calculate the objective function
// Log-likelihood; linear approximation for strongly negative margins
// avoids overflow in exp().
if (y_n*sigma_wx(n) > -5) obj += -math.log(1 + math.exp(-y_n*sigma_wx(n)))
else obj += y_n*sigma_wx(n)
sigma_wx(n) = sigmoid(sigma_wx(n))
n += 1
}
iter += 1
}
if (bayes) {
// Per-feature posterior-mean precision under a Gamma(alpha, beta) prior,
// accounting for both the residual and the stored posterior variance.
var i = 0
var tmp = 0f // NOTE(review): unused; only referenced by the commented-out shared-gamma variant below
while(i < numLocalFeatures) {
val res = w_values(i)-w0(key_indices(i))
// tmp += res*res+variance(key_indices(i))
gamma(i) = (alpha + 0.5f)/(beta + 0.5f*(res*res+variance(key_indices(i))))
i += 1
}
// i = 0
// while(i < numLocalFeatures) {
// gamma(i) = (alpha + 0.5f*numLocalFeatures)/(beta + 0.5f*tmp)
// i += 1
// }
}
(key_indices, w_values, gamma, obj.toFloat, iter)
}
// def BDL_CG(data : Array[(Boolean, SparseVector)],
// wg_dist: (Array[Int], Array[Float], Array[Float]), w0: Array[Float], variance: Array[Float],
// alpha: Float, beta: Float, max_iter: Int, gamma0: Float,
// bayes: Boolean, warmStart : Boolean, obj_th: Double)
// : Tuple5[Array[Int], Array[Float], Array[Float], Float, Int] = {
//
// //conjugate gradient descent for BDL
// val keyArray = wg_dist._1
// val numLocalFeatures = keyArray.length
// var wi = SparseVector(keyArray, wg_dist._2)
// val w = SparseVector(keyArray, Vector(w0))
// val gamma =
// if (bayes && warmStart)
// SparseVector(keyArray, Vector(wg_dist._3))
// else
// SparseVector(keyArray, gamma0)
//
// var delta_w = SparseVector(keyArray)
// var u = SparseVector(keyArray)
// var g_old = SparseVector(keyArray)
// var iter = 0
// var obj = -Double.MaxValue
// var obj_old = Double.NegativeInfinity
// while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
// iter += 1
// obj_old = obj
// val g = data.par.map(pair => getGradient(pair, wi)).reduce(_+_) - (wi - w)*gamma
// if (iter > 1) {
// val delta_g = g - g_old
// val beta = g.dot(delta_g)/u.dot(delta_g)
// u = g - u*beta
// }
// else u = g
// g_old = g
// val h = data.par.map(pair => getHessian(pair, wi, u)).reduce(_+_) + gamma*u.dot(u)
// delta_w = g.dot(u)/h*u
// wi = wi + delta_w
// obj = data.par.map(pair => getLLH(pair, wi)).reduce(_+_) - gamma/2*(wi - prior).squaredL2Norm
// }
//
// val dist = if (bayes) wi.squaredL2Dist(w) else 0
// val gamma_new =
// if (bayes)
// (alpha + 0.5f*wi.size)/(beta + 0.5f*(dist+gamma0))
// else
// gamma
// (keyArray, wi.getValues, gamma, obj.toFloat, iter)
// }
/**
 * Coordinate descent for BDL, dense-weight version: the weight and gamma
 * arrays are addressed directly by the global feature index `p`.
 *
 * @param data_colView (labels, column view); each entry is
 *                     (globalFeatureIndex, sparse column of values)
 * @param wg_dist      (w, per-feature gamma), both of global length
 * @param w0           global weights
 * @param variance     per-feature posterior variance for the gamma update
 * @param alpha        shape hyper-parameter of the Gamma prior
 * @param beta         rate hyper-parameter of the Gamma prior
 * @param max_iter     maximum number of coordinate-descent sweeps
 * @param gamma0       initial per-feature precision when not warm-starting
 * @param bayes        if true, re-estimate every gamma(p) at the end
 * @param warmStart    if true, reuse carried-over gammas and initialize
 *                     predictions from the incoming w
 * @param obj_th       stop when the objective changes by less than this amount
 * @return (w, gamma, objective, iterations run)
 */
def BDL_CD(data_colView : Pair[Array[Boolean], Array[(Int, SparseVector)]],
wg_dist: (Array[Float], Array[Float]), w0: Array[Float], variance: Array[Float],
alpha: Float, beta: Float, max_iter: Int, gamma0: Float,
bayes: Boolean, warmStart : Boolean, obj_th: Double)
: Tuple4[Array[Float], Array[Float], Float, Int] = {
//coordinate descent for BDL
val y = data_colView._1
val x_colView = data_colView._2
val numData = y.length
val w_values = wg_dist._1
val numFeatures = w_values.length
val numLocalFeatures = x_colView.length
val gamma = if (bayes && warmStart) wg_dist._2 else Array.tabulate(numFeatures)(_=>gamma0)
val w_updated = Vector(w_values) // NOTE(review): appears unused in this overload - candidate for removal
val sigma_wx = new Array[Float](numData) // per-example predicted probability, sigmoid(w.x)
val residual = new Array[Float](numData) // scratch accumulator used during each coordinate update
var iter = 0
var obj = -Double.MaxValue
var obj_old = Double.NegativeInfinity
if (warmStart) {
// calculating the weight using previous w
var i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary // binary columns contribute 1 per stored entry
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_pn = if (isBinary) 1 else x_colView(i)._2.getValues(j)
sigma_wx(n) += x_pn*w_values(p)
j += 1
}
i += 1
}
}
var n = 0
while (n < numData) {
// Cold start uses sigmoid(0) = 0.5 for every example.
sigma_wx(n) = if (warmStart) sigmoid(sigma_wx(n)) else 0.5f
residual(n) = 0
n += 1
}
// Outer loop: one full sweep over the local features per iteration.
while (iter < max_iter && math.abs(obj-obj_old) > obj_th) {
obj_old = obj
var i = 0
while (i < numLocalFeatures) {
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var nume = 0f // numerator of the closed-form single-coordinate update
var deno = 0f // denominator (curvature-like term) of the update
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
residual(n) += w_values(p)*x_np // temporarily include this coordinate's contribution
if (y(n)) nume += (1-sigma_wx(n))*x_np*(1+sigma_wx(n)*residual(n))
else nume += sigma_wx(n)*x_np*(-1+(1-sigma_wx(n))*residual(n))
// Clamp the curvature away from zero when the prediction saturates.
if (sigma_wx(n) < 1e-5 || sigma_wx(n) > 1-1e-5) deno += 1e-5f*x_np*x_np
else deno += sigma_wx(n)*(1-sigma_wx(n))*x_np*x_np
residual(n) -= w_values(p)*x_np // undo the temporary contribution
j += 1
}
// Gaussian prior centered on the global weight, with per-feature precision.
nume += gamma(p)*w0(p)
deno += gamma(p)
val w_values_old = w_values(p)
w_values(p) = nume/deno
// Propagate the weight change into the residuals only when non-negligible.
if (math.abs(w_values_old-w_values(p)) > 1e-3) {
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_np = if (isBinary) 1 else x_colView(i)._2.getValues(j)
residual(n) += (w_values_old-w_values(p))*x_np
j += 1
}
}
i += 1
}
obj = 0
// Recompute w.x from scratch, avoiding drift from the incremental updates.
n = 0; while (n < numData) { sigma_wx(n) = 0; residual(n) = 0; n += 1 }
i = 0
while (i < numLocalFeatures) {
// p is the global feature index, i is the local feature index
val p = x_colView(i)._1
val x_p_indices = x_colView(i)._2.getIndices
val isBinary = x_colView(i)._2.isBinary
var j = 0
while (j < x_p_indices.length) {
val n = x_p_indices(j)
val x_pn = if (isBinary) 1 else x_colView(i)._2.getValues(j)
sigma_wx(n) += x_pn*w_values(p)
j += 1
}
// Accumulate the per-feature prior penalty into the objective.
val res = w_values(p) - w0(p)
obj -= gamma(p)/2*(res*res)
i += 1
}
n = 0
while (n < numData) {
val y_n = if (y(n)) 1 else -1
//calculate the objective function
// Log-likelihood; linear approximation for strongly negative margins
// avoids overflow in exp().
if (y_n*sigma_wx(n) > -5) obj += -math.log(1 + math.exp(-y_n*sigma_wx(n)))
else obj += y_n*sigma_wx(n)
sigma_wx(n) = sigmoid(sigma_wx(n))
n += 1
}
iter += 1
}
if (bayes) {
// Per-feature posterior-mean precision under a Gamma(alpha, beta) prior.
// Note: iterates over ALL numFeatures, not just the local ones.
var p = 0
var tmp = 0f // NOTE(review): unused in this overload - candidate for removal
while(p < numFeatures) {
val res = w_values(p)-w0(p)
gamma(p) = (alpha + 0.5f)/(beta + 0.5f*(res*res+variance(p)))
p += 1
}
}
(w_values, gamma, obj.toFloat, iter)
}
} | XianXing/bdl | src/main/scala/bdl/lr/Functions.scala | Scala | apache-2.0 | 41,894 |
package ui.shader.builder
import ui.shader.builder.types.GlBoolType
import ui.shader.builder.value.GlValue
/**
 * A GLSL `if` statement: renders the condition, the then-branch block and,
 * when present, an `else` branch.
 *
 * @param cond      boolean condition rendered inside `if (...)`
 * @param thenBlock block emitted after the condition
 * @param elseBlock optional block emitted after `else`; omitted when None
 */
class GlIf(val cond: GlValue[GlBoolType],
           val thenBlock: GlBlock,
           val elseBlock: Option[GlBlock] = None) extends GlCommand {

  /** Render this statement as GLSL source text. */
  override def toGlsl: String = {
    val builder = new StringBuilder
    builder.append("if (").append(cond.toGlsl).append(") ")
    builder.append(thenBlock.toGlsl).append("\\n")
    // An absent else-branch contributes nothing, matching the original output.
    elseBlock.foreach(block => builder.append("else ").append(block.toGlsl))
    builder.toString
  }
}
object GlIf {
/** Convenience factory mirroring the class constructor, so call sites can omit `new`. */
def apply(cond: GlValue[GlBoolType],
thenBlock: GlBlock,
elseBlock: Option[GlBlock] = None): GlIf = {
new GlIf(cond, thenBlock, elseBlock)
}
}
| gvatn/play-scalajs-webgl-spark | client/src/main/scala/ui/shader/builder/GlIf.scala | Scala | mit | 645 |
package ingredients.tuplecutter
import org.scalatest.{ Matchers, WordSpec }
/**
 * Unit tests for the tuplecutter extension operations: `tail` drops a tuple's
 * first element (a 2-tuple shrinks to Tuple1) and `head` returns it.
 */
class TupleCutterSpec extends WordSpec with Matchers {
"Tuple.tail" should {
"remove first element of a 2-tuple" in {
("A", 2).tail shouldBe Tuple1(2)
}
"remove the first element of a 3-tuple" in {
("A", 2, "C").tail shouldBe (2, "C")
}
// Mixed element types (including emoji strings) exercise the largest supported arity.
"remove the first element of a 8-tuple" in {
("💩", 2, "C", "d", List(), Map(1 -> "ciao"), 7, 8).tail shouldBe (
2, "C", "d", List(), Map(1 -> "ciao"), 7, 8)
}
}
"Tuple.head" should {
"retrieve the first element of a 2-tuple" in {
("A", 2).head shouldBe "A"
}
"retrieve the first element of a 3-tuple" in {
("A", 2, "C").head shouldBe "A"
}
"retrieve the first element of a 8-tuple" in {
("☃", 2, "C", "d", List(), Map(1 -> "ciao"), 7, 8).head shouldBe "☃"
}
}
}
| buildo/ingredients | tuplecutter/src/test/scala/io.buildo.ingredients/tuplecutter/TupleCutter.scala | Scala | mit | 886 |
package org.jetbrains.plugins.scala
package codeInspection.parentheses
import com.intellij.codeInspection.LocalInspectionTool
import org.jetbrains.plugins.scala.codeInspection.ScalaLightInspectionFixtureTestAdapter
/**
* Nikolay.Tropin
* 4/29/13
*/
/**
 * Tests for [[ScalaUnnecessaryParenthesesInspection]].
 *
 * Conventions from the fixture adapter: START/END delimit the range expected
 * to be highlighted by the inspection; <caret> marks the cursor position at
 * which the quick fix is invoked; `check` asserts the highlight and
 * `testFix(text, result, hint)` applies the fix and compares the outcome.
 */
class UnnecessaryParenthesesInspectionTest extends ScalaLightInspectionFixtureTestAdapter{
val annotation = "Unnecessary parentheses"
val hintBeginning = "Remove unnecessary parentheses"
protected def classOfInspection: Class[_ <: LocalInspectionTool] = classOf[ScalaUnnecessaryParenthesesInspection]
// Redundant parentheses around a whole expression are removed.
def test_1(): Unit = {
val selected = START + "(1 + 1)" + END
check(selected)
val text = "(<caret>1 + 1)"
val result = "1 + 1"
val hint = hintBeginning + " (1 + 1)"
testFix(text, result, hint)
}
// Parentheses that clarify precedence are kept (no highlight expected).
def test_2(): Unit = {
val text = "1 + (1 * 2)"
checkTextHasNoErrors(text)
}
// Redundant parentheses in a guard of a match case.
def test_3(): Unit = {
val selected = s"""
|def f(n: Int): Int = n match {
| case even if $START(<caret>even % 2 == 0)$END => (even + 1)
| case odd => 1 + (odd * 3)
|}
"""
check(selected)
val text = """
|def f(n: Int): Int = n match {
| case even if (<caret>even % 2 == 0) => (even + 1)
| case odd => 1 + (odd * 3)
|}
"""
val result = """
|def f(n: Int): Int = n match {
| case even if even % 2 == 0 => (even + 1)
| case odd => 1 + (odd * 3)
|}
"""
val hint = hintBeginning + " (even % 2 == 0)"
testFix(text, result, hint)
}
// Redundant parentheses in a case result expression.
def test_4(): Unit = {
val selected = s"""
|def f(n: Int): Int = n match {
| case even if (even % 2 == 0) => $START(even + 1<caret>)$END
| case odd => 1 + (odd * 3)
|}
"""
check(selected)
val text = """
|def f(n: Int): Int = n match {
| case even if (even % 2 == 0) => (even + 1<caret>)
| case odd => 1 + (odd * 3)
|}
"""
val result = """
|def f(n: Int): Int = n match {
| case even if (even % 2 == 0) => even + 1
| case odd => 1 + (odd * 3)
|}
"""
val hint = hintBeginning + " (even + 1)"
testFix(text, result, hint)
}
// Right-associative operator: parentheses are kept.
def test_5(): Unit = {
val text = "1 :: (2 :: Nil)"
checkTextHasNoErrors(text)
}
// Nested redundant parentheses are collapsed in one fix.
def test_6(): Unit = {
val selected = "val a = " + START + "((<caret>(1)))" + END
check(selected)
val text = "val a = ((<caret>(1)))"
val result = "val a = 1"
val hint = hintBeginning + " (((1)))"
testFix(text, result, hint)
}
// Parentheses around a placeholder-lambda argument are kept.
def test_7(): Unit = {
val text = """def a(x: Any): Boolean = true
|List() count (a(_))"""
checkTextHasNoErrors(text, annotation, classOf[ScalaUnnecessaryParenthesesInspection])
}
// Double parentheses around a tuple argument: only the outer pair is redundant.
def test_8(): Unit = {
val selected = "1 to " + START +"((1, 2))" + END
check(selected)
val text = "1 to ((1, 2))"
val result = "1 to (1, 2)"
val hint = hintBeginning + " ((1, 2))"
testFix(text, result, hint)
}
// Parentheses grouping a multi-line infix chain are kept.
def test_9(): Unit = {
val text = """(List("a")
| :+ new String("b")
| :+ new String("c")
| :+ new String("d"))"""
checkTextHasNoErrors(text)
}
// Inline block comments inside the parentheses survive the fix.
def test_10(): Unit = {
val selected = START + "(/*b*/ 1 + /*a*/ 1 /*comment*/)" + END
check(selected)
val text = "(<caret>/*b*/ 1 + /*a*/ 1 /*comment*/)"
val result = "/*b*/ 1 + /*a*/ 1 /*comment*/"
val hint = hintBeginning + " (1 + 1)"
testFix(text, result, hint)
}
// Trailing comments are moved to a new line (note the \\n\\r in the expected result).
def test_11(): Unit = {
val selected = START + "(/*1*/ 6 /*2*/ /*3*/)" + END
check(selected)
val text = "(<caret>/*1*/ 6 /*2*/ /*3*/)"
val result = "/*1*/ 6 /*2*/\\n\\r/*3*/"
val hint = hintBeginning + " (6)"
testFix(text, result, hint)
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/parentheses/UnnecessaryParenthesesInspectionTest.scala | Scala | apache-2.0 | 4,130 |
package sigmastate.helpers
import java.math.BigInteger
import org.ergoplatform.settings.ErgoAlgos
import org.ergoplatform.{Outputs, ErgoBox}
import scalan.RType
import scorex.crypto.authds.ADDigest
import scorex.crypto.hash.Digest32
import scorex.util.ModifierId
import sigmastate.Values._
import sigmastate.lang.Terms.MethodCall
import sigmastate.serialization.OpCodes
import sigmastate.utxo.SelectField
import sigmastate._
import sigmastate.eval._
import sigmastate.utils.Helpers
import special.collection.CollType
import special.sigma.SigmaDslTesting
import scala.collection.mutable.ArrayBuffer
/**
 * Tests for the SigmaPPrint pretty-printer: each case checks that a value is
 * rendered as the exact Scala source text that would reconstruct it.
 */
class SigmaPPrintSpec extends SigmaDslTesting {

// typeName renders an SType as the Scala type expression naming it.
property("typeName") {
def test(t: SType, exp: String) = {
SigmaPPrint.typeName(t) shouldBe exp
}
test(SInt, "SInt.type")
test(SCollection(SBox), "SCollection[SBox.type]")
test(SOption(SBox), "SOption[SBox.type]")
test(STuple(SBox, SInt), "STuple")
}

// Each group below exercises one family of SigmaPPrint handlers; `expected`
// is the exact plain-text rendering.
property("Special cases") {
def test(x: Any, expected: String) = {
val res = SigmaPPrint(x).plainText
res shouldBe expected
}

// type handlers
test(SCollectionType(SByte), "SByteArray")
test(SCollectionType(SCollectionType(SByte)), "SByteArray2")
test(SCollectionType(SBoolean), "SBooleanArray")
test(STuple(Vector(SBoolean, SInt)), "SPair(SBoolean, SInt)")
test(RType.BooleanType, "RType.BooleanType")
test(CollType(RType.ByteType), "CollType(RType.ByteType)")

// exception handlers
test(new ArithmeticException("msg"), "new ArithmeticException(\\"msg\\")")

// data handlers
test(10.toByte, "10.toByte")
test(255.toByte, "-1.toByte")
test(10.toShort, "10.toShort")
test(new BigInteger("a", 16), """new BigInteger("a", 16)""")
val negative = new BigInteger("-a", 16)
negative.toString(10) shouldBe "-10"
test(negative, """new BigInteger("-a", 16)""")
test(ErgoAlgos.decodeUnsafe("00ffaa"), "ErgoAlgos.decodeUnsafe(\\"00ffaa\\")")
test(ErgoAlgos.decodeUnsafe("00ffaa"): Seq[Byte], "ErgoAlgos.decodeUnsafe(\\"00ffaa\\")")
test(Array(10), "Array(10)")
test(Array(10): Seq[Int], "Array(10)")
test({val buf = ArrayBuffer.empty[Int]; buf += (10); buf}, "Seq(10)")
test(Helpers.decodeBytes("00ff"), "Helpers.decodeBytes(\\"00ff\\")")
val ge1 = "03358d53f01276211f92d0aefbd278805121d4ff6eb534b777af1ee8abae5b2056"
test(Helpers.decodeGroupElement(ge1), s"""Helpers.decodeGroupElement("${ge1}")""")
test(Helpers.decodeECPoint(ge1), s"""Helpers.decodeECPoint("${ge1}")""")
// Multi-line renderings for composite values use stripMargin blocks.
val t1 = AvlTreeData(
ADDigest @@ ErgoAlgos.decodeUnsafe("000183807f66b301530120ff7fc6bd6601ff01ff7f7d2bedbbffff00187fe89094"),
AvlTreeFlags(false, true, true),
1,
Some(1)
)
test(t1,
"""AvlTreeData(
| ADDigest @@ (
| ErgoAlgos.decodeUnsafe("000183807f66b301530120ff7fc6bd6601ff01ff7f7d2bedbbffff00187fe89094")
| ),
| AvlTreeFlags(false, true, true),
| 1,
| Some(1)
|)""".stripMargin)
test(
new ErgoTree(
16.toByte,
Vector(IntArrayConstant(Array(10, 20))),
Right(BoolToSigmaProp(TrueLeaf))
),
"""new ErgoTree(
| 16.toByte,
| Vector(IntArrayConstant(Coll[Int](10, 20))),
| Right(BoolToSigmaProp(TrueLeaf))
|)""".stripMargin)
test(
CostingBox(
false,
new ErgoBox(
9223372036854775807L,
new ErgoTree(0.toByte, Vector(), Right(BoolToSigmaProp(FalseLeaf))),
Coll(
(Digest32 @@ (ErgoAlgos.decodeUnsafe("6e789ab7b2fffff12280a6cd01557f6fb22b7f80ff7aff8e1f7f15973d7f0001")), 10000000L)
),
Map(),
ModifierId @@ ("bc80ffc00100d60101ffd3d3ab7f73800aff80487fff7fffbb010080ff7f0837"),
0.toShort,
1000000
)
),
"""CostingBox(
| false,
| new ErgoBox(
| 9223372036854775807L,
| new ErgoTree(0.toByte, Vector(), Right(BoolToSigmaProp(FalseLeaf))),
| Coll(
| (
| Digest32 @@ (
| ErgoAlgos.decodeUnsafe("6e789ab7b2fffff12280a6cd01557f6fb22b7f80ff7aff8e1f7f15973d7f0001")
| ),
| 10000000L
| )
| ),
| Map(),
| ModifierId @@ ("bc80ffc00100d60101ffd3d3ab7f73800aff80487fff7fffbb010080ff7f0837"),
| 0.toShort,
| 1000000
| )
|)""".stripMargin
)

// additionalHandlers
test(SGlobal, "SGlobal")
test(SCollection, "SCollection")
test(SOption, "SOption")
test(SInt, "SInt")
test(Outputs, "Outputs")
test(ErgoBox.R0, "ErgoBox.R0")
test(ErgoBox.R9, "ErgoBox.R9")
test(
SelectField.typed[Value[SByte.type]](ValUse(1, STuple(Vector(SByte, SByte))), 1.toByte),
"SelectField.typed[Value[SByte.type]](ValUse(1, SPair(SByte, SByte)), 1.toByte)"
)
test(TrueLeaf, "TrueLeaf")
test(FalseLeaf, "FalseLeaf")
test(IntConstant(10), "IntConstant(10)")
test(ArithOp(IntConstant(1), IntConstant(1), OpCodes.PlusCode), "ArithOp(IntConstant(1), IntConstant(1), OpCode @@ (-102.toByte))")
test(
MethodCall.typed[Value[SCollection[SBox.type]]](
ValUse(1, SContext),
SContext.getMethodByName("dataInputs"),
Vector(),
Map()
),
"""MethodCall.typed[Value[SCollection[SBox.type]]](
| ValUse(1, SContext),
| SContext.getMethodByName("dataInputs"),
| Vector(),
| Map()
|)""".stripMargin)
test(SCollection.tIV, """STypeVar("IV")""")
test(Map(SCollection.tIV -> SInt), """Map(STypeVar("IV") -> SInt)""")
test(
MethodCall.typed[Value[SCollection[SInt.type]]](
ValUse(1, SCollectionType(SBox)),
SCollection.IndicesMethod.withConcreteTypes(Map(SCollection.tIV -> SBox)),
Vector(),
Map()
),
"""MethodCall.typed[Value[SCollection[SInt.type]]](
| ValUse(1, SCollectionType(SBox)),
| SCollection.getMethodByName("indices").withConcreteTypes(Map(STypeVar("IV") -> SBox)),
| Vector(),
| Map()
|)""".stripMargin)
}
}
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/test/scala/sigmastate/helpers/SigmaPPrintSpec.scala | Scala | mit | 6,194 |
// Negative compilation test for structural (refinement) type restrictions:
// each marked line below is expected to fail to compile for the stated reason.
object Test3 {
import scala.reflect.Selectable.reflectiveSelectable

def g(x: { type T ; def t: T ; def f(a: T): Boolean }) = x.f(x.t) // error: it has a parameter type with an unstable erasure
g(new { type T = Int; def t = 4; def f(a:T) = true })
g(new { type T = Any; def t = 4; def f(a:T) = true })

val y: { type T = Int; def t = 4; def f(a:T) = true } // error: illegal refinement // error: illegal refinement
= new { type T = Int; def t = 4; def f(a:T) = true }

def h(x: { def f[T](a: T): Int }) = x.f[Int](4) // error: polymorphic refinement method ... no longer allowed

type A = { def foo(x: Int): Unit; def foo(x: String): Unit } // error: overloaded definition // error: overloaded definition
type B = { val foo: Int; def foo: Int } // error: duplicate foo
type C = { var foo: Int } // error: refinements cannot have vars

trait Entry { type Key; val key: Key }
type D = { def foo(e: Entry, k: e.Key): Unit }
val e = new Entry { type Key = Int; val key = 0 }

def i(x: D) = x.foo(e, 1) // error: foo has dependent params

type G = { def foo(x: Int, y: Int): Unit }

def j(x: G) = x.foo(???) // error: missing argument

class H { type S = String; type I }
class I extends H { type I = Int }
type Dep = {
def fun1(x: H, y: x.S): Int
def fun2(x: H, y: x.I): Int
def fun3(y: H): y.S
def fun4(y: H): y.I
}
def k(x: Dep) = {
val y = new I
x.fun1(y, "Hello")
x.fun2(y, 1) // error
x.fun3(y)
x.fun4(y) // error
}
}
| dotty-staging/dotty | tests/neg/structural.scala | Scala | apache-2.0 | 1,499 |
package com.example.spray
import akka.actor.{Props, ActorSystem}
import com.example.spray.add.AddActor
import com.example.spray.hello.HelloActor
/**
* Core is type containing the ``system: ActorSystem`` member. This enables us to use it in our
* apps as well as in our tests.
*/
trait Core {
/** The ActorSystem backing the application; abstract so tests can supply their own. */
implicit def system: ActorSystem
}
/**
* This trait implements ``Core`` by starting the required ``ActorSystem`` and registering the
* termination handler to stop the system when the JVM exits.
*/
trait BootedCore extends Core {

/**
* Construct the ActorSystem we will use in our application
*/
// `lazy` defers creation until first access, avoiding trait-initialization-order issues.
implicit lazy val system = ActorSystem("akka-spray")

/**
* Ensure that the constructed ActorSystem is shut down when the JVM shuts down
*/
sys.addShutdownHook(system.shutdown())

}
/**
* This trait contains the actors that make up our application; it can be mixed in with
* ``BootedCore`` for running code or ``TestKit`` for unit and integration tests.
*/
trait CoreActors {
// Self-type: may only be mixed into something providing Core's ActorSystem.
this: Core =>

/** Actor handling addition requests. */
val add = system.actorOf(Props[AddActor])
/** Actor handling hello requests. */
val hello = system.actorOf(Props[HelloActor])
} | pjfanning/swagger-spray-sample | src/main/scala/com/example/spray/Core.scala | Scala | apache-2.0 | 1,111 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.codegen
import org.apache.spark.sql.catalyst.expressions.{UnsafeRow, Attribute}
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.Platform
/**
 * Base class for code-generated joiners that concatenate the fields of two
 * [[UnsafeRow]]s into a single output row.
 */
abstract class UnsafeRowJoiner {
/** Returns a row containing all fields of `row1` followed by all fields of `row2`. */
def join(row1: UnsafeRow, row2: UnsafeRow): UnsafeRow
}
/**
* A code generator for concatenating two [[UnsafeRow]]s into a single [[UnsafeRow]].
*
* The high level algorithm is:
*
* 1. Concatenate the two bitsets together into a single one, taking padding into account.
* 2. Move fixed-length data.
* 3. Move variable-length data.
* 4. Update the offset position (i.e. the upper 32 bits in the fixed length part) for all
* variable-length data.
*/
object GenerateUnsafeRowJoiner extends CodeGenerator[(StructType, StructType), UnsafeRowJoiner] {
override protected def create(in: (StructType, StructType)): UnsafeRowJoiner = {
create(in._1, in._2)
}
override protected def canonicalize(in: (StructType, StructType)): (StructType, StructType) = in
override protected def bind(in: (StructType, StructType), inputSchema: Seq[Attribute])
: (StructType, StructType) = {
in
}
def create(schema1: StructType, schema2: StructType): UnsafeRowJoiner = {
val offset = Platform.BYTE_ARRAY_OFFSET
val getLong = "Platform.getLong"
val putLong = "Platform.putLong"
val bitset1Words = (schema1.size + 63) / 64
val bitset2Words = (schema2.size + 63) / 64
val outputBitsetWords = (schema1.size + schema2.size + 63) / 64
val bitset1Remainder = schema1.size % 64
// The number of words we can reduce when we concat two rows together.
// The only reduction comes from merging the bitset portion of the two rows, saving 1 word.
val sizeReduction = bitset1Words + bitset2Words - outputBitsetWords
// --------------------- copy bitset from row 1 and row 2 --------------------------- //
val copyBitset = Seq.tabulate(outputBitsetWords) { i =>
val bits = if (bitset1Remainder > 0) {
if (i < bitset1Words - 1) {
s"$getLong(obj1, offset1 + ${i * 8})"
} else if (i == bitset1Words - 1) {
// combine last work of bitset1 and first word of bitset2
s"$getLong(obj1, offset1 + ${i * 8}) | ($getLong(obj2, offset2) << $bitset1Remainder)"
} else if (i - bitset1Words < bitset2Words - 1) {
// combine next two words of bitset2
s"($getLong(obj2, offset2 + ${(i - bitset1Words) * 8}) >>> (64 - $bitset1Remainder))" +
s" | ($getLong(obj2, offset2 + ${(i - bitset1Words + 1) * 8}) << $bitset1Remainder)"
} else {
// last word of bitset2
s"$getLong(obj2, offset2 + ${(i - bitset1Words) * 8}) >>> (64 - $bitset1Remainder)"
}
} else {
// they are aligned by word
if (i < bitset1Words) {
s"$getLong(obj1, offset1 + ${i * 8})"
} else {
s"$getLong(obj2, offset2 + ${(i - bitset1Words) * 8})"
}
}
s"$putLong(buf, ${offset + i * 8}, $bits);"
}.mkString("\\n")
// --------------------- copy fixed length portion from row 1 ----------------------- //
var cursor = offset + outputBitsetWords * 8
val copyFixedLengthRow1 = s"""
|// Copy fixed length data for row1
|Platform.copyMemory(
| obj1, offset1 + ${bitset1Words * 8},
| buf, $cursor,
| ${schema1.size * 8});
""".stripMargin
cursor += schema1.size * 8
// --------------------- copy fixed length portion from row 2 ----------------------- //
val copyFixedLengthRow2 = s"""
|// Copy fixed length data for row2
|Platform.copyMemory(
| obj2, offset2 + ${bitset2Words * 8},
| buf, $cursor,
| ${schema2.size * 8});
""".stripMargin
cursor += schema2.size * 8
// --------------------- copy variable length portion from row 1 ----------------------- //
val numBytesBitsetAndFixedRow1 = (bitset1Words + schema1.size) * 8
val copyVariableLengthRow1 = s"""
|// Copy variable length data for row1
|long numBytesVariableRow1 = row1.getSizeInBytes() - $numBytesBitsetAndFixedRow1;
|Platform.copyMemory(
| obj1, offset1 + ${(bitset1Words + schema1.size) * 8},
| buf, $cursor,
| numBytesVariableRow1);
""".stripMargin
// --------------------- copy variable length portion from row 2 ----------------------- //
val numBytesBitsetAndFixedRow2 = (bitset2Words + schema2.size) * 8
val copyVariableLengthRow2 = s"""
|// Copy variable length data for row2
|long numBytesVariableRow2 = row2.getSizeInBytes() - $numBytesBitsetAndFixedRow2;
|Platform.copyMemory(
| obj2, offset2 + ${(bitset2Words + schema2.size) * 8},
| buf, $cursor + numBytesVariableRow1,
| numBytesVariableRow2);
""".stripMargin
// ------------- update fixed length data for variable length data type --------------- //
val updateOffset = (schema1 ++ schema2).zipWithIndex.map { case (field, i) =>
// Skip fixed length data types, and only generate code for variable length data
if (UnsafeRow.isFixedLength(field.dataType)) {
""
} else {
// Number of bytes to increase for the offset. Note that since in UnsafeRow we store the
// offset in the upper 32 bit of the words, we can just shift the offset to the left by
// 32 and increment that amount in place.
val shift =
if (i < schema1.size) {
s"${(outputBitsetWords - bitset1Words + schema2.size) * 8}L"
} else {
s"(${(outputBitsetWords - bitset2Words + schema1.size) * 8}L + numBytesVariableRow1)"
}
val cursor = offset + outputBitsetWords * 8 + i * 8
s"""
|$putLong(buf, $cursor, $getLong(buf, $cursor) + ($shift << 32));
""".stripMargin
}
}.mkString("\\n")
// ------------------------ Finally, put everything together --------------------------- //
val code = s"""
|public Object generate($exprType[] exprs) {
| return new SpecificUnsafeRowJoiner();
|}
|
|class SpecificUnsafeRowJoiner extends ${classOf[UnsafeRowJoiner].getName} {
| private byte[] buf = new byte[64];
| private UnsafeRow out = new UnsafeRow();
|
| public UnsafeRow join(UnsafeRow row1, UnsafeRow row2) {
| // row1: ${schema1.size} fields, $bitset1Words words in bitset
| // row2: ${schema2.size}, $bitset2Words words in bitset
| // output: ${schema1.size + schema2.size} fields, $outputBitsetWords words in bitset
| final int sizeInBytes = row1.getSizeInBytes() + row2.getSizeInBytes();
| if (sizeInBytes > buf.length) {
| buf = new byte[sizeInBytes];
| }
|
| final Object obj1 = row1.getBaseObject();
| final long offset1 = row1.getBaseOffset();
| final Object obj2 = row2.getBaseObject();
| final long offset2 = row2.getBaseOffset();
|
| $copyBitset
| $copyFixedLengthRow1
| $copyFixedLengthRow2
| $copyVariableLengthRow1
| $copyVariableLengthRow2
| $updateOffset
|
| out.pointTo(buf, ${schema1.size + schema2.size}, sizeInBytes - $sizeReduction);
|
| return out;
| }
|}
""".stripMargin
logDebug(s"SpecificUnsafeRowJoiner($schema1, $schema2):\\n${CodeFormatter.format(code)}")
val c = compile(code)
c.generate(Array.empty).asInstanceOf[UnsafeRowJoiner]
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateUnsafeRowJoiner.scala | Scala | apache-2.0 | 8,420 |
package assigner
import assigner.model._
import org.json4s.DefaultFormats
import org.junit.runner.RunWith
import org.scalacheck.Gen._
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.PropertyChecks
import org.json4s.jackson.Serialization._
@RunWith(classOf[JUnitRunner])
class Examples extends FunSuite with Matchers with PropertyChecks with DataGen {
  // json4s formats required by `write` below.
  implicit val formats = DefaultFormats

  // NOTE(review): `depth` is not referenced in the visible code of this suite —
  // presumably consumed by the DataGen mixin; confirm before removing.
  val depth = 3

  // ScalaCheck generator producing randomized Course instances whose endpoints
  // point at a local echo server.
  val courses = courseGen(
    settings = Settings(),
    endpoints = Endpoints("http://localhost:8080/echo", "http://localhost:8080/echo"),
    numStudentsGen = choose(20, 30),
    numGroupsGen = const(5),
    numSkillsGen = choose(2, 3),
    groupSizeGen = minMaxGroupSizeGen(4, 6))

  // Keep the property check small (10 successful cases, 2 workers) so the suite stays fast.
  implicit override val generatorDrivenConfig =
    PropertyCheckConfig(minSuccessful = 10, workers = 2)

  // Prints the JSON serialization of every generated course that passes validation.
  // This test produces example data; it makes no assertions on the serialized output.
  test("positive examples") {
    forAll(courses) { course: Course =>
      whenever(course.validate.errors.isEmpty) {
        println(write(course))
      }
    }
  }
}
| joroKr21/IoS-Algorithm | src/test/scala/assigner/Examples.scala | Scala | mit | 1,031 |
package gh.test.gh2011.payload
import gh2011.models.{FollowEventPayload, DeleteEventPayload}
import net.liftweb.json._
import org.scalatest.{FlatSpec, Matchers}
class FollowEventPayloadTest extends FlatSpec with Matchers
{
  // Feeding a well-formed FollowEvent JSON document into the payload parser
  // must yield a defined (successfully parsed) result.
  "A valid FollowEvent payload" must "be correctly parsed" in {
    val payloadJson = parse(
      """
        | {
        |
        |    "target":{
        |       "gravatar_id":"b50d3f9ed186e43dfcc867571749393c",
        |       "repos":8,
        |       "followers":151,
        |       "login":"joearms"
        |    },
        |    "actor":"mulander",
        |    "actor_gravatar":"ec96450f3351c57eeaea1c9c2599e85d"
        |
        |}
      """.stripMargin)

    FollowEventPayload(payloadJson) should be ('defined)
  }
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh2011/payload/FollowEventPayloadTest.scala | Scala | mit | 792 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import _root_.java.io.File
import _root_.java.io.PrintWriter
import com.github.nscala_time.time.Imports.DateTime
import grizzled.slf4j.Logger
import org.apache.predictionio.annotation.DeveloperApi
import org.apache.predictionio.core.BaseEvaluator
import org.apache.predictionio.core.BaseEvaluatorResult
import org.apache.predictionio.data.storage.Storage
import org.apache.predictionio.workflow.JsonExtractor
import org.apache.predictionio.workflow.JsonExtractorOption.Both
import org.apache.predictionio.workflow.NameParamsSerializer
import org.apache.predictionio.workflow.WorkflowParams
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.json4s.native.Serialization.write
import org.json4s.native.Serialization.writePretty
import scala.language.existentials
/** Case class storing a primary score, and other scores
*
* @param score Primary metric score
* @param otherScores Other scores this metric might have
* @tparam R Type of the primary metric score
* @group Evaluation
*/
case class MetricScores[R](
  // Primary metric value for one evaluation.
  score: R,
  // Auxiliary metric values; element types depend on the extra metrics supplied,
  // hence Seq[Any].
  otherScores: Seq[Any])
/** Contains all results of a [[MetricEvaluator]]
*
* @param bestScore The best score among all iterations
* @param bestEngineParams The set of engine parameters that yielded the best score
* @param bestIdx The index of iteration that yielded the best score
* @param metricHeader Brief description of the primary metric score
* @param otherMetricHeaders Brief descriptions of other metric scores
* @param engineParamsScores All sets of engine parameters and corresponding metric scores
* @param outputPath An optional output path where scores are saved
* @tparam R Type of the primary metric score
* @group Evaluation
*/
case class MetricEvaluatorResult[R](
  bestScore: MetricScores[R],
  bestEngineParams: EngineParams,
  bestIdx: Int,
  metricHeader: String,
  otherMetricHeaders: Seq[String],
  engineParamsScores: Seq[(EngineParams, MetricScores[R])],
  outputPath: Option[String])
  extends BaseEvaluatorResult {

  // One-line summary used for compact CLI/log output.
  // NOTE(review): the index is recomputed via indexOf on bestEngineParams instead of
  // using the `bestIdx` field; if two iterations share identical engine params this
  // reports the first matching index — confirm that is intended.
  override def toOneLiner(): String = {
    val idx = engineParamsScores.map(_._1).indexOf(bestEngineParams)
    s"Best Params Index: $idx Score: ${bestScore.score}"
  }

  // Serializes the whole result to JSON; NameParamsSerializer renders the
  // (name, params) pairs inside EngineParams.
  override def toJSON(): String = {
    implicit lazy val formats = Utils.json4sDefaultFormats +
      new NameParamsSerializer
    write(this)
  }

  // HTML rendering backed by the html.metric_evaluator template.
  override def toHTML(): String = html.metric_evaluator().toString()

  // Multi-line human-readable report: iteration count, best engine params (pretty
  // JSON), the primary metric, all auxiliary metrics, and the output path if any.
  override def toString: String = {
    implicit lazy val formats = Utils.json4sDefaultFormats +
      new NameParamsSerializer
    val bestEPStr = JsonExtractor.engineParamstoPrettyJson(Both, bestEngineParams)
    val strings = Seq(
      "MetricEvaluatorResult:",
      s"  # engine params evaluated: ${engineParamsScores.size}") ++
      Seq(
        "Optimal Engine Params:",
        s"  $bestEPStr",
        "Metrics:",
        s"  $metricHeader: ${bestScore.score}") ++
      otherMetricHeaders.zip(bestScore.otherScores).map {
        case (h, s) => s"  $h: $s"
      } ++
      outputPath.toSeq.map {
        p => s"The best variant params can be found in $p"
      }
    strings.mkString("\n")
  }
}
/** Companion object of [[MetricEvaluator]]
*
* @group Evaluation
*/
object MetricEvaluator {

  /** Builds an evaluator that also persists the best engine params to `outputPath`. */
  def apply[EI, Q, P, A, R](
    metric: Metric[EI, Q, P, A, R],
    otherMetrics: Seq[Metric[EI, Q, P, A, _]],
    outputPath: String): MetricEvaluator[EI, Q, P, A, R] =
    new MetricEvaluator[EI, Q, P, A, R](metric, otherMetrics, Some(outputPath))

  /** Builds an evaluator with auxiliary metrics and no output file. */
  def apply[EI, Q, P, A, R](
    metric: Metric[EI, Q, P, A, R],
    otherMetrics: Seq[Metric[EI, Q, P, A, _]]): MetricEvaluator[EI, Q, P, A, R] =
    new MetricEvaluator[EI, Q, P, A, R](metric, otherMetrics, None)

  /** Builds an evaluator that tracks only the primary metric. */
  def apply[EI, Q, P, A, R](metric: Metric[EI, Q, P, A, R]): MetricEvaluator[EI, Q, P, A, R] =
    new MetricEvaluator[EI, Q, P, A, R](metric, Seq.empty, None)

  /** JSON-friendly (name, params) pair describing one pipeline component. */
  case class NameParams(name: String, params: Params) {
    // Auxiliary constructor adapting the tuple form used throughout EngineParams.
    def this(np: (String, Params)) = this(np._1, np._2)
  }

  /** Mirrors the engine-variant JSON layout written to disk for the best params. */
  case class EngineVariant(
    id: String,
    description: String,
    engineFactory: String,
    datasource: NameParams,
    preparator: NameParams,
    algorithms: Seq[NameParams],
    serving: NameParams) {

    // Convenience constructor deriving all component (name, params) pairs from an
    // Evaluation and its EngineParams; id and description are left blank.
    def this(evaluation: Evaluation, engineParams: EngineParams) = this(
      id = "",
      description = "",
      engineFactory = evaluation.getClass.getName,
      datasource = new NameParams(engineParams.dataSourceParams),
      preparator = new NameParams(engineParams.preparatorParams),
      algorithms = engineParams.algorithmParamsList.map(np => new NameParams(np)),
      serving = new NameParams(engineParams.servingParams))
  }
}
/** :: DeveloperApi ::
* Do no use this directly. Use [[MetricEvaluator$]] instead. This is an
* implementation of [[org.apache.predictionio.core.BaseEvaluator]] that evaluates
* prediction performance based on metric scores.
*
* @param metric Primary metric
* @param otherMetrics Other metrics
* @param outputPath Optional output path to save evaluation results
* @tparam EI Evaluation information type
* @tparam Q Query class
* @tparam P Predicted result class
* @tparam A Actual result class
* @tparam R Metric result class
* @group Evaluation
*/
@DeveloperApi
class MetricEvaluator[EI, Q, P, A, R] (
    val metric: Metric[EI, Q, P, A, R],
    val otherMetrics: Seq[Metric[EI, Q, P, A, _]],
    val outputPath: Option[String])
  extends BaseEvaluator[EI, Q, P, A, MetricEvaluatorResult[R]] {

  @transient lazy val logger = Logger[this.type]
  @transient val engineInstances = Storage.getMetaDataEngineInstances()

  /** Writes the given engine parameters as a pretty-printed engine-variant JSON
    * file to `outputPath`.
    *
    * @param evaluation The evaluation whose class name is recorded in the variant
    * @param engineParams The engine parameters to serialize
    * @param outputPath Local filesystem path of the JSON file to create
    */
  def saveEngineJson(
    evaluation: Evaluation,
    engineParams: EngineParams,
    outputPath: String): Unit = {
    val now = DateTime.now
    val evalClassName = evaluation.getClass.getName
    val variant = MetricEvaluator.EngineVariant(
      id = s"$evalClassName $now",
      description = "",
      engineFactory = evalClassName,
      datasource = new MetricEvaluator.NameParams(engineParams.dataSourceParams),
      preparator = new MetricEvaluator.NameParams(engineParams.preparatorParams),
      algorithms = engineParams.algorithmParamsList.map(np => new MetricEvaluator.NameParams(np)),
      serving = new MetricEvaluator.NameParams(engineParams.servingParams))
    implicit lazy val formats = Utils.json4sDefaultFormats
    logger.info(s"Writing best variant params to disk ($outputPath)...")
    // Close the writer in a finally block so the file handle is not leaked when
    // serialization or the write itself throws.
    val writer = new PrintWriter(new File(outputPath))
    try {
      writer.write(writePretty(variant))
    } finally {
      writer.close()
    }
  }

  /** Evaluates every engine-params candidate, logs per-iteration scores, and
    * returns the best candidate according to the primary metric's ordering.
    *
    * @param sc Spark context used by the metrics
    * @param evaluation The evaluation being run
    * @param engineEvalDataSet One (EngineParams, evaluation data) entry per candidate
    * @param params Workflow parameters (unused here but part of the base contract)
    * @return The aggregated [[MetricEvaluatorResult]] for all candidates
    */
  def evaluateBase(
    sc: SparkContext,
    evaluation: Evaluation,
    engineEvalDataSet: Seq[(EngineParams, Seq[(EI, RDD[(Q, P, A)])])],
    params: WorkflowParams): MetricEvaluatorResult[R] = {

    // Score every candidate in parallel; the primary metric and all auxiliary
    // metrics are computed against the same evaluation data set.
    val evalResultList: Seq[(EngineParams, MetricScores[R])] = engineEvalDataSet
      .zipWithIndex
      .par
      .map { case ((engineParams, evalDataSet), idx) =>
        val metricScores = MetricScores[R](
          metric.calculate(sc, evalDataSet),
          otherMetrics.map(_.calculate(sc, evalDataSet)))
        (engineParams, metricScores)
      }
      .seq

    implicit lazy val formats = Utils.json4sDefaultFormats +
      new NameParamsSerializer

    evalResultList.zipWithIndex.foreach { case ((ep, r), idx) =>
      logger.info(s"Iteration $idx")
      logger.info(s"EngineParams: ${JsonExtractor.engineParamsToJson(Both, ep)}")
      logger.info(s"Result: $r")
    }

    // use max. take implicit from Metric.
    val ((bestEngineParams, bestScore), bestIdx) = evalResultList
      .zipWithIndex
      .reduce { (x, y) =>
        if (metric.compare(x._1._2.score, y._1._2.score) >= 0) x else y
      }

    // save engine params if it is set.
    outputPath.foreach { path => saveEngineJson(evaluation, bestEngineParams, path) }

    MetricEvaluatorResult(
      bestScore = bestScore,
      bestEngineParams = bestEngineParams,
      bestIdx = bestIdx,
      metricHeader = metric.header,
      otherMetricHeaders = otherMetrics.map(_.header),
      engineParamsScores = evalResultList,
      outputPath = outputPath)
  }
}
| himanshudhami/PredictionIO | core/src/main/scala/org/apache/predictionio/controller/MetricEvaluator.scala | Scala | apache-2.0 | 8,972 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
/**
* All doc-comments marked as "MDN" are by Mozilla Contributors,
* distributed under the Creative Commons Attribution-ShareAlike license from
* https://developer.mozilla.org/en-US/docs/Web/Reference/API
*/
package scala.scalajs.js
import scala.language.implicitConversions
import scala.scalajs.js
import scala.scalajs.js.annotation._
/** Operations on JavaScript strings.
*
* The methods with an equivalent signature in [[java.lang.String String]] but
* with a different meaning are prefixed by `js` in this trait.
*/
trait JSStringOps extends js.Any {

  /**
   * Returns the index within the calling String object of the first occurrence
   * of the specified value, starting the search at fromIndex,
   *
   * returns -1 if the value is not found.
   *
   * MDN
   */
  @JSName("indexOf")
  def jsIndexOf(searchString: String, position: Int): Int

  // Overload that searches from the beginning of the string.
  @JSName("indexOf")
  def jsIndexOf(searchString: String): Int

  /**
   * Returns the index within the calling String object of the last occurrence
   * of the specified value, or -1 if not found. The calling string is searched
   * backward, starting at `position`.
   *
   * MDN
   */
  @JSName("lastIndexOf")
  def jsLastIndexOf(searchString: String, position: Int): Int

  // Overload that searches backward from the end of the string.
  @JSName("lastIndexOf")
  def jsLastIndexOf(searchString: String): Int

  /**
   * Returns a number indicating whether a reference string comes before or
   * after or is the same as the given string in sort order. The new locales
   * and options arguments let applications specify the language whose sort
   * order should be used and customize the behavior of the function. In older
   * implementations, which ignore the locales and options arguments, the locale
   * and sort order used are entirely implementation dependent.
   *
   * MDN
   */
  def localeCompare(that: String): Int

  /**
   * Used to retrieve the matches when matching a string against a regular
   * expression.
   *
   * If the regular expression does not include the g flag, returns the same
   * result as regexp.exec(string). The returned Array has an extra input
   * property, which contains the original string that was parsed. In addition,
   * it has an index property, which represents the zero-based index of the
   * match in the string.
   *
   * If the regular expression includes the g flag, the method returns an Array
   * containing all matches. If there were no matches, the method returns null.
   *
   * MDN
   */
  def `match`(regexp: String): js.Array[String]

  // Overload taking an already-compiled RegExp instead of a pattern string.
  def `match`(regexp: js.RegExp): js.Array[String]

  /**
   * Returns a new string with some or all matches of a pattern replaced by a
   * replacement. The pattern can be a string or a RegExp, and the replacement
   * can be a string or a function to be called for each match.
   *
   * This method does not change the String object it is called on. It simply
   * returns a new string.
   *
   * To perform a global search and replace, either include the g switch in the
   * regular expression or if the first parameter is a string, include g in the
   * flags parameter.
   *
   * MDN
   */
  @JSName("replace")
  def jsReplace(searchValue: String, replaceValue: String): String

  // Overloads below accept a replacement function (as js.Any) and/or a RegExp
  // pattern; all map to the same underlying JavaScript `replace` method.
  @JSName("replace")
  def jsReplace(searchValue: String, replaceValue: js.Any): String
  @JSName("replace")
  def jsReplace(searchValue: js.RegExp, replaceValue: String): String
  @JSName("replace")
  def jsReplace(searchValue: js.RegExp, replaceValue: js.Any): String

  /**
   * If successful, search returns the index of the regular expression inside
   * the string. Otherwise, it returns -1.
   *
   * When you want to know whether a pattern is found in a string use search
   * (similar to the regular expression test method); for more information
   * (but slower execution) use match (similar to the regular expression exec
   * method).
   *
   * MDN
   */
  def search(regexp: String): Int

  // Overload taking an already-compiled RegExp.
  def search(regexp: js.RegExp): Int

  /**
   * slice extracts the text from one string and returns a new string. Changes
   * to the text in one string do not affect the other string.
   *
   * slice extracts up to but not including endSlice. string.slice(1,4) extracts
   * the second character through the fourth character (characters indexed 1, 2,
   * and 3).
   *
   * As an example, string.slice(2,-1) extracts the third character through the
   * second to last character in the string.
   *
   * MDN
   */
  @JSName("slice")
  def jsSlice(start: Int, end: Int): String

  // Overload that slices from `start` through the end of the string.
  @JSName("slice")
  def jsSlice(start: Int): String

  /**
   * Splits a String object into an array of strings by separating the string
   * into substrings.
   *
   * When found, separator is removed from the string and the substrings are
   * returned in an array. If separator is omitted, the array contains one
   * element consisting of the entire string. If separator is an empty string,
   * string is converted to an array of characters.
   *
   * If separator is a regular expression that contains capturing parentheses,
   * then each time separator is matched, the results (including any undefined
   * results) of the capturing parentheses are spliced into the output array.
   * However, not all browsers support this capability.
   *
   * Note: When the string is empty, split returns an array containing one
   * empty string, rather than an empty array.
   *
   * MDN
   */
  @JSName("split")
  def jsSplit(separator: String, limit: Int): js.Array[String]

  // Overloads without a limit and/or with a RegExp separator; all map to the
  // same underlying JavaScript `split` method.
  @JSName("split")
  def jsSplit(separator: String): js.Array[String]
  @JSName("split")
  def jsSplit(separator: js.RegExp, limit: Int): js.Array[String]
  @JSName("split")
  def jsSplit(separator: js.RegExp): js.Array[String]

  /**
   * Returns a subset of a string between one index and another, or through
   * the end of the string.
   *
   * MDN
   */
  @JSName("substring")
  def jsSubstring(start: Int, end: Int): String

  // Overload returning the substring from `start` through the end of the string.
  @JSName("substring")
  def jsSubstring(start: Int): String

  /**
   * The toLocaleLowerCase method returns the value of the string converted to
   * lower case according to any locale-specific case mappings. toLocaleLowerCase
   * does not affect the value of the string itself. In most cases, this will
   * produce the same result as toLowerCase(), but for some locales, such as
   * Turkish, whose case mappings do not follow the default case mappings in Unicode,
   * there may be a different result.
   *
   * MDN
   */
  def toLocaleLowerCase(): String

  /**
   * The toLocaleUpperCase method returns the value of the string converted to
   * upper case according to any locale-specific case mappings. toLocaleUpperCase
   * does not affect the value of the string itself. In most cases, this will
   * produce the same result as toUpperCase(), but for some locales, such as
   * Turkish, whose case mappings do not follow the default case mappings in Unicode,
   * there may be a different result.
   *
   * MDN
   */
  def toLocaleUpperCase(): String

  /** <span class="badge badge-ecma6" style="float: right;">ECMAScript 6</span>
   *  Returns the Unicode Normalization Form of this string.
   */
  def normalize(form: js.UnicodeNormalizationForm): String

  /** <span class="badge badge-ecma6" style="float: right;">ECMAScript 6</span>
   *  Returns the Unicode Normalization Form of this string, with the NFC form.
   */
  def normalize(): String
}
object JSStringOps {
  /** Implicit conversion exposing the [[JSStringOps]] operations on primitive
   *  strings. Implemented as a cast to the `js.JSStringOps` facade type.
   */
  implicit def enableJSStringOps(x: String): js.JSStringOps =
    x.asInstanceOf[js.JSStringOps]
}
| scala-js/scala-js | library/src/main/scala/scala/scalajs/js/JSStringOps.scala | Scala | apache-2.0 | 7,731 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{File, FileInputStream, FileOutputStream}
import java.net.URI
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap => MutableHashMap}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.YarnClientApplication
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.util.Records
import org.mockito.ArgumentMatchers.{any, anyBoolean, anyShort, eq => meq}
import org.mockito.Mockito.{spy, verify}
import org.scalatest.Matchers
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite, TestUtils}
import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.config._
import org.apache.spark.resource.ResourceID
import org.apache.spark.util.{SparkConfWithEnv, Utils}
class ClientSuite extends SparkFunSuite with Matchers {

  // Wraps Mockito's Java varargs `doReturn` overload, passing an explicit empty
  // varargs tail so Scala resolves the intended overload unambiguously.
  private def doReturn(value: Any) = org.mockito.Mockito.doReturn(value, Seq.empty: _*)

  import Client._

  // NOTE(review): not touched in the visible portion of this suite — presumably
  // saved/restored by setup/teardown further down; confirm before removing.
  var oldSystemProperties: Properties = null
  // The default YARN application classpath comes from Fixtures' known constant.
  test("default Yarn application classpath") {
    getDefaultYarnApplicationClasspath should be(Fixtures.knownDefYarnAppCP)
  }

  // The default MapReduce application classpath comes from Fixtures' known constant.
  test("default MR application classpath") {
    getDefaultMRApplicationClasspath should be(Fixtures.knownDefMRAppCP)
  }

  // A YARN-specific classpath in the app conf is prepended to the default MR classpath.
  test("resultant classpath for an application that defines a classpath for YARN") {
    withAppConf(Fixtures.mapYARNAppConf) { conf =>
      val env = newEnv
      populateHadoopClasspath(conf, env)
      classpath(env) should be(Fixtures.knownYARNAppCP +: getDefaultMRApplicationClasspath)
    }
  }

  // An MR-specific classpath in the app conf is appended to the default YARN classpath.
  test("resultant classpath for an application that defines a classpath for MR") {
    withAppConf(Fixtures.mapMRAppConf) { conf =>
      val env = newEnv
      populateHadoopClasspath(conf, env)
      classpath(env) should be(getDefaultYarnApplicationClasspath :+ Fixtures.knownMRAppCP)
    }
  }

  // When both classpaths are defined, the result contains exactly the two custom entries.
  test("resultant classpath for an application that defines both classpaths, YARN and MR") {
    withAppConf(Fixtures.mapAppConf) { conf =>
      val env = newEnv
      populateHadoopClasspath(conf, env)
      classpath(env) should be(Array(Fixtures.knownYARNAppCP, Fixtures.knownMRAppCP))
    }
  }
  // Fixture URIs used by the classpath/distribution tests below.
  // "local:" entries stay on the node-local filesystem; "/addJar3" has no scheme
  // and is therefore distributed. PWD is YARN's literal working-dir placeholder.
  private val SPARK = "local:/sparkJar"
  private val USER = "local:/userJar"
  private val ADDED = "local:/addJar1,local:/addJar2,/addJar3"
  private val PWD = "{{PWD}}"
  // local:-scheme jars must appear on the launch classpath by path, while
  // distributed jars must not; the working dir placeholder and the localized
  // conf dir must always be present, and the app jar alias must not be.
  test("Local jar URIs") {
    val conf = new Configuration()
    val sparkConf = new SparkConf()
      .set(SPARK_JARS, Seq(SPARK))
      .set(USER_CLASS_PATH_FIRST, true)
      .set("spark.yarn.dist.jars", ADDED)
    val env = new MutableHashMap[String, String]()
    val args = new ClientArguments(Array("--jar", USER))

    populateClasspath(args, conf, sparkConf, env)

    // Split on any platform's classpath separator (':', ';', or YARN's <CPS> token).
    val cp = env("CLASSPATH").split(":|;|<CPS>")
    s"$SPARK,$USER,$ADDED".split(",").foreach({ entry =>
      val uri = new URI(entry)
      if (Utils.LOCAL_SCHEME.equals(uri.getScheme())) {
        cp should contain (uri.getPath())
      } else {
        cp should not contain (uri.getPath())
      }
    })
    cp should not contain ("local")
    cp should contain(PWD)
    cp should contain (s"$PWD${Path.SEPARATOR}${LOCALIZED_CONF_DIR}")
    cp should not contain (APP_JAR)
  }
  // After prepareLocalResources, the user jar and the secondary jars must be
  // recorded in SparkConf: local: jars by full URI, distributed jars by file
  // name only (they end up in the app's staging directory).
  test("Jar path propagation through SparkConf") {
    val conf = new Configuration()
    val sparkConf = new SparkConf()
      .set(SPARK_JARS, Seq(SPARK))
      .set("spark.yarn.dist.jars", ADDED)
    val client = createClient(sparkConf, args = Array("--jar", USER))
    // Stub out the actual upload so no real files are needed.
    doReturn(new Path("/")).when(client).copyFileToRemote(any(classOf[Path]),
      any(classOf[Path]), anyShort(), any(classOf[MutableHashMap[URI, Path]]), anyBoolean(), any())

    val tempDir = Utils.createTempDir()
    try {
      // Because we mocked "copyFileToRemote" above to avoid having to create fake local files,
      // we need to create a fake config archive in the temp dir to avoid having
      // prepareLocalResources throw an exception.
      new FileOutputStream(new File(tempDir, LOCALIZED_CONF_ARCHIVE)).close()

      client.prepareLocalResources(new Path(tempDir.getAbsolutePath()), Nil)
      sparkConf.get(APP_JAR) should be (Some(USER))

      // The non-local path should be propagated by name only, since it will end up in the app's
      // staging dir.
      val expected = ADDED.split(",")
        .map(p => {
          val uri = new URI(p)
          if (Utils.LOCAL_SCHEME == uri.getScheme()) {
            p
          } else {
            Option(uri.getFragment()).getOrElse(new File(p).getName())
          }
        })
        .mkString(",")
      sparkConf.get(SECONDARY_JARS) should be (Some(expected.split(",").toSeq))
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }
  // Gateway-root paths must be rewritten to the configured replacement root,
  // both directly via getClusterPath and when building the classpath.
  test("Cluster path translation") {
    val conf = new Configuration()
    val sparkConf = new SparkConf()
      .set(SPARK_JARS, Seq("local:/localPath/spark.jar"))
      .set(GATEWAY_ROOT_PATH, "/localPath")
      .set(REPLACEMENT_ROOT_PATH, "/remotePath")

    getClusterPath(sparkConf, "/localPath") should be ("/remotePath")
    // Translation applies to every entry of a separator-joined path list.
    getClusterPath(sparkConf, "/localPath/1:/localPath/2") should be (
      "/remotePath/1:/remotePath/2")

    val env = new MutableHashMap[String, String]()
    populateClasspath(null, conf, sparkConf, env, extraClassPath = Some("/localPath/my1.jar"))
    val cp = classpath(env)
    cp should contain ("/remotePath/spark.jar")
    cp should contain ("/remotePath/my1.jar")
  }
  // App name, queue, container spec, application type, tags, and max attempts
  // from SparkConf must all be propagated into the ApplicationSubmissionContext.
  test("configuration and args propagate through createApplicationSubmissionContext") {
    // When parsing tags, duplicates and leading/trailing whitespace should be removed.
    // Spaces between non-comma strings should be preserved as single tags. Empty strings may or
    // may not be removed depending on the version of Hadoop being used.
    val sparkConf = new SparkConf()
      .set(APPLICATION_TAGS.key, ",tag1, dup,tag2 , ,multi word , dup")
      .set(MAX_APP_ATTEMPTS, 42)
      .set("spark.app.name", "foo-test-app")
      .set(QUEUE_NAME, "staging-queue")
    val args = new ClientArguments(Array())

    val appContext = Records.newRecord(classOf[ApplicationSubmissionContext])
    val getNewApplicationResponse = Records.newRecord(classOf[GetNewApplicationResponse])
    val containerLaunchContext = Records.newRecord(classOf[ContainerLaunchContext])

    val client = new Client(args, sparkConf, null)
    client.createApplicationSubmissionContext(
      new YarnClientApplication(getNewApplicationResponse, appContext),
      containerLaunchContext)

    appContext.getApplicationName should be ("foo-test-app")
    appContext.getQueue should be ("staging-queue")
    appContext.getAMContainerSpec should be (containerLaunchContext)
    appContext.getApplicationType should be ("SPARK")

    // getApplicationTags only exists on newer Hadoop versions, so look it up
    // reflectively and only assert when present.
    appContext.getClass.getMethods.filter(_.getName == "getApplicationTags").foreach { method =>
      val tags = method.invoke(appContext).asInstanceOf[java.util.Set[String]]
      tags should contain allOf ("tag1", "dup", "tag2", "multi word")
      tags.asScala.count(_.nonEmpty) should be (4)
    }
    appContext.getMaxAppAttempts should be (42)
  }
  // spark.yarn.jars entries may be globs, plain paths, or local: URIs. Non-local
  // entries must be uploaded and served from the localized lib dir; local: entries
  // must remain in the config and appear on the classpath untouched.
  test("spark.yarn.jars with multiple paths and globs") {
    val libs = Utils.createTempDir()
    val single = Utils.createTempDir()
    val jar1 = TestUtils.createJarWithFiles(Map(), libs)
    val jar2 = TestUtils.createJarWithFiles(Map(), libs)
    val jar3 = TestUtils.createJarWithFiles(Map(), single)
    val jar4 = TestUtils.createJarWithFiles(Map(), single)

    val jarsConf = Seq(
      s"${libs.getAbsolutePath()}/*",
      jar3.getPath(),
      s"local:${jar4.getPath()}",
      s"local:${single.getAbsolutePath()}/*")

    val sparkConf = new SparkConf().set(SPARK_JARS, jarsConf)
    val client = createClient(sparkConf)

    val tempDir = Utils.createTempDir()
    client.prepareLocalResources(new Path(tempDir.getAbsolutePath()), Nil)

    // Only the local: entries survive in the config after localization.
    assert(sparkConf.get(SPARK_JARS) ===
      Some(Seq(s"local:${jar4.getPath()}", s"local:${single.getAbsolutePath()}/*")))

    // jar1/jar2 (matched by the glob) and jar3 (explicit path) must be uploaded.
    verify(client).copyFileToRemote(any(classOf[Path]), meq(new Path(jar1.toURI())), anyShort(),
      any(classOf[MutableHashMap[URI, Path]]), anyBoolean(), any())
    verify(client).copyFileToRemote(any(classOf[Path]), meq(new Path(jar2.toURI())), anyShort(),
      any(classOf[MutableHashMap[URI, Path]]), anyBoolean(), any())
    verify(client).copyFileToRemote(any(classOf[Path]), meq(new Path(jar3.toURI())), anyShort(),
      any(classOf[MutableHashMap[URI, Path]]), anyBoolean(), any())

    val cp = classpath(client)
    cp should contain (buildPath(PWD, LOCALIZED_LIB_DIR, "*"))
    cp should not contain (jar3.getPath())
    cp should contain (jar4.getPath())
    cp should contain (buildPath(single.getAbsolutePath(), "*"))
  }
  // A spark.yarn.archive must be uploaded and the localized lib dir added to
  // the classpath; a local:-scheme archive is rejected.
  test("distribute jars archive") {
    val temp = Utils.createTempDir()
    val archive = TestUtils.createJarWithFiles(Map(), temp)

    val sparkConf = new SparkConf().set(SPARK_ARCHIVE, archive.getPath())
    val client = createClient(sparkConf)
    client.prepareLocalResources(new Path(temp.getAbsolutePath()), Nil)

    verify(client).copyFileToRemote(any(classOf[Path]), meq(new Path(archive.toURI())), anyShort(),
      any(classOf[MutableHashMap[URI, Path]]), anyBoolean(), any())
    classpath(client) should contain (buildPath(PWD, LOCALIZED_LIB_DIR, "*"))

    sparkConf.set(SPARK_ARCHIVE, Utils.LOCAL_SCHEME + ":" + archive.getPath())
    intercept[IllegalArgumentException] {
      client.prepareLocalResources(new Path(temp.getAbsolutePath()), Nil)
    }
  }
  // Duplicate entries across or within FILES_TO_DISTRIBUTE / ARCHIVES_TO_DISTRIBUTE
  // must be rejected, while unique entries are accepted.
  test("distribute archive multiple times") {
    val libs = Utils.createTempDir()
    // Create jars dir and RELEASE file to avoid IllegalStateException.
    val jarsDir = new File(libs, "jars")
    assert(jarsDir.mkdir())
    new FileOutputStream(new File(libs, "RELEASE")).close()
    val userLib1 = Utils.createTempDir()
    val testJar = TestUtils.createJarWithFiles(Map(), userLib1)

    // Case 1: FILES_TO_DISTRIBUTE and ARCHIVES_TO_DISTRIBUTE can't have duplicate files
    val sparkConf = new SparkConfWithEnv(Map("SPARK_HOME" -> libs.getAbsolutePath))
      .set(FILES_TO_DISTRIBUTE, Seq(testJar.getPath))
      .set(ARCHIVES_TO_DISTRIBUTE, Seq(testJar.getPath))

    val client = createClient(sparkConf)
    val tempDir = Utils.createTempDir()
    intercept[IllegalArgumentException] {
      client.prepareLocalResources(new Path(tempDir.getAbsolutePath()), Nil)
    }

    // Case 2: FILES_TO_DISTRIBUTE can't have duplicate files.
    val sparkConfFiles = new SparkConfWithEnv(Map("SPARK_HOME" -> libs.getAbsolutePath))
      .set(FILES_TO_DISTRIBUTE, Seq(testJar.getPath, testJar.getPath))

    val clientFiles = createClient(sparkConfFiles)
    val tempDirForFiles = Utils.createTempDir()
    intercept[IllegalArgumentException] {
      clientFiles.prepareLocalResources(new Path(tempDirForFiles.getAbsolutePath()), Nil)
    }

    // Case 3: ARCHIVES_TO_DISTRIBUTE can't have duplicate files.
    val sparkConfArchives = new SparkConfWithEnv(Map("SPARK_HOME" -> libs.getAbsolutePath))
      .set(ARCHIVES_TO_DISTRIBUTE, Seq(testJar.getPath, testJar.getPath))

    val clientArchives = createClient(sparkConfArchives)
    val tempDirForArchives = Utils.createTempDir()
    intercept[IllegalArgumentException] {
      clientArchives.prepareLocalResources(new Path(tempDirForArchives.getAbsolutePath()), Nil)
    }

    // Case 4: FILES_TO_DISTRIBUTE can have unique file.
    val sparkConfFilesUniq = new SparkConfWithEnv(Map("SPARK_HOME" -> libs.getAbsolutePath))
      .set(FILES_TO_DISTRIBUTE, Seq(testJar.getPath))

    val clientFilesUniq = createClient(sparkConfFilesUniq)
    val tempDirForFilesUniq = Utils.createTempDir()
    clientFilesUniq.prepareLocalResources(new Path(tempDirForFilesUniq.getAbsolutePath()), Nil)

    // Case 5: ARCHIVES_TO_DISTRIBUTE can have unique file.
    val sparkConfArchivesUniq = new SparkConfWithEnv(Map("SPARK_HOME" -> libs.getAbsolutePath))
      .set(ARCHIVES_TO_DISTRIBUTE, Seq(testJar.getPath))

    val clientArchivesUniq = createClient(sparkConfArchivesUniq)
    val tempDirArchivesUniq = Utils.createTempDir()
    clientArchivesUniq.prepareLocalResources(new Path(tempDirArchivesUniq.getAbsolutePath()), Nil)
  }
// Verifies that jars under $SPARK_HOME/jars are localized and that the localized
// lib dir is added to the classpath.
test("distribute local spark jars") {
  val temp = Utils.createTempDir()
  val jarsDir = new File(temp, "jars")
  assert(jarsDir.mkdir())
  // The jar only needs to exist on disk; the returned URL is intentionally unused.
  val jar = TestUtils.createJarWithFiles(Map(), jarsDir)
  new FileOutputStream(new File(temp, "RELEASE")).close()
  val sparkConf = new SparkConfWithEnv(Map("SPARK_HOME" -> temp.getAbsolutePath()))
  val client = createClient(sparkConf)
  client.prepareLocalResources(new Path(temp.getAbsolutePath()), Nil)
  classpath(client) should contain (buildPath(PWD, LOCALIZED_LIB_DIR, "*"))
}
// Verifies that when two user jars in JARS_TO_DISTRIBUTE share a file name, only the
// first is recorded in SECONDARY_JARS and the later same-named jar is ignored.
test("ignore same name jars") {
  val libs = Utils.createTempDir()
  val jarsDir = new File(libs, "jars")
  assert(jarsDir.mkdir())
  new FileOutputStream(new File(libs, "RELEASE")).close()
  val userLib1 = Utils.createTempDir()
  val userLib2 = Utils.createTempDir()
  val jar1 = TestUtils.createJarWithFiles(Map(), jarsDir)
  val jar2 = TestUtils.createJarWithFiles(Map(), userLib1)
  // Copy jar2 to jar3 with same name
  val jar3 = {
    val target = new File(userLib2, new File(jar2.toURI).getName)
    val input = new FileInputStream(jar2.getPath)
    val output = new FileOutputStream(target)
    Utils.copyStream(input, output, closeStreams = true)
    target.toURI.toURL
  }
  val sparkConf = new SparkConfWithEnv(Map("SPARK_HOME" -> libs.getAbsolutePath))
    .set(JARS_TO_DISTRIBUTE, Seq(jar2.getPath, jar3.getPath))
  val client = createClient(sparkConf)
  val tempDir = Utils.createTempDir()
  client.prepareLocalResources(new Path(tempDir.getAbsolutePath()), Nil)
  // Only jar2 will be added to SECONDARY_JARS, jar3 which has the same name with jar2 will be
  // ignored.
  sparkConf.get(SECONDARY_JARS) should be (Some(Seq(new File(jar2.toURI).getName)))
}
// Generates one test per deploy mode: custom YARN resource types configured through
// the mode-specific prefix (AM for client mode, driver for cluster mode) must show
// up in the submitted application's resource request.
Seq(
  "client" -> YARN_AM_RESOURCE_TYPES_PREFIX,
  "cluster" -> YARN_DRIVER_RESOURCE_TYPES_PREFIX
).foreach { case (deployMode, prefix) =>
  test(s"custom resource request ($deployMode mode)") {
    // Skip on Hadoop versions without YARN resource-type support.
    assume(ResourceRequestHelper.isYarnResourceTypesAvailable())
    val resources = Map("fpga" -> 2, "gpu" -> 3)
    ResourceRequestTestHelper.initializeResourceTypes(resources.keys.toSeq)
    val conf = new SparkConf().set(SUBMIT_DEPLOY_MODE, deployMode)
    resources.foreach { case (name, v) =>
      conf.set(prefix + name, v.toString)
    }
    val appContext = Records.newRecord(classOf[ApplicationSubmissionContext])
    val getNewApplicationResponse = Records.newRecord(classOf[GetNewApplicationResponse])
    val containerLaunchContext = Records.newRecord(classOf[ContainerLaunchContext])
    val client = new Client(new ClientArguments(Array()), conf, null)
    client.createApplicationSubmissionContext(
      new YarnClientApplication(getNewApplicationResponse, appContext),
      containerLaunchContext)
    // Every configured resource must be present with the requested amount.
    resources.foreach { case (name, value) =>
      ResourceRequestTestHelper.getRequestedValue(appContext.getResource, name) should be (value)
    }
  }
}
// Verifies that configuring the same driver resource via both the YARN-style config
// (spark.yarn.driver.resource.*) and the Spark-style config (spark.driver.resource.*)
// is rejected with a message pointing to the preferred Spark-style key.
test("custom driver resource request yarn config and spark config fails") {
  assume(ResourceRequestHelper.isYarnResourceTypesAvailable())
  val resources = Map(YARN_GPU_RESOURCE_CONFIG -> "gpu", YARN_FPGA_RESOURCE_CONFIG -> "fpga")
  ResourceRequestTestHelper.initializeResourceTypes(resources.keys.toSeq)
  val conf = new SparkConf().set(SUBMIT_DEPLOY_MODE, "cluster")
  resources.keys.foreach { yarnName =>
    conf.set(s"${YARN_DRIVER_RESOURCE_TYPES_PREFIX}${yarnName}", "2")
  }
  resources.values.foreach { rName =>
    conf.set(ResourceID(SPARK_DRIVER_PREFIX, rName).amountConf, "3")
  }
  val error = intercept[SparkException] {
    ResourceRequestHelper.validateResources(conf)
  }.getMessage()
  assert(error.contains("Do not use spark.yarn.driver.resource.yarn.io/fpga," +
    " please use spark.driver.resource.fpga.amount"))
  assert(error.contains("Do not use spark.yarn.driver.resource.yarn.io/gpu," +
    " please use spark.driver.resource.gpu.amount"))
}
// Executor-side counterpart of the previous test: duplicated YARN-style and
// Spark-style executor resource configs must be rejected.
test("custom executor resource request yarn config and spark config fails") {
  assume(ResourceRequestHelper.isYarnResourceTypesAvailable())
  val resources = Map(YARN_GPU_RESOURCE_CONFIG -> "gpu", YARN_FPGA_RESOURCE_CONFIG -> "fpga")
  ResourceRequestTestHelper.initializeResourceTypes(resources.keys.toSeq)
  val conf = new SparkConf().set(SUBMIT_DEPLOY_MODE, "cluster")
  resources.keys.foreach { yarnName =>
    conf.set(s"${YARN_EXECUTOR_RESOURCE_TYPES_PREFIX}${yarnName}", "2")
  }
  resources.values.foreach { rName =>
    conf.set(ResourceID(SPARK_EXECUTOR_PREFIX, rName).amountConf, "3")
  }
  val error = intercept[SparkException] {
    ResourceRequestHelper.validateResources(conf)
  }.getMessage()
  assert(error.contains("Do not use spark.yarn.executor.resource.yarn.io/fpga," +
    " please use spark.executor.resource.fpga.amount"))
  assert(error.contains("Do not use spark.yarn.executor.resource.yarn.io/gpu," +
    " please use spark.executor.resource.gpu.amount"))
}
// Verifies that spark.driver.resource.* configs are translated into the equivalent
// YARN resource requests, and that a YARN-only resource config with no Spark
// translation is passed through as-is.
test("custom resources spark config mapped to yarn config") {
  assume(ResourceRequestHelper.isYarnResourceTypesAvailable())
  val yarnMadeupResource = "yarn.io/madeup"
  val resources = Map(YARN_GPU_RESOURCE_CONFIG -> "gpu",
    YARN_FPGA_RESOURCE_CONFIG -> "fpga",
    yarnMadeupResource -> "madeup")
  ResourceRequestTestHelper.initializeResourceTypes(resources.keys.toSeq)
  val conf = new SparkConf().set(SUBMIT_DEPLOY_MODE, "cluster")
  resources.values.foreach { rName =>
    conf.set(ResourceID(SPARK_DRIVER_PREFIX, rName).amountConf, "3")
  }
  // also just set yarn one that we don't convert
  conf.set(YARN_DRIVER_RESOURCE_TYPES_PREFIX + yarnMadeupResource, "5")
  val appContext = Records.newRecord(classOf[ApplicationSubmissionContext])
  val getNewApplicationResponse = Records.newRecord(classOf[GetNewApplicationResponse])
  val containerLaunchContext = Records.newRecord(classOf[ContainerLaunchContext])
  val client = new Client(new ClientArguments(Array()), conf, null)
  val newContext = client.createApplicationSubmissionContext(
    new YarnClientApplication(getNewApplicationResponse, appContext),
    containerLaunchContext)
  val yarnRInfo = ResourceRequestTestHelper.getResources(newContext.getResource)
  val allResourceInfo = yarnRInfo.map(rInfo => (rInfo.name -> rInfo.value)).toMap
  // Use Option.contains instead of the original .nonEmpty + .get pairs: one
  // assertion per resource and no unsafe Option.get.
  assert(allResourceInfo.get(YARN_GPU_RESOURCE_CONFIG).contains(3))
  assert(allResourceInfo.get(YARN_FPGA_RESOURCE_CONFIG).contains(3))
  assert(allResourceInfo.get(yarnMadeupResource).contains(5))
}
// URI pairs that Client.compareUri must treat as belonging to the same file system
// (same scheme/authority), i.e. no re-upload is needed.
private val matching = Seq(
  ("files URI match test1", "file:///file1", "file:///file2"),
  ("files URI match test2", "file:///c:file1", "file://c:file2"),
  ("files URI match test3", "file://host/file1", "file://host/file2"),
  ("wasb URI match test", "wasb://bucket1@user", "wasb://bucket1@user/"),
  ("hdfs URI match test", "hdfs:/path1", "hdfs:/path1")
)
// Generates one test per tuple of (test name, uri1, uri2).
matching.foreach { t =>
  test(t._1) {
    assert(Client.compareUri(new URI(t._2), new URI(t._3)),
      s"No match between ${t._2} and ${t._3}")
  }
}
// URI pairs that Client.compareUri must treat as different file systems
// (differing host, port, or user info).
private val unmatching = Seq(
  ("files URI unmatch test1", "file:///file1", "file://host/file2"),
  ("files URI unmatch test2", "file://host/file1", "file:///file2"),
  ("files URI unmatch test3", "file://host/file1", "file://host2/file2"),
  ("wasb URI unmatch test1", "wasb://bucket1@user", "wasb://bucket2@user/"),
  ("wasb URI unmatch test2", "wasb://bucket1@user", "wasb://bucket1@user2/"),
  ("s3 URI unmatch test", "s3a://user@pass:bucket1/", "s3a://user2@pass2:bucket1/"),
  ("hdfs URI unmatch test1", "hdfs://namenode1/path1", "hdfs://namenode1:8080/path2"),
  ("hdfs URI unmatch test2", "hdfs://namenode1:8020/path1", "hdfs://namenode1:8080/path2")
)
// Generates one negative test per tuple of (test name, uri1, uri2).
unmatching.foreach { t =>
  test(t._1) {
    assert(!Client.compareUri(new URI(t._2), new URI(t._3)),
      s"match between ${t._2} and ${t._3}")
  }
}
// Shared classpath fixtures: the "knownDef*" values capture the YARN/MapReduce
// defaults, while the map values inject known custom classpaths via Configuration.
object Fixtures {
  val knownDefYarnAppCP: Seq[String] =
    YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq
  val knownDefMRAppCP: Seq[String] =
    MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH.split(",").toSeq
  val knownYARNAppCP = "/known/yarn/path"
  val knownMRAppCP = "/known/mr/path"
  val mapMRAppConf = Map("mapreduce.application.classpath" -> knownMRAppCP)
  val mapYARNAppConf = Map(YarnConfiguration.YARN_APPLICATION_CLASSPATH -> knownYARNAppCP)
  val mapAppConf = mapYARNAppConf ++ mapMRAppConf
}
/**
 * Runs `testCode` against a fresh Hadoop Configuration pre-populated with the
 * entries in `m` (each tagged with source "ClientSpec" for traceability).
 *
 * Fix: the original used procedure syntax (`def f { ... }`), which is deprecated;
 * the explicit `: Unit =` form is equivalent and future-proof.
 */
def withAppConf(m: Map[String, String] = Map())(testCode: (Configuration) => Any): Unit = {
  val conf = new Configuration
  m.foreach { case (k, v) => conf.set(k, v, "ClientSpec") }
  testCode(conf)
}
/** Builds a fresh, empty mutable environment map for the classpath helpers. */
def newEnv: MutableHashMap[String, String] = MutableHashMap.empty[String, String]
/** Extracts the CLASSPATH entry from `env`, split on ':', ';' or the YARN <CPS> token. */
def classpath(env: MutableHashMap[String, String]): Array[String] = {
  val rawClasspath = env(Environment.CLASSPATH.name)
  rawClasspath.split(":|;|<CPS>")
}
/**
 * Builds a Mockito spy of a Client so tests can verify interactions such as
 * copyFileToRemote. No Hadoop services object is supplied (null).
 */
private def createClient(
    sparkConf: SparkConf,
    args: Array[String] = Array()): Client = {
  val realClient = new Client(new ClientArguments(args), sparkConf, null)
  spy(realClient)
}
/** Populates a fresh env map via populateClasspath and returns the resulting entries. */
private def classpath(client: Client): Array[String] = {
  val env = MutableHashMap.empty[String, String]
  populateClasspath(null, new Configuration(), client.sparkConf, env)
  classpath(env)
}
}
| aosagie/spark | resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/ClientSuite.scala | Scala | apache-2.0 | 22,923 |
/*
* Copyright (c) 2012, The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.queue.pipeline
/**
 * Data validations to evaluate on a GATKReport.
 *
 * Mutable JavaBean-style holder (Queue pipeline convention): fields are assigned
 * by the test script after construction.
 */
class PipelineTestEvalSpec {
  /** Eval modules to output. Unset (`_`, i.e. null) until assigned by the caller. */
  var evalReport: String = _
  /** Validations to assert. Empty by default. */
  var validations: Seq[PipelineValidation[_]] = Nil
}
/** A VariantEval JEXL and range of values to validate. */
// Base for concrete validations: `target` is the expected value and [min, max] the
// accepted range; subclasses supply parsing and the range check for their type T.
abstract class PipelineValidation[T <: AnyVal](val table: String, val key: String, val metric: String, val target: T, val min: T, val max: T) {
  def parse(x: String): T
  def inRange(x: String): Boolean
}
/** A VariantEval JEXL and target to validate within a 1% tolerance. */
class IntegerValidation(table: String, key: String, metric: String, target: Int)
  extends PipelineValidation[Int](table, key, metric, target,
    (target * .99).floor.toInt, (target * 1.01).ceil.toInt) {
  def parse(x: String) = x.toInt
  // Fix: parse the value once instead of twice as in the original (parse is pure,
  // so behavior is unchanged; a non-numeric `x` still throws before comparing).
  def inRange(x: String) = { val v = parse(x); v >= min && v <= max }
}
/** A VariantEval JEXL and target to validate within a 1% tolerance. */
class DoubleValidation(table: String, key: String, metric: String, target: Double)
  extends PipelineValidation(table, key, metric, target,
    (target * 99).floor / 100, (target * 101).ceil / 100) {
  def parse(x: String) = x.toDouble
  // Fix: parse the value once instead of twice as in the original (parse is pure,
  // so behavior is unchanged; a non-numeric `x` still throws before comparing).
  def inRange(x: String) = { val v = parse(x); v >= min && v <= max }
}
| iontorrent/Torrent-Variant-Caller-stable | public/scala/test/org/broadinstitute/sting/queue/pipeline/PipelineTestEvalSpec.scala | Scala | mit | 2,454 |
package com.ibm.spark.kernel.api
/**
 * Represents the methods available to stream data from the kernel to the
 * client.
 */
trait StreamMethodsLike {
  /**
   * Sends all text provided as one stream message to the client.
   *
   * Implementations deliver `text` as a single message rather than splitting
   * it across multiple stream frames.
   * @param text The text to wrap in a stream message
   */
  def sendAll(text: String): Unit
}
| yeghishe/spark-kernel | kernel-api/src/main/scala/com/ibm/spark/kernel/api/StreamMethodsLike.scala | Scala | apache-2.0 | 321 |
import scala.language._
// Covariant marker type used only as implicit evidence in the regression test below.
trait R[+Repr]
// Regression test for a scalac crash (SI-7785): resolving `t.force` through the
// implicit conversion `ct2ops`, combined with the duplicated implicit-taking
// `force` overloads, used to crash the compiler on 2.10.2. This file exists to
// exercise the compiler — do not "clean up" the deliberately odd shape
// (???-implemented implicit, abstract implicit conversion, duplicated methods).
trait TraversableOps {
  implicit val R: R[Nothing] = ???
  // Removing the implicit parameter in both fixes the crash
  // removing it into one only gives a valid compiler error.
  trait OpsDup1[Repr] {
    def force(implicit bf: R[Repr]): Any
  }
  trait Ops[Repr] extends OpsDup1[Repr] {
    def force(implicit bf: R[Repr], dummy: DummyImplicit): Any
  }
  implicit def ct2ops[T, C[+X]](t: C[T]):
    Ops[C[T]]
  def force[T](t: Option[T]) =
    // ct2ops(t).force
    t.force //Fails compilation on 2.10.2.
  /* To get a closer look at the crash:
  :power
  val foo = typeOf[C].member(TermName("foo"))
  val pt = analyzer.HasMember(TermName("force"))
  val instantiated = foo.info.finalResultType.instantiateTypeParams(foo.typeParams, foo.typeParams.map(TypeVar(_)))
  instantiated <:< pt
  */
  def foo[T, C[+X]]: Ops[C[T]]
}
| densh/dotty | tests/pos/t7785.scala | Scala | bsd-3-clause | 878 |
/*
* Copyright (C) 2015 Noorq, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.domain
import scala.concurrent.ExecutionContext.Implicits.global
import com.mailrest.maildal.model.Template
import com.mailrest.maildal.model.TemplateEngineType
import controllers.action.DomainRequest
import play.api.data.Form
import play.api.data.Forms._
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import play.api.libs.json.Writes
import play.api.mvc.AnyContent
import scaldi.Injector
import services.TemplateBean
import services.TemplateId
import services.TemplateService
import services.TemplateInfo
/**
 * Play controller for CRUD operations on mail templates scoped to a domain.
 * Dependencies are wired via Scaldi; all actions run under `domainAction`,
 * which resolves the domain context for the authenticated request.
 */
class TemplateController(implicit inj: Injector) extends AbstractDomainController {
  val templateService = inject [TemplateService]
  // JSON serializer for template lookups returned by `find`.
  implicit val templateWrites = new Writes[TemplateInfo] {
    override def writes(t: TemplateInfo): JsValue = {
      Json.obj(
        "displayName" -> t.template.displayName,
        "description" -> t.template.description,
        "engine" -> t.template.engine.name,
        "fromRecipients" -> t.template.fromRecipients,
        "bccRecipients" -> t.template.bccRecipients,
        "subject" -> t.template.subject,
        "textBody" -> t.template.textBody,
        "htmlBody" -> t.template.htmlBody,
        "deployedAt" -> t.deployedAt
      )
    }
  }
  // Form mapping shared by create and update; all template fields except the
  // engine name are optional.
  val templateMapping = mapping(
    "name" -> optional(text),
    "description" -> optional(text),
    "engine" -> nonEmptyText,
    "fromRecipients" -> optional(text),
    "bccRecipients" -> optional(text),
    "subject" -> optional(text),
    "textBody" -> optional(text),
    "htmlBody" -> optional(text)
  )(TemplateForm.apply)(TemplateForm.unapply)
  // Create wraps the template mapping with an explicit templateId and environment.
  val newTemplateForm = Form(
    mapping(
      "templateId" -> nonEmptyText,
      "env" -> nonEmptyText,
      "template" -> templateMapping
    )(NewTemplateForm.apply)(NewTemplateForm.unapply)
  )
  val templateForm = Form(templateMapping)
  // Builds the composite template key from the request's domain context.
  // NOTE(review): `request.domainContext.get` throws if the context is absent —
  // presumably domainAction guarantees it is set; confirm against the base class.
  def makeId(request: DomainRequest[AnyContent], templateId: String, env: String): TemplateId = {
    new TemplateId(
      request.domainContext.get.id.accountId,
      request.domainContext.get.id.domainId,
      templateId,
      env
    )
  }
  // Converts a bound form into the DAL bean; unset optional fields become "".
  def makeBean(form: TemplateForm): TemplateBean = {
    new TemplateBean(
      form.name.getOrElse(""),
      form.description.getOrElse(""),
      TemplateEngineType.valueOf(form.engine),
      form.fromRecipients.getOrElse(""),
      form.bccRecipients.getOrElse(""),
      form.subject.getOrElse(""),
      form.textBody.getOrElse(""),
      form.htmlBody.getOrElse("")
    )
  }
  // NOTE(review): `bindFromRequest.get` (here and in `update`) throws
  // NoSuchElementException on invalid input, producing a 500 instead of a 400;
  // consider Form.fold with a BadRequest branch.
  def create(domIdn: String) = domainAction(domIdn).async {
    implicit request => {
      val form = newTemplateForm.bindFromRequest.get
      templateService.update(
        makeId(request, form.templateId, form.env),
        makeBean(form.template)
      ).map { x => Ok }
    }
  }
  // Returns the template as JSON, or 404 when the id/env combination is unknown.
  def find(domIdn: String, tplId: String, env: String) = domainAction(domIdn).async {
    implicit request => {
      templateService.find(makeId(request, tplId, env)).map { x => {
        x match {
          case Some(t) => Ok(Json.toJson(t))
          case None => NotFound
        }
      } }
    }
  }
  // Upserts the template identified by (domain, tplId, env).
  def update(domIdn: String, tplId: String, env: String) = domainAction(domIdn).async {
    implicit request => {
      val form = templateForm.bindFromRequest.get
      templateService.update(
        makeId(request, tplId, env),
        makeBean(form)
      ).map { x => Ok }
    }
  }
  // Deletes the template; a missing deployedAt timestamp defaults to 0.
  def delete(domIdn: String, tplId: String, env: String, deployedAt: Option[Long]) = domainAction(domIdn).async {
    implicit request => {
      templateService.delete(
        makeId(request, tplId, env),
        deployedAt.getOrElse(0)
      ).map { x => Ok }
    }
  }
}
// Form payload for template creation: target template id, environment, and body.
case class NewTemplateForm(templateId: String, env: String, template: TemplateForm)
// Form payload for a template body; only the engine name is mandatory.
case class TemplateForm(
  name: Option[String], description: Option[String], engine: String,
  fromRecipients: Option[String], bccRecipients: Option[String],
  subject: Option[String], textBody: Option[String], htmlBody: Option[String]
)
| mailrest/mailrest | app/controllers/domain/TemplateController.scala | Scala | apache-2.0 | 4,954 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.nio.file.Files
import java.util.Properties
import kafka.common.{InconsistentBrokerMetadataException, InconsistentNodeIdException, KafkaException}
import kafka.log.UnifiedLog
import org.apache.kafka.common.Uuid
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.test.TestUtils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
/**
 * Unit tests for KafkaRaftServer.initializeLogDirs: loading and validating
 * `meta.properties` across configured log dirs, and handling of offline dirs.
 */
class KafkaRaftServerTest {
  private val clusterIdBase64 = "H3KKO4NTRPaCWtEmm3vW7A"
  // Happy path: matching node id and a readable log dir load cleanly with no
  // offline dirs reported.
  @Test
  def testSuccessfulLoadMetaProperties(): Unit = {
    val clusterId = clusterIdBase64
    val nodeId = 0
    val metaProperties = MetaProperties(clusterId, nodeId)
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "broker,controller")
    configProperties.put(KafkaConfig.NodeIdProp, nodeId.toString)
    configProperties.put(KafkaConfig.AdvertisedListenersProp, "PLAINTEXT://127.0.0.1:9092")
    configProperties.put(KafkaConfig.QuorumVotersProp, s"$nodeId@localhost:9092")
    configProperties.put(KafkaConfig.ControllerListenerNamesProp, "PLAINTEXT")
    val (loadedMetaProperties, offlineDirs) =
      invokeLoadMetaProperties(metaProperties, configProperties)
    assertEquals(metaProperties, loadedMetaProperties)
    assertEquals(Seq.empty, offlineDirs)
  }
  // A node.id in the config that differs from meta.properties must be rejected.
  @Test
  def testLoadMetaPropertiesWithInconsistentNodeId(): Unit = {
    val clusterId = clusterIdBase64
    val metaNodeId = 1
    val configNodeId = 0
    val metaProperties = MetaProperties(clusterId, metaNodeId)
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "controller")
    configProperties.put(KafkaConfig.NodeIdProp, configNodeId.toString)
    configProperties.put(KafkaConfig.QuorumVotersProp, s"$configNodeId@localhost:9092")
    configProperties.put(KafkaConfig.ControllerListenerNamesProp, "PLAINTEXT")
    assertThrows(classOf[InconsistentNodeIdException], () =>
      invokeLoadMetaProperties(metaProperties, configProperties))
  }
  // Helper: writes meta.properties into a temp log dir, points the config at it,
  // and runs initializeLogDirs; the temp dir is always cleaned up.
  private def invokeLoadMetaProperties(
    metaProperties: MetaProperties,
    configProperties: Properties
  ): (MetaProperties, collection.Seq[String]) = {
    val tempLogDir = TestUtils.tempDirectory()
    try {
      writeMetaProperties(tempLogDir, metaProperties)
      configProperties.put(KafkaConfig.LogDirProp, tempLogDir.getAbsolutePath)
      val config = KafkaConfig.fromProps(configProperties)
      KafkaRaftServer.initializeLogDirs(config)
    } finally {
      Utils.delete(tempLogDir)
    }
  }
  // Helper: serializes `metaProperties` into <logDir>/meta.properties.
  private def writeMetaProperties(
    logDir: File,
    metaProperties: MetaProperties
  ): Unit = {
    val metaPropertiesFile = new File(logDir.getAbsolutePath, "meta.properties")
    val checkpoint = new BrokerMetadataCheckpoint(metaPropertiesFile)
    checkpoint.write(metaProperties.toProperties)
  }
  // An online log dir without meta.properties is a hard failure (the dir may
  // belong to another broker or be only partially formatted).
  @Test
  def testStartupFailsIfMetaPropertiesMissingInSomeLogDir(): Unit = {
    val clusterId = clusterIdBase64
    val nodeId = 1
    // One log dir is online and has properly formatted `meta.properties`.
    // The other is online, but has no `meta.properties`.
    val logDir1 = TestUtils.tempDirectory()
    val logDir2 = TestUtils.tempDirectory()
    writeMetaProperties(logDir1, MetaProperties(clusterId, nodeId))
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "broker")
    configProperties.put(KafkaConfig.NodeIdProp, nodeId.toString)
    configProperties.put(KafkaConfig.QuorumVotersProp, s"${(nodeId + 1)}@localhost:9092")
    configProperties.put(KafkaConfig.LogDirProp, Seq(logDir1, logDir2).map(_.getAbsolutePath).mkString(","))
    val config = KafkaConfig.fromProps(configProperties)
    assertThrows(classOf[KafkaException], () => KafkaRaftServer.initializeLogDirs(config))
  }
  // The metadata log dir is required for startup: if it is inaccessible, fail.
  @Test
  def testStartupFailsIfMetaLogDirIsOffline(): Unit = {
    val clusterId = clusterIdBase64
    val nodeId = 1
    // One log dir is online and has properly formatted `meta.properties`
    val validDir = TestUtils.tempDirectory()
    writeMetaProperties(validDir, MetaProperties(clusterId, nodeId))
    // Use a regular file as an invalid log dir to trigger an IO error
    val invalidDir = TestUtils.tempFile("blah")
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "broker")
    configProperties.put(KafkaConfig.QuorumVotersProp, s"${(nodeId + 1)}@localhost:9092")
    configProperties.put(KafkaConfig.NodeIdProp, nodeId.toString)
    configProperties.put(KafkaConfig.MetadataLogDirProp, invalidDir.getAbsolutePath)
    configProperties.put(KafkaConfig.LogDirProp, validDir.getAbsolutePath)
    val config = KafkaConfig.fromProps(configProperties)
    assertThrows(classOf[KafkaException], () => KafkaRaftServer.initializeLogDirs(config))
  }
  // In contrast to the metadata dir, an offline *data* dir is tolerated and
  // simply reported in the offline-dirs result.
  @Test
  def testStartupDoesNotFailIfDataDirIsOffline(): Unit = {
    val clusterId = clusterIdBase64
    val nodeId = 1
    // One log dir is online and has properly formatted `meta.properties`
    val validDir = TestUtils.tempDirectory()
    writeMetaProperties(validDir, MetaProperties(clusterId, nodeId))
    // Use a regular file as an invalid log dir to trigger an IO error
    val invalidDir = TestUtils.tempFile("blah")
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "broker")
    configProperties.put(KafkaConfig.NodeIdProp, nodeId.toString)
    configProperties.put(KafkaConfig.QuorumVotersProp, s"${(nodeId + 1)}@localhost:9092")
    configProperties.put(KafkaConfig.MetadataLogDirProp, validDir.getAbsolutePath)
    configProperties.put(KafkaConfig.LogDirProp, invalidDir.getAbsolutePath)
    val config = KafkaConfig.fromProps(configProperties)
    val (loadedProperties, offlineDirs) = KafkaRaftServer.initializeLogDirs(config)
    assertEquals(nodeId, loadedProperties.nodeId)
    assertEquals(Seq(invalidDir.getAbsolutePath), offlineDirs)
  }
  // A cluster-metadata partition directory inside a plain data dir indicates a
  // misconfiguration (metadata must live only in the metadata log dir) → fail.
  @Test
  def testStartupFailsIfUnexpectedMetadataDir(): Unit = {
    val nodeId = 1
    val clusterId = clusterIdBase64
    // Create two directories with valid `meta.properties`
    val metadataDir = TestUtils.tempDirectory()
    val dataDir = TestUtils.tempDirectory()
    Seq(metadataDir, dataDir).foreach { dir =>
      writeMetaProperties(dir, MetaProperties(clusterId, nodeId))
    }
    // Create the metadata dir in the data directory
    Files.createDirectory(new File(dataDir, UnifiedLog.logDirName(KafkaRaftServer.MetadataPartition)).toPath)
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "broker")
    configProperties.put(KafkaConfig.NodeIdProp, nodeId.toString)
    configProperties.put(KafkaConfig.QuorumVotersProp, s"${(nodeId + 1)}@localhost:9092")
    configProperties.put(KafkaConfig.MetadataLogDirProp, metadataDir.getAbsolutePath)
    configProperties.put(KafkaConfig.LogDirProp, dataDir.getAbsolutePath)
    val config = KafkaConfig.fromProps(configProperties)
    assertThrows(classOf[KafkaException], () => KafkaRaftServer.initializeLogDirs(config))
  }
  // Log dirs carrying different cluster ids must be rejected — they belong to
  // different clusters.
  @Test
  def testLoadPropertiesWithInconsistentClusterIds(): Unit = {
    val nodeId = 1
    val logDir1 = TestUtils.tempDirectory()
    val logDir2 = TestUtils.tempDirectory()
    // Create a random clusterId in each log dir
    Seq(logDir1, logDir2).foreach { dir =>
      writeMetaProperties(dir, MetaProperties(clusterId = Uuid.randomUuid().toString, nodeId))
    }
    val configProperties = new Properties
    configProperties.put(KafkaConfig.ProcessRolesProp, "broker")
    configProperties.put(KafkaConfig.QuorumVotersProp, s"${(nodeId + 1)}@localhost:9092")
    configProperties.put(KafkaConfig.NodeIdProp, nodeId.toString)
    configProperties.put(KafkaConfig.LogDirProp, Seq(logDir1, logDir2).map(_.getAbsolutePath).mkString(","))
    val config = KafkaConfig.fromProps(configProperties)
    assertThrows(classOf[InconsistentBrokerMetadataException],
      () => KafkaRaftServer.initializeLogDirs(config))
  }
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/server/KafkaRaftServerTest.scala | Scala | apache-2.0 | 8,791 |
package edu.eckerd.google.api.services.directory.models
// One page of a Google Directory user listing. `nextPageToken`, when present,
// presumably requests the following page — TODO confirm against the API client.
case class Users(
  users: Option[List[User]],
  nextPageToken: Option[String]
)
| EckerdCollege/google-api-scala | src/main/scala/edu/eckerd/google/api/services/directory/models/Users.scala | Scala | apache-2.0 | 182 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spot.utilities
import org.apache.spark.broadcast.Broadcast
/**
* Routines and data for processing URLs for domains, subdomains, country code, top-level domains, etc.
*/
object DomainProcessor extends Serializable {
val CountryCodes = Set("ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar", "as", "at", "au",
"aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg", "bh", "bi", "bj", "bm", "bn", "bo", "bq", "br", "bs", "bt",
"bv", "bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl", "cm", "cn", "co", "cr", "cu", "cv",
"cw", "cx", "cy", "cz", "de", "dj", "dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu", "fi",
"fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg", "gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr",
"gs", "gt", "gu", "gw", "gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in", "io", "iq", "ir",
"is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh", "ki", "km", "kn", "kp", "kr", "krd", "kw", "ky", "kz", "la",
"lb", "lc", "li", "lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mg", "mh", "mk", "ml", "mm",
"mn", "mo", "mp", "mq", "mr", "ms", "mt", "mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph", "pk", "pl", "pm", "pn", "pr", "ps", "pt",
"pw", "py", "qa", "re", "ro", "rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj", "", "sk",
"sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy", "sz", "tc", "td", "tf", "tg", "th", "tj", "tk",
"tl", "tm", "tn", "to", "tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "us", "uy", "uz", "va", "vc", "ve",
"vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt", "za", "zm", "zw")
val TopLevelDomainNames = Set("com", "org", "net", "int", "edu", "gov", "mil")
val None = "None"
/**
* Extract domain info from a url.
* @param url Incoming url.
* @param topDomainsBC Broadcast variable containing the top domains set.
* @param userDomain Domain of the spot user (example:'intel').
* @return New [[DomainInfo]] object containing extracted domain information.
*/
def extractDomainInfo(url: String, topDomainsBC: Broadcast[Set[String]], userDomain: String): DomainInfo = {
val spliturl = url.split('.')
val numParts = spliturl.length
val (domain, subdomain) = extractDomainSubdomain(url)
val subdomainLength = if (subdomain != None) {
subdomain.length
} else {
0
}
val topDomainClass = if (userDomain != "" && domain == userDomain) {
2
} else if (topDomainsBC.value contains domain) {
1
} else {
0
}
val subdomainEntropy = if (subdomain != "None") Entropy.stringEntropy(subdomain) else 0d
DomainInfo(domain, topDomainClass, subdomain, subdomainLength, subdomainEntropy, numParts)
}
/**
*
* @param url Url from which to extract domain.
* @return Domain name or "None" if there is none.
*/
def extractDomain(url: String) : String = {
val (domain, _) = extractDomainSubdomain(url)
domain
}
/**
* Extrat the domain and subdomain from a URL.
* @param url URL to be parsed.
* @return Pair of (domain, subdomain). If there is no domain, both fields contain "None".
* If there is no subdomain then the subdomain field is "None"
*/
def extractDomainSubdomain(url: String) : (String, String) = {
val spliturl = url.split('.')
val numParts = spliturl.length
var domain = None
var subdomain = None
// First check if query is an IP address e.g.: 123.103.104.10.in-addr.arpa or a name.
// Such URLs receive a domain of NO_DOMAIN
if (numParts >= 2
&& !(numParts > 2 && spliturl(numParts - 1) == "arpa" && spliturl(numParts - 2) == "in-addr")
&& (CountryCodes.contains(spliturl.last) || TopLevelDomainNames.contains(spliturl.last))) {
val strippedSplitURL = removeTopLevelDomainName(removeCountryCode(spliturl))
if (strippedSplitURL.length > 0) {
domain = strippedSplitURL.last
if (strippedSplitURL.length > 1) {
subdomain = strippedSplitURL.slice(0, strippedSplitURL.length - 1).mkString(".")
}
}
}
(domain, subdomain)
}
/**
* Strip the country code from a split URL.
* @param urlComponents Array of the entries of a URL after splitting on periods.
* @return URL components with the country code stripped.
*/
def removeCountryCode(urlComponents: Array[String]): Array[String] = {
if (CountryCodes.contains(urlComponents.last)) {
urlComponents.dropRight(1)
} else {
urlComponents
}
}
/**
 * Strip the top-level domain name from a split URL.
 *
 * @param urlComponents Array of the entries of a URL after splitting on periods.
 * @return URL components with the top-level domain name stripped.
 */
def removeTopLevelDomainName(urlComponents: Array[String]): Array[String] =
  if (TopLevelDomainNames.contains(urlComponents.last)) urlComponents.init else urlComponents
/**
 * Commonly extracted domain features.
 *
 * @param domain Domain (if any) of a url.
 * @param topDomain Numerical class of domain: 2 for Intel, 1 for Alexa top domains, 0 for others.
 * @param subdomain Subdomain (if any) in the url.
 * @param subdomainLength Length of the subdomain. 0 if there is none.
 * @param subdomainEntropy Entropy of the subdomain viewed as a distribution on its character set.
 *                         0 if there is no subdomain.
 * @param numPeriods Number of periods + 1 in the url. (Number of sub-strings where url is split by periods.)
 */
case class DomainInfo(
  domain: String,
  topDomain: Int,
  subdomain: String,
  subdomainLength: Int,
  subdomainEntropy: Double,
  numPeriods: Int)
}
| brandon-edwards/incubator-spot | spot-ml/src/main/scala/org/apache/spot/utilities/DomainProcessor.scala | Scala | apache-2.0 | 6,936 |
package sample
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SQLContext, DataFrame}
import scala.reflect.internal.util.TableDef.Column
import org.apache.spark.sql.functions._
/**
 * One record of the Olympics medal data set.
 *
 * NOTE: the field name `siver` (sic, presumably "silver") is preserved
 * unchanged because callers access it by name.
 */
case class Olympic(
  player: String,
  country: String,
  year: String,
  sport: String,
  bronze: Int,
  siver: Int,
  gold: Int,
  total: Int)
/**
 * Medal-count analytics over an Olympics data set, implemented both on raw
 * RDD[String] CSV lines and on DataFrames.
 *
 * Assumed CSV column layout for the RDD variants (TODO confirm against the
 * actual data file): 0 = player, 1 = country, 2 = year, 3 = sport,
 * 7 = total medals.
 */
trait OlympicCalc {
  // Type aliases purely for readability of the signatures below.
  type CountryName = String
  type MedalCount = Int
  type Year = String
  type Player = String
  type Sport = String

  /** Year in which the USA won the most medals, computed on a DataFrame. */
  def findMostMedalsYear4USA: (DataFrame,SQLContext) => String = (df,sc) => {
    // Equivalent RDD formulation, kept for reference:
    // toYearMedalPair(rdd, "United States").
    // sortBy(_._2, ascending = false).map(_._1).first()
    import sc.implicits._
    toYearMedalPairDF(df,"United States",sc).orderBy($"total".desc).first().getAs[String]("year")
  }

  /** (year, summed medal count) pairs for the given country, from CSV lines. */
  def toYearMedalPair(rdd: RDD[String], country: CountryName): RDD[(Year, MedalCount)] = {
    rdd.map(_.split(",")).filter(_(1) == country).
      map(a => (a(2), a(7).toInt)).reduceByKey(_ + _)
  }

  /** DataFrame with (year, total) rows for the given country. */
  def toYearMedalPairDF(dataFrame: DataFrame, country: CountryName, sqlContext: SQLContext): DataFrame = {
    import sqlContext.implicits._
    dataFrame.filter($"country" === country).
      select("year","total").groupBy("year").agg(sum("total") as "total")
  }

  /** Country with the highest overall medal count (RDD variant). */
  def findCountryHavingMostMedals: RDD[String] => (CountryName, MedalCount) = rdd => {
    rdd.map(_.split(",")).map(a => (a(1),a(7).toInt)).reduceByKey(_ + _).
      sortBy(_._2, ascending = false).first()
  }

  /**
   * DataFrame variant of [[findCountryHavingMostMedals]]; same result computed
   * via groupBy/agg/orderBy instead of reduceByKey/sortBy.
   */
  def findCountryHavingMostMedalsDF: (DataFrame,SQLContext) => (CountryName, MedalCount) = { (df,sc) =>
    import org.apache.spark.sql.functions._
    import sc.implicits._
    val rs = df.select("country","total").groupBy("country").agg(sum("total").as("total")).
      orderBy($"total".desc).first()
    (rs.getAs[String]("country"), rs.getAs[Int]("total"))
  }

  /** Years in which the USA won fewer than 200 medals. */
  def findYearsUSAMedalLt200: RDD[String] => Array[(Year,MedalCount)] = { rdd =>
    toYearMedalPair(rdd,"United States").filter{ case (y,m) => m < 200}.collect()
  }

  /** Players who medalled in more than one distinct sport. */
  def findPlayerHasMedalInSportsGT1: RDD[String] => Array[Player] = { rdd =>
    // Accumulate the set of sports per player, then keep players with > 1.
    rdd.map(_.split(",")).map(a => (a(0),a(3))).
      aggregateByKey(Set[Sport]())({(ss,s) => ss + s },{ (ss1,ss2) => ss1 ++ ss2 }).
      filter(_._2.size > 1).map(_._1).collect()
  }
}
| notyy/mlTraining | src/main/scala/sample/OlympicCalc.scala | Scala | apache-2.0 | 2,342 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.annotation
/** A method annotation which verifies that the method will be compiled
 *  with tail call optimization.
 *
 *  If it is present, the compiler will issue an error if the method cannot
 *  be optimized into a loop.
 *
 *  Example:
 *  {{{
 *  @tailrec def gcd(a: Int, b: Int): Int =
 *    if (b == 0) a else gcd(b, a % b)
 *  }}}
 */
final class tailrec extends scala.annotation.StaticAnnotation
| martijnhoekstra/scala | src/library/scala/annotation/tailrec.scala | Scala | apache-2.0 | 596 |
/*
* Copyright 2016 Coral realtime streaming analytics (http://coral-streaming.github.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.coral.actors
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit, TestProbe}
import io.coral.actors.CoralActor._
import org.json4s.JsonAST.JValue
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.collection.immutable.SortedSet
import scala.concurrent.Await
import scala.language.postfixOps
import scala.util.Success
import akka.util.Timeout
import org.json4s.JObject
import akka.actor._
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import scala.collection.immutable.Map
import scala.concurrent.Future
import akka.pattern.ask
import scala.concurrent.duration._
// Unit tests for the base CoralActor: JSON triggering, emission to registered
// targets, Shunt request/response semantics, and state exposure via GetField.
@RunWith(classOf[JUnitRunner])
class CoralActorSpec(_system: ActorSystem)
  extends TestKit(_system)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with ScalaFutures {
  def this() = this(ActorSystem("coral"))

  implicit val timeout = Timeout(100 millis)
  implicit val formats = org.json4s.DefaultFormats

  // Parent actor under which named test actors are created.
  val root = TestActorRef[CoralActor](Props(new MinimalCoralActor), "coral")

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }

  // Bare-bones CoralActor with an empty JSON definition; tests override
  // trigger/state as needed via anonymous subclasses.
  class MinimalCoralActor extends CoralActor(JObject()) {
  }

  // Creates a (possibly named) test actor and returns the underlying instance.
  def createCoralActor(props: Props = null, name: String = ""): CoralActor = {
    val _props = if (props != null) props else Props(new MinimalCoralActor)
    val ref =
      if (name == "") TestActorRef[CoralActor](_props)
      else TestActorRef[CoralActor](_props, root, name)
    ref.underlyingActor
  }

  "A CoralActor" should {
    "Have a 'jsonDef' method that returns the json definition" in {
      val testJson = parse( """{ "test": "jsonDef" }""").asInstanceOf[JObject]
      class TestCoralActor extends CoralActor(testJson) {
      }
      val coral = createCoralActor(Props(new TestCoralActor()))
      coral.jsonDef should be(testJson)
    }
    "Have an 'askActor' method to ask another actor by name" in {
      val coral = createCoralActor()
      val probe = TestProbe()
      val result = coral.askActor(probe.ref.path.toString, "ask")
      probe.expectMsg("ask")
      probe.reply("ask:response")
      assert(result.isCompleted && result.value == Some(Success("ask:response")))
    }
    "Have a 'tellActor' method to tell another by name" in {
      val coral = createCoralActor()
      val probe = TestProbe()
      coral.tellActor(probe.ref.path.toString, "tell")
      probe.expectMsg("tell")
    }
    "Have an 'in' method" in {
      // 'in' schedules the block after the given delay.
      val coral = createCoralActor()
      val probe = TestProbe()
      coral.in(10 millis) {
        coral.tellActor(probe.ref.path.toString, "msg2")
      }
      probe.expectMsg(100 millis, "msg2")
    }
    "Handle any JSON message" in {
      val testJson: JValue = parse( """{ "test": "emit" }""")
      class TestCoralActor extends MinimalCoralActor {
        override def trigger = json => Future.successful(Some(testJson.merge(json)))
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      val probe = TestProbe()
      val json = parse( """{ "something": "else" }""")
      val expected = testJson.merge(json)
      coral.emitTargets += probe.ref
      coral.self ! json
      probe.expectMsg(expected)
    }
    "Ignore an incomplete JSON message (that is, makes trigger returns nothing)" in {
      val testJson: JValue = parse( """{ "test": "incomplete" }""")
      class TestCoralActor extends MinimalCoralActor {
        override def trigger = _ => Future.successful(None)
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      val probe = TestProbe()
      coral.emitTargets += probe.ref
      coral.self ! testJson
      probe.expectNoMsg(100 millis)
    }
    "Ignore an JSON message that makes trigger fail" in {
      // A failed trigger future must not emit anything downstream.
      val testJson: JValue = parse( """{ "test": "fail" }""")
      class TestCoralActor extends MinimalCoralActor {
        override def trigger = _ => Future.failed({
          new Exception("Test exception on purpose")
        })
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      val probe = TestProbe()
      coral.emitTargets += probe.ref
      coral.self ! testJson
      probe.expectNoMsg(100 millis)
    }
    "Handle a 'Shunt' message" in {
      // Shunt replies to the sender instead of emitting to targets.
      val testJson: JValue = parse( """{ "test": "emit" }""")
      class TestCoralActor extends MinimalCoralActor {
        override def trigger = json => Future.successful(Some(testJson.merge(json)))
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      val json = parse( """{ "something": "else" }""")
      val expected = testJson.merge(json)
      coral.self ! Shunt(json.asInstanceOf[JObject])
      expectMsg(expected)
    }
    "Ignore a 'Shunt' message that triggers none" in {
      val testJson: JValue = parse( """{ "test": "emit" }""")
      class TestCoralActor extends MinimalCoralActor {
        override def trigger = _ => Future.successful(None)
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      val json = parse( """{ "something": "else" }""")
      coral.self ! Shunt(json.asInstanceOf[JObject])
      expectNoMsg(100 millis)
    }
    "Have 'noProcess' produce empty future option" in {
      val coral = createCoralActor()
      val result = coral.trigger(parse( """{"test": "whatever"}""").asInstanceOf[JObject])
      whenReady(result) {
        value => value should be(Some(JNothing))
      }
    }
    "Be activated after a 'Trigger' message" in {
      val testJson: JValue = parse( """{ "test": "trigger" }""")
      class TestCoralActor extends MinimalCoralActor {
        var wasExecuted = false
        override def trigger = _ => Future.successful({
          wasExecuted = true
          None
        })
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      coral.process(parse("{}").asInstanceOf[JObject])
      expectNoMsg(100 millis)
      coral.asInstanceOf[TestCoralActor].wasExecuted should be(true)
    }
    "Be defined in concrete implementations of 'trigger'" in {
      val testJson: JValue = parse( """{ "test": "trigger2" }""")
      class TestCoralActor extends MinimalCoralActor {
        var wasExecuted = false
        override def trigger = _ => Future.successful {
          wasExecuted = true
          None
        }
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      val result = coral.trigger(testJson.asInstanceOf[JObject])
      whenReady(result) {
        value => value should be(None)
      }
      coral.asInstanceOf[TestCoralActor].wasExecuted should be(true)
    }
    "Emit to actors registered with a 'RegisterActor' message" in {
      val coral = createCoralActor()
      val probe = TestProbe()
      coral.self ! RegisterActor(probe.ref)
      coral.emitTargets should be(SortedSet(probe.ref))
    }
    "Have a 'emit' method" in {
      // emit broadcasts to all registered targets; JNothing is swallowed.
      val coral = createCoralActor()
      val probe1 = TestProbe()
      val probe2 = TestProbe()
      coral.emitTargets += probe2.ref
      coral.emitTargets += probe1.ref
      val json = parse( """{ "test": "transmit" }""")
      coral.emit(json)
      probe1.expectMsg(json)
      probe2.expectMsg(json)
      coral.emit(JNothing)
      probe1.expectNoMsg(100 millis)
    }
    "Have a default implementation of no state" in {
      val coral = createCoralActor(Props(new MinimalCoralActor))
      coral.state should be(Map.empty)
    }
    "Be defined in concrete implementations of 'state'" in {
      val testState = Map("key" -> JDouble(1.6))
      class TestCoralActor extends MinimalCoralActor {
        override def state: Map[String, JValue] = testState
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      coral.state should be(testState)
    }
    "Be accessible with a 'GetField' message" in {
      val testValue = JDouble(3.1)
      val testState = Map("key" -> testValue)
      val expected: JObject = ("key" -> testValue)
      class TestCoralActor extends MinimalCoralActor {
        override def state: Map[String, JValue] = testState
      }
      val coral = createCoralActor(Props(new TestCoralActor))
      coral.self ! GetField("key")
      expectMsg(expected)
      coral.self ! GetField("non-existing key")
      expectMsg(JNothing)
    }
  }
} | coral-streaming/coral | src/test/scala/io/coral/actors/CoralActorSpec.scala | Scala | apache-2.0 | 8,516 |
package com.typesafe.sbt
package packager
import sbt._
import com.typesafe.sbt.packager.archetypes.JavaAppPackaging
/** A set of helper methods to simplify the writing of mappings. */
object MappingsHelper {

  /**
   * Returns a Seq of mappings whose effect is to add a whole directory to the
   * generated package, keyed relative to the directory's parent (so the
   * directory itself appears inside the package).
   *
   * @example
   * {{{
   * mappings in Universal ++= directory(baseDirectory.value / "extra")
   * }}}
   *
   * @param sourceDir the directory to map
   * @return mappings
   */
  def directory(sourceDir: File): Seq[(File, String)] = {
    val parentFile = sourceDir.getParentFile
    if (parentFile != null)
      // Reuse the already-computed parentFile (the original re-called
      // sourceDir.getParentFile here, duplicating the null-checked value).
      sourceDir.*** pair relativeTo(parentFile)
    else
      // sourceDir is a filesystem root: fall back to the basic mapper.
      sourceDir.*** pair basic
  }

  /**
   * It lightens the build file if one wants to give a string instead of file.
   *
   * @example
   * {{{
   * mappings in Universal ++= directory("extra")
   * }}}
   *
   * @param sourceDir the directory to map, as a path string
   * @return mappings
   */
  def directory(sourceDir: String): Seq[(File, String)] = {
    directory(file(sourceDir))
  }

  /**
   * Returns a Seq of mappings whose effect is to add the content of the
   * directory to the generated package, excluding the directory itself.
   *
   * @example
   * {{{
   * mappings in Universal ++= contentOf(baseDirectory.value / "extra")
   * }}}
   *
   * @param sourceDir the directory whose content is mapped
   * @return mappings
   */
  def contentOf(sourceDir: File): Seq[(File, String)] = {
    (sourceDir.*** --- sourceDir) pair relativeTo(sourceDir)
  }

  /**
   * It lightens the build file if one wants to give a string instead of file.
   *
   * @example
   * {{{
   * mappings in Universal ++= contentOf("extra")
   * }}}
   *
   * @param sourceDir as string representation
   * @return mappings
   */
  def contentOf(sourceDir: String): Seq[(File, String)] = {
    contentOf(file(sourceDir))
  }

  /**
   * Create mappings from your classpath. For example if you want to add
   * additional dependencies, like test or model.
   *
   * @example Add all test artifacts to a separated test folder
   * {{{
   * mappings in Universal ++= fromClasspath((managedClasspath in Test).value, target = "test")
   * }}}
   *
   * @param entries classpath entries to map
   * @param target  folder inside the package, without trailing slash
   * @return a list of mappings
   */
  def fromClasspath(entries: Seq[Attributed[File]], target: String): Seq[(File, String)] = {
    fromClasspath(entries, target, _ => true)
  }

  /**
   * Create mappings from your classpath. For example if you want to add
   * additional dependencies, like test or model. You can also filter the
   * artifacts that should be mapped to mappings.
   *
   * @example Filter all osgi bundles
   * {{{
   * mappings in Universal ++= fromClasspath(
   *   (managedClasspath in Runtime).value,
   *   "osgi",
   *   artifact => artifact.`type` == "bundle"
   * )
   * }}}
   *
   * @param entries from where mappings should be created from
   * @param target folder, e.g. `model`. Must not end with a slash
   * @param includeArtifact function to determine if an artifact should result in a mapping
   * @param includeOnNoArtifact whether to include entries that carry no Artifact
   *                            metadata at all; defaults to false (exclude them)
   */
  def fromClasspath(
    entries: Seq[Attributed[File]],
    target: String,
    includeArtifact: Artifact => Boolean,
    includeOnNoArtifact: Boolean = false): Seq[(File, String)] = {
    entries
      .filter(attr => attr.get(sbt.Keys.artifact.key) map includeArtifact getOrElse includeOnNoArtifact)
      .map { attribute =>
        val file = attribute.data
        file -> s"$target/${file.getName}"
      }
  }
}
| damirv/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/MappingsHelper.scala | Scala | bsd-2-clause | 3,544 |
package io.rout.contenttypes
import com.twitter.app.Flag
import com.twitter.finagle.Http
import com.twitter.finagle.stats.{Counter, NullStatsReceiver}
import com.twitter.finagle.tracing.NullTracer
import com.twitter.server.TwitterServer
import com.twitter.util.Await
import io.rout._
import contentTypes._
import io.rout.generic.decoding._
import io.routs._
import io.rout.html._
import scalatags.Pretty.noPrettyPrint
import scalatags.Text.all._
import scala.util.Random
import io.circe.generic.auto._
import io.rout.circe._
// Example rout server exposing a Todo resource under multiple content types
// (Text.Html and Application.Json), with encoders/decoders derived per route.
object Main extends TwitterServer {
  //add multiple endpoints with different Content-types (and encoders/decoders added automatically)
  val port: Flag[Int] = flag("port", 8081, "TCP port for HTTP server")
  val passports: Counter = statsReceiver.counter("registered_passports")
  val todos: Counter = statsReceiver.counter("todos")

  // Renders a Todo as an HTML anchor containing one <li> per field.
  val etodo = scalaTag[Todo](t => tag.a(t.productIterator.map(r=> tag.li(r.toString)).toSeq))

  implicit val encodeTodo: Encode.TextHtml[Todo] =
    Encode.scalaTag(t => etodo(t))

  implicit val seqTodo: Encode.TextHtml[Seq[Todo]] =
    Encode.scalaTag(s => tag.a(s.map(etodo.apply)))

  // Reads a Todo from the request, filling in the missing id with a random int.
  val derivedTodo: ReqRead[Todo] = derive[Int => Todo].incomplete.map(_(Random.nextInt(100000)))

  val getTodos = get[Text.Html](Root / "todos").sync(r => Ok(Todo.list()))

  //this is awkward to specify matched int :(
  val getTodo = get[Int,Text.Html](Root / "todo" / Match[Int]).sync{ id =>
    Todo.get(id) match {
      case Some(t) => Ok(t)
      case None => throw new TodoNotFound(id)
    }
  }

  // Accepts a form/derived Todo and responds as JSON.
  val htmljsonTodo =
    post[Application.Json](Root / "todo" / "json" / "html").sync(derivedTodo){ todo =>
      todos.incr()
      Todo.save(todo)
      Created(todo)
    }

  // Accepts a JSON body and responds as JSON.
  val jsonjsonTodo =
    post[Application.Json](Root / "todo" / "json" / "json").sync(binaryBody.asJson[Todo]){ todo =>
      todos.incr()
      Todo.save(todo)
      Created(todo)
    }

  val htmlTodo = post[Text.Html](Root / "todo").sync(derivedTodo) { todo =>
    todos.incr()
    Todo.save(todo)
    Created(todo)
  }

  val deleteTodo = delete(Root / Match[Int]).sync{id =>
    Todo.delete(id)
    Ok(s"todo $id deleted!")
  }

  // Route table plus a typed error handler rendering failures as HTML.
  val rOut = mkRoutes(Seq(
    getTodo,
    getTodos,
    htmlTodo,
    deleteTodo,
    htmljsonTodo,
    jsonjsonTodo))
    //.withNotFound("path was not found")
    .handle[Text.Html,Todo] {
      case t: TodoNotFound => NotFound(t)
      case t: Throwable => Forbidden(new Exception(t))
    }

  def main(): Unit = {
    log.info("Serving the Todo with multiple content-types (Text.Html,Application.Json)")
    val server = Http.server
      .withCompressionLevel(0)
      .withStatsReceiver(NullStatsReceiver)
      //.configured(Stats(statsReceiver))
      .withTracer(NullTracer)
      .serve(s":${port()}", rOut.service)
    onExit { server.close() }
    Await.ready(adminHttpServer)
  }
}
| teodimoff/rOut | examples/src/io/rout/contenttypes/Main.scala | Scala | apache-2.0 | 2,872 |
package leo.datastructures
import leo.datastructures.impl.Signature
import leo.datastructures.term._
/**
 * Term index data structure: tracks inserted terms and supports lookup by
 * symbol, by head symbol, and by (subterm, position) occurrence.
 *
 * @author Alexander Steen
 * @since 16.10.2014
 */
object TermIndex {
  // NOTE(review): all index maps below are unsynchronized mutable state;
  // concurrent mutation would race — confirm single-threaded usage.
  protected[TermIndex] var termset: Set[Term] = Set.empty
  protected[TermIndex] var symbol_of: Map[Signature#Key, Set[Term]] = Map.empty
  protected[TermIndex] var headsymbol_of: Map[Term, Set[Term]] = Map.empty
  protected[TermIndex] var occurs_in: Map[Term, Set[(Term, Position)]] = Map.empty
  protected[TermIndex] var occurs_at: Map[Term, Map[Position, Set[Term]]] = Map.empty

  /** All terms currently indexed. */
  def terms: Set[Term] = termset

  def contains(t: Term): Boolean = termset.contains(t)

  /**
   * Beta-normalizes the term, interns it in the shared term bank if needed,
   * and records it in all index structures.
   *
   * @return the interned, beta-normalized term actually indexed
   */
  def insert(term: Term): Term = {
    val t = term.betaNormalize
    val t2 = if (!Term.contains(t))
      Term.insert(t)
    else
      t
    // Force computation of lazy values
    t2.headSymbol
    t2.freeVars
    t2.occurrences
    // insert to data structures
    for (s <- t2.symbols) {
      symbol_of.get(s) match {
        case None => symbol_of += ((s,Set(t2)))
        case Some(set) => symbol_of += ((s, set + t2))
      }
    }
    val hs = t2.headSymbol
    headsymbol_of.get(hs) match {
      case None => headsymbol_of += ((hs, Set(t2)))
      case Some(set) => headsymbol_of += ((hs, set + t2))
    }
    insertSubterms(t2, t2, Position.root)
    termset += t2
    t2
  }

  // Query helpers: each returns an empty result for unknown keys.
  def byHeadsymbol(head: Term): Set[Term] = headsymbol_of.getOrElse(head, Set())
  def bySymbol(sym: Signature#Key): Set[Term] = symbol_of.getOrElse(sym, Set())
  def bySubterm(subterm: Term): Set[(Term, Position)] = occurs_in.getOrElse(subterm, Set())
  def bySubtermAtPos(subterm: Term, pos: Position): Set[(Term)] = occurs_at.get(subterm) match {
    case None => Set()
    case Some(inner) => inner.getOrElse(pos, Set())
  }

  /**
   * Records `subterm` (occurring inside `term` at `position`) in
   * occurs_in/occurs_at, then recursively descends into its children.
   */
  protected def insertSubterms(term: Term, subterm: Term, position: Position): Unit = {
    occurs_in.get(subterm) match {
      case None => occurs_in += ((subterm, Set((term, position))))
      case Some(set) => occurs_in += ((subterm, set + ((term, position))))
    }
    occurs_at.get(subterm) match {
      case None => occurs_at += ((subterm, Map((position, Set(term)))))
      case Some(inner) => inner.get(position) match {
        case None => occurs_at += ((subterm, inner + ((position, Set(term)))))
        case Some(set) => occurs_at += ((subterm, inner + ((position, set + term))))
      }
    }
    import Term.{Bound, Symbol, @@@, ∙, @@@@, :::>, TypeLambda}
    // Leaves (Bound/Symbol) contribute nothing further; application spines
    // descend into the head and each term argument (type args are skipped).
    subterm match {
      case Bound(t,scope) => ()
      case Symbol(id) => ()
      case s @@@ t => () // not implemented for curried terms
      case f ∙ args => insertSubterms(term, f, position.headPos); var i = 1
        for(arg <- args) {
          arg match {
            case Left(t) => insertSubterms(term, t, position.argPos(i)); i = i+1
            case Right(_) => ()
          }
        }
      case s @@@@ ty => () // not implemented for curried terms
      case ty :::> s => insertSubterms(term, s, position.abstrPos)
      case TypeLambda(t) => insertSubterms(term, t, position.abstrPos)
    }
  }
}
| cbenzmueller/LeoPARD | src/main/scala/leo/datastructures/TermIndex.scala | Scala | bsd-3-clause | 3,265 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker.backend.javascript
import scala.annotation.switch
import org.scalajs.ir
import ir.Position
import ir.Position.NoPosition
object Trees {
  import ir.Trees.requireValidIdent

  /** AST node of JavaScript. */
  abstract sealed class Tree {
    val pos: Position

    // Pretty-prints this tree as a statement; intended for debugging output.
    def show: String = {
      val writer = new java.io.StringWriter
      val printer = new Printers.JSTreePrinter(writer)
      printer.printTree(this, isStat = true)
      writer.toString()
    }
  }

  // Comments

  case class DocComment(text: String)(implicit val pos: Position) extends Tree

  // Identifiers and properties

  sealed trait PropertyName {
    def pos: Position
  }

  case class Ident(name: String, originalName: Option[String])(
      implicit val pos: Position) extends PropertyName {
    requireValidIdent(name)
  }

  object Ident {
    def apply(name: String)(implicit pos: Position): Ident =
      new Ident(name, Some(name))
  }

  case class ComputedName(tree: Tree) extends PropertyName {
    def pos: Position = tree.pos
  }

  // Definitions

  sealed trait LocalDef extends Tree {
    def name: Ident
    def mutable: Boolean

    def ref(implicit pos: Position): Tree = VarRef(name)
  }

  case class VarDef(name: Ident, rhs: Option[Tree])(implicit val pos: Position) extends LocalDef {
    def mutable: Boolean = true
  }

  /** ES6 let or const (depending on the mutable flag). */
  case class Let(name: Ident, mutable: Boolean, rhs: Option[Tree])(implicit val pos: Position) extends LocalDef

  case class ParamDef(name: Ident, rest: Boolean)(implicit val pos: Position) extends LocalDef {
    def mutable: Boolean = true
  }

  // Control flow constructs

  case class Skip()(implicit val pos: Position) extends Tree

  class Block private (val stats: List[Tree])(implicit val pos: Position) extends Tree {
    override def toString(): String =
      stats.mkString("Block(", ",", ")")
  }

  object Block {
    // Smart constructor: flattens nested Blocks, drops Skip()s, and collapses
    // to Skip() (empty) or the single remaining statement when possible.
    def apply(stats: List[Tree])(implicit pos: Position): Tree = {
      val flattenedStats = stats flatMap {
        case Skip() => Nil
        case Block(subStats) => subStats
        case other => other :: Nil
      }
      flattenedStats match {
        case Nil => Skip()
        case only :: Nil => only
        case _ => new Block(flattenedStats)
      }
    }

    def apply(stats: Tree*)(implicit pos: Position): Tree =
      apply(stats.toList)

    def unapply(block: Block): Some[List[Tree]] = Some(block.stats)
  }

  case class Labeled(label: Ident, body: Tree)(implicit val pos: Position) extends Tree

  case class Assign(lhs: Tree, rhs: Tree)(implicit val pos: Position) extends Tree {
    // Only assignable targets are accepted as lhs.
    require(lhs match {
      case _:VarRef | _:DotSelect | _:BracketSelect => true
      case _ => false
    }, s"Invalid lhs for Assign: $lhs")
  }

  case class Return(expr: Tree)(implicit val pos: Position) extends Tree

  case class If(cond: Tree, thenp: Tree, elsep: Tree)(implicit val pos: Position) extends Tree

  case class While(cond: Tree, body: Tree, label: Option[Ident] = None)(implicit val pos: Position) extends Tree

  case class DoWhile(body: Tree, cond: Tree, label: Option[Ident] = None)(implicit val pos: Position) extends Tree

  case class ForIn(lhs: Tree, obj: Tree, body: Tree)(implicit val pos: Position) extends Tree

  case class For(init: Tree, guard: Tree, update: Tree, body: Tree)(
      implicit val pos: Position)
      extends Tree

  case class TryCatch(block: Tree, errVar: Ident, handler: Tree)(implicit val pos: Position) extends Tree

  case class TryFinally(block: Tree, finalizer: Tree)(implicit val pos: Position) extends Tree

  case class Throw(expr: Tree)(implicit val pos: Position) extends Tree

  case class Break(label: Option[Ident] = None)(implicit val pos: Position) extends Tree

  case class Continue(label: Option[Ident] = None)(implicit val pos: Position) extends Tree

  case class Switch(selector: Tree, cases: List[(Tree, Tree)], default: Tree)(implicit val pos: Position) extends Tree

  case class Debugger()(implicit val pos: Position) extends Tree

  // Expressions

  case class New(ctor: Tree, args: List[Tree])(implicit val pos: Position) extends Tree

  case class DotSelect(qualifier: Tree, item: Ident)(implicit val pos: Position) extends Tree

  case class BracketSelect(qualifier: Tree, item: Tree)(implicit val pos: Position) extends Tree

  /** Syntactic apply.
   *  It is a method call if fun is a dot-select or bracket-select. It is a
   *  function call otherwise.
   */
  case class Apply(fun: Tree, args: List[Tree])(implicit val pos: Position) extends Tree

  /** `...items`, the "spread" operator of ECMAScript 6.
   *
   *  It is only valid in ECMAScript 6, in the `args`/`items` of a [[New]],
   *  [[Apply]], or [[ArrayConstr]].
   *
   *  @param items An iterable whose items will be spread
   */
  case class Spread(items: Tree)(implicit val pos: Position) extends Tree

  case class Delete(prop: Tree)(implicit val pos: Position) extends Tree {
    require(prop match {
      case _:DotSelect | _:BracketSelect => true
      case _ => false
    }, s"Invalid prop for Delete: $prop")
  }

  /** Unary operation (always preserves pureness).
   *
   *  Operations which do not preserve pureness are not allowed in this tree.
   *  These are notably ++ and --
   */
  case class UnaryOp(op: UnaryOp.Code, lhs: Tree)(implicit val pos: Position) extends Tree

  object UnaryOp {
    /** Codes are the same as in the IR. */
    type Code = ir.Trees.JSUnaryOp.Code
  }

  /** `++x`, `x++`, `--x` or `x--`. */
  case class IncDec(prefix: Boolean, inc: Boolean, arg: Tree)(
      implicit val pos: Position)
      extends Tree

  /** Binary operation (always preserves pureness).
   *
   *  Operations which do not preserve pureness are not allowed in this tree.
   *  These are notably +=, -=, *=, /= and %=
   */
  case class BinaryOp(op: BinaryOp.Code, lhs: Tree, rhs: Tree)(implicit val pos: Position) extends Tree

  object BinaryOp {
    /** Codes are the same as in the IR. */
    type Code = ir.Trees.JSBinaryOp.Code
  }

  case class ArrayConstr(items: List[Tree])(implicit val pos: Position) extends Tree

  case class ObjectConstr(fields: List[(PropertyName, Tree)])(implicit val pos: Position) extends Tree

  // Literals

  /** Marker for literals. Literals are always pure. */
  sealed trait Literal extends Tree

  case class Undefined()(implicit val pos: Position) extends Literal

  case class Null()(implicit val pos: Position) extends Literal

  case class BooleanLiteral(value: Boolean)(implicit val pos: Position) extends Literal

  case class IntLiteral(value: Int)(implicit val pos: Position) extends Literal

  case class DoubleLiteral(value: Double)(implicit val pos: Position) extends Literal

  // A string literal can also be used where a property name is expected.
  case class StringLiteral(value: String)(
      implicit val pos: Position) extends Literal with PropertyName

  case class BigIntLiteral(value: BigInt)(
      implicit val pos: Position) extends Literal

  // Atomic expressions

  case class VarRef(ident: Ident)(implicit val pos: Position) extends Tree

  case class This()(implicit val pos: Position) extends Tree

  case class Function(arrow: Boolean, args: List[ParamDef], body: Tree)(
      implicit val pos: Position) extends Tree

  // Named function definition

  case class FunctionDef(name: Ident, args: List[ParamDef], body: Tree)(
      implicit val pos: Position) extends Tree

  // ECMAScript 6 classes

  case class ClassDef(className: Option[Ident], parentClass: Option[Tree],
      members: List[Tree])(implicit val pos: Position) extends Tree

  case class MethodDef(static: Boolean, name: PropertyName, args: List[ParamDef],
      body: Tree)(implicit val pos: Position) extends Tree

  case class GetterDef(static: Boolean, name: PropertyName,
      body: Tree)(implicit val pos: Position) extends Tree

  case class SetterDef(static: Boolean, name: PropertyName, param: ParamDef,
      body: Tree)(implicit val pos: Position) extends Tree

  case class Super()(implicit val pos: Position) extends Tree

  // ECMAScript 6 modules

  /** The name of an ES module export.
   *
   *  It must be a valid `IdentifierName`, as tested by
   *  [[ExportName.isValidExportName]].
   */
  case class ExportName(name: String)(implicit val pos: Position) {
    require(ExportName.isValidExportName(name),
        s"'$name' is not a valid export name")
  }

  object ExportName {
    /** Tests whether a string is a valid export name.
     *
     *  A string is a valid export name if and only if it is a valid ECMAScript
     *  `IdentifierName`, which is defined in
     *  [[http://www.ecma-international.org/ecma-262/6.0/#sec-names-and-keywords
     *  Section 11.6 of the ECMAScript 2015 specification]].
     *
     *  Currently, this implementation is buggy in some corner cases, as it does
     *  not accept code points with the Unicode properties `Other_ID_Start` and
     *  `Other_ID_Continue`. For example,
     *  `isValidIdentifierName(0x2118.toChar.toString)` will return `false`
     *  instead of `true`.
     *
     *  In theory, it does not really account for code points with the Unicode
     *  properties `Pattern_Syntax` and `Pattern_White_Space`, which should be
     *  rejected. However, with the current version of Unicode (9.0.0), there
     *  seems to be no such character that would be accepted by this method.
     */
    final def isValidExportName(name: String): Boolean = {
      // scalastyle:off return
      import java.lang.Character._

      def isJSIdentifierStart(cp: Int): Boolean =
        isUnicodeIdentifierStart(cp) || cp == '$' || cp == '_'

      def isJSIdentifierPart(cp: Int): Boolean = {
        // Zero-width (non-)joiners are explicitly allowed by the spec.
        val ZWNJ = 0x200c
        val ZWJ = 0x200d
        isUnicodeIdentifierPart(cp) || cp == '$' || cp == '_' || cp == ZWNJ || cp == ZWJ
      }

      if (name.isEmpty)
        return false

      val firstCP = name.codePointAt(0)
      if (!isJSIdentifierStart(firstCP))
        return false

      // Iterate by code point (not char) to handle supplementary-plane chars.
      var i = charCount(firstCP)
      while (i < name.length) {
        val cp = name.codePointAt(i)
        if (!isJSIdentifierPart(cp))
          return false
        i += charCount(cp)
      }

      true
      // scalastyle:on return
    }
  }

  /** `import` statement, except namespace import.
   *
   *  This corresponds to the following syntax:
   *  {{{
   *  import { <binding1_1> as <binding1_2>, ..., <bindingN_1> as <bindingN_2> } from <from>
   *  }}}
   *  The `_1` parts of bindings are therefore the identifier names that are
   *  imported, as specified in `export` clauses of the module. The `_2` parts
   *  are the names under which they are imported in the current module.
   *
   *  Special cases:
   *  - When `_1.name == _2.name`, there is shorter syntax in ES, i.e.,
   *    `import { binding } from 'from'`.
   *  - When `_1.name == "default"`, it is equivalent to a default import.
   */
  case class Import(bindings: List[(ExportName, Ident)], from: StringLiteral)(
      implicit val pos: Position)
      extends Tree

  /** Namespace `import` statement.
   *
   *  This corresponds to the following syntax:
   *  {{{
   *  import * as <binding> from <from>
   *  }}}
   */
  case class ImportNamespace(binding: Ident, from: StringLiteral)(
      implicit val pos: Position)
      extends Tree

  /** `export` statement.
   *
   *  This corresponds to the following syntax:
   *  {{{
   *  export { <binding1_1> as <binding1_2>, ..., <bindingN_1> as <bindingN_2> }
   *  }}}
   *  The `_1` parts of bindings are therefore the identifiers from the current
   *  module that are exported. The `_2` parts are the names under which they
   *  are exported to other modules.
   */
  case class Export(bindings: List[(Ident, ExportName)])(
      implicit val pos: Position)
      extends Tree
}
| nicolasstucki/scala-js | linker/shared/src/main/scala/org/scalajs/linker/backend/javascript/Trees.scala | Scala | apache-2.0 | 12,033 |
package com.arcusys.valamis.web.service
import java.io.ByteArrayInputStream
import com.arcusys.learn.liferay.util.Base64Helper
import com.arcusys.valamis.slide.convert.PDFProcessor
import org.apache.pdfbox.pdmodel.PDDocument
import org.apache.pdfbox.rendering.{ImageType, PDFRenderer}
/**
 * Renders every page of a PDF document to an image and returns the pages as
 * base64-encoded strings, in page order.
 */
class PDFProcessorImpl extends PDFProcessor {
  // Render scale factor applied to the PDF's native resolution.
  private val Scale = 1.5f // <- soon to be moved to properties file
  private val ImageTpe = ImageType.RGB

  /**
   * Render each page of the given PDF.
   *
   * @param content raw bytes of a PDF file
   * @return one base64-encoded image string per page
   *         (encoding format determined by Base64Helper — presumably PNG/JPEG;
   *         confirm against Base64Helper.encodeImagesToBase64)
   */
  override def parsePDF(content: Array[Byte]): List[String]= {
    val input = new ByteArrayInputStream(content)
    val pdf = PDDocument.load(input)
    val renderer = new PDFRenderer(pdf)
    try {
      val pages = (0 until pdf.getNumberOfPages)
        .map(renderer.renderImage(_, Scale, ImageTpe))
        .toList
      Base64Helper.encodeImagesToBase64(pages)
    }
    finally {
      // Always release the document's resources, even if rendering fails.
      pdf.close()
    }
  }
}
| igor-borisov/valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/service/PDFProcessorImpl.scala | Scala | gpl-3.0 | 862 |
package rww.ui
import japgolly.scalajs.react.ReactComponentB
import japgolly.scalajs.react.vdom.all._
import rww._
/**
 * Placeholder React component shown while a resource at the given URI loads.
 *
 * Created by hjs on 15/05/2015.
 */
object Loading {
  // Stateless-ish component: renders a "loading <uri>" paragraph for the
  // URI passed as props.
  val Loading = ReactComponentB[Rdf#URI]("Profile")
    .initialState(None)
    .renderP(($, P) => {
      // import shapeless.singleton.syntax._ <- use this when using styleC
      p("loading "+P.toString)
    })
    .build

  /** Convenience constructor: mounts the component for the given URI. */
  def apply(url: Rdf#URI) = Loading(url)
}
| read-write-web/rww-scala-js | src/main/scala/rww/ui/Loading.scala | Scala | apache-2.0 | 439 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.