code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2011 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.scrooge.frontend
import com.twitter.scrooge.ast._
import scala.collection.mutable.ArrayBuffer
import scala.util.parsing.input.{NoPosition, Positional}
/**
 * Base class for frontend errors that can report the source position of the
 * offending AST node in the parsed Thrift file.
 *
 * Note: the separator between the message and the position dump must be a real
 * newline (`\n`); the previous `\\n` produced a literal backslash-n in the
 * exception message.
 */
class PositionalException(message: String, node: Positional)
  extends Exception(s"$message\n${node.pos.longString}")

// Specific resolution failures. Each carries the unresolved name and the AST
// node whose position is included in the exception message.
case class TypeNotFoundException(name: String, node: Positional) extends PositionalException(name, node)
case class UndefinedConstantException(name: String, node: Positional) extends PositionalException(name, node)
case class UndefinedSymbolException(name: String, node: Positional) extends PositionalException(name, node)
case class TypeMismatchException(name: String, node: Positional) extends PositionalException(name, node)
case class QualifierNotFoundException(name: String, node: Positional) extends PositionalException(name, node)
/**
 * A document whose types have been resolved, together with the resolver that
 * was used to resolve them.
 */
case class ResolvedDocument(document: Document, resolver: TypeResolver) {

  /**
   * Given an ID, produce its FQN (e.g. a Java FQN) by appending the namespace.
   *
   * Resolution order: the namespace declared for `language`, then (optionally)
   * the "java" namespace, then `defaultNamespace`.
   */
  def qualifySimpleID(
    sid: SimpleID,
    language: String,
    defaultNamespace: String,
    fallbackToJavaNamespace: Boolean = true
  ): Identifier = {
    val declared = document.namespace(language)
    val javaFallback = if (fallbackToJavaNamespace) document.namespace("java") else None
    val scope = declared.orElse(javaFallback).getOrElse(SimpleID(defaultNamespace))
    sid.addScope(scope)
  }

  /**
   * Given a type, produce its FQN (e.g. a Java FQN) by appending the namespace.
   * A scope prefix means the type lives in an included document, so the
   * qualification happens against that document instead of this one.
   */
  def qualifyName(
    name: NamedType,
    language: String,
    defaultNamespace: String
  ): Identifier = {
    val owner = name.scopePrefix match {
      case Some(prefix) => resolver.includeMap(prefix.name)
      case None => this
    }
    owner.qualifySimpleID(name.sid, language, defaultNamespace)
  }

  /**
   * Collect the chain of services extended by the given service.
   * Returns pairs (resolvedDoc, service) -- resolvedDoc contains service
   * and should be used to qualify types used by the service.
   */
  def collectParentServices(service: Service): Seq[(ResolvedDocument, Service)] =
    service.parent.toSeq.flatMap {
      case ServiceParent(sid, None) =>
        // Parent declared in this document.
        val parentService = resolver.resolveService(sid)
        (this, parentService) +: collectParentServices(parentService)
      case ServiceParent(sid, Some(filename)) =>
        // Parent declared in an included document; continue the walk there.
        val parentDoc: ResolvedDocument = resolver.includeMap(filename.name)
        val parentService = parentDoc.resolver.resolveService(sid)
        (parentDoc, parentService) +: parentDoc.collectParentServices(parentService)
    }

  /**
   * Collect and resolve services extended by the given service.
   * @return a list of [[ResolvedService ResolvedServices]] that contain FQNs for the parent services.
   */
  def resolveParentServices(
    service: Service,
    namespaceLanguage: String,
    defaultNamespace: String
  ): Seq[ResolvedService] =
    collectParentServices(service).map {
      case (parentDoc, parentService) =>
        val fqn = parentDoc.qualifySimpleID(
          parentService.sid.toTitleCase,
          namespaceLanguage,
          defaultNamespace
        )
        ResolvedService(fqn, parentService)
    }
}
// A service paired with its fully-qualified identifier.
case class ResolvedService(serviceID: Identifier, service: Service)
// A definition paired with the resolver state produced after binding it.
case class ResolvedDefinition(definition: Definition, resolver: TypeResolver)
/**
 * Immutable symbol table used while resolving a Thrift document.
 * Every `with*` method returns a new resolver; nothing is mutated in place.
 *
 * @param typeMap    resolved field types keyed by simple (unqualified) name
 * @param constMap   resolved constant definitions keyed by constant name
 * @param serviceMap resolved services keyed by service name
 * @param includeMap resolved included documents keyed by include prefix
 */
case class TypeResolver(
  typeMap: Map[String, FieldType] = Map.empty,
  constMap: Map[String, ConstDefinition] = Map.empty,
  serviceMap: Map[String, Service] = Map.empty,
  includeMap: Map[String, ResolvedDocument] = Map.empty) {

  // Resolver of the included document registered under `includePath`.
  // Throws QualifierNotFoundException (reported at `pos`) when unknown.
  protected def getResolver(includePath: String, pos: Positional = new Positional { pos = NoPosition }): TypeResolver = {
    includeMap.get(includePath).getOrElse(throw new QualifierNotFoundException(includePath, pos)).resolver
  }

  // Resolves a (possibly qualified) identifier to a field type. A qualified
  // ID such as `foo.Bar` delegates to the resolver of include `foo`.
  def resolveFieldType(id: Identifier): FieldType = id match {
    case SimpleID(name, _) => typeMap.get(name).getOrElse(throw new TypeNotFoundException(name, id))
    case qid: QualifiedID => getResolver(qid.names.head, qid).resolveFieldType(qid.tail)
  }

  // Resolves a service parent, which may live in an included file.
  def resolveServiceParent(parent: ServiceParent): Service =
    parent.filename match {
      case None => resolveService(parent.sid)
      case Some(filename) => getResolver(filename.name).resolveService(parent.sid)
    }

  // Looks up a service declared in this document by simple name.
  def resolveService(sid: SimpleID): Service = serviceMap.get(sid.name).getOrElse(
    throw new UndefinedSymbolException(sid.name, sid))

  // Resolves a constant reference to its declared type and right-hand side.
  def resolveConst(id: Identifier): (FieldType, RHS) = id match {
    case SimpleID(name, _) =>
      val const = constMap.get(name).getOrElse(throw new UndefinedConstantException(name, id))
      (const.fieldType, const.value)
    case qid: QualifiedID => getResolver(qid.names.head).resolveConst(qid.tail)
  }

  /**
   * Returns a new TypeResolver with the given include mapping added.
   */
  def withInclude(inc: Include): TypeResolver = {
    // The included document is resolved with a fresh resolver so that its
    // symbols do not leak into (or read from) the current document's scope.
    val resolver = TypeResolver()
    val resolvedDocument = resolver(inc.document, Some(inc.prefix))
    copy(includeMap = includeMap + (inc.prefix.name -> resolvedDocument))
  }

  /**
   * Returns a new TypeResolver with the given type mapping added.
   */
  def withType(name: String, fieldType: FieldType): TypeResolver = {
    copy(typeMap = typeMap + (name -> fieldType))
  }

  /**
   * Returns a new TypeResolver with the given constant added.
   */
  def withConst(const: ConstDefinition): TypeResolver = {
    copy(constMap = constMap + (const.sid.name -> const))
  }

  /**
   * Returns a new TypeResolver with the given service added.
   */
  def withService(service: Service): TypeResolver = {
    copy(serviceMap = serviceMap + (service.sid.name -> service))
  }

  /**
   * Resolves all types in the given document.
   * @param scopePrefix the scope of the document if the document is an include
   */
  def apply(doc: Document, scopePrefix: Option[SimpleID] = None): ResolvedDocument = {
    var resolver = this
    val includes = doc.headers.collect { case i: Include => i }
    val defBuf = new ArrayBuffer[Definition](doc.defs.size)
    // Resolve includes first so later definitions can reference them.
    for (i <- includes) {
      try {
        resolver = resolver.withInclude(i)
      } catch {
        case ex: Throwable =>
          // Wrap so the caller knows which included file failed to resolve.
          throw new FileParseException(filename = i.filePath, cause = ex)
      }
    }
    // Definitions are resolved in order; each one may extend the resolver.
    for (d <- doc.defs) {
      val ResolvedDefinition(d2, r2) = resolver(d, scopePrefix)
      resolver = r2
      defBuf += d2
    }
    ResolvedDocument(doc.copy(defs = defBuf.toSeq), resolver)
  }

  /**
   * Resolves types in the given definition according to the current
   * typeMap, and then returns an updated TypeResolver with the new
   * definition bound, plus the resolved definition.
   */
  def apply(definition: Definition, scopePrefix: Option[SimpleID]): ResolvedDefinition = {
    definition match {
      case d @ Typedef(sid, t, _) =>
        val resolved = apply(t)
        ResolvedDefinition(
          d.copy(fieldType = resolved),
          withType(sid.name, resolved))
      case s @ Struct(sid, _, fs, _, _) =>
        val resolved = s.copy(fields = fs.map(apply))
        ResolvedDefinition(
          resolved,
          withType(sid.name, StructType(resolved, scopePrefix)))
      case u @ Union(sid, _, fs, _, _) =>
        val resolved = u.copy(fields = fs.map(apply))
        ResolvedDefinition(
          resolved,
          withType(sid.name, StructType(resolved, scopePrefix)))
      case e @ Exception_(sid, _, fs, _, _) =>
        val resolved = e.copy(fields = fs.map(apply))
        ResolvedDefinition(
          resolved,
          withType(sid.name, StructType(resolved, scopePrefix)))
      case c @ ConstDefinition(_, t, v, _) =>
        val fieldType = apply(t)
        val resolved = c.copy(fieldType = fieldType, value = apply(v, fieldType))
        ResolvedDefinition(resolved, withConst(resolved))
      case s @ Service(sid, parent, fs, _) =>
        // No need to modify Service, but check that we can resolve parent.
        parent.foreach { serviceParent => resolveServiceParent(serviceParent) }
        val resolved = s.copy(functions = fs.map(apply))
        ResolvedDefinition(resolved, withService(resolved))
      case e @ Enum(sid, _, _, _) =>
        ResolvedDefinition(e, withType(sid.name, EnumType(e, scopePrefix)))
      case s @ Senum(sid, _) =>
        // Senums are legacy Thrift; they are treated as plain strings.
        ResolvedDefinition(s, withType(sid.name, TString))
      // These definition kinds carry no unresolved types of their own.
      case d: EnumField => ResolvedDefinition(d, this)
      case d: FunctionArgs => ResolvedDefinition(d, this)
      case d: FunctionResult => ResolvedDefinition(d, this)
    }
  }

  // Resolves a function's return type, argument fields and throws clauses.
  def apply(f: Function): Function = f match {
    case Function(_, _, t, as, ts, _) =>
      f.copy(funcType = apply(t), args = as.map(apply), throws = ts.map(apply))
  }

  // Resolves a field's type and, if present, its default value against that type.
  def apply(f: Field): Field = {
    val fieldType = apply(f.fieldType)
    f.copy(
      fieldType = fieldType,
      default = f.default.map { const => apply(const, fieldType) })
  }

  // Resolves a function return type; void variants pass through unchanged.
  def apply(t: FunctionType): FunctionType = t match {
    case Void => Void
    case OnewayVoid => OnewayVoid
    case t: FieldType => apply(t)
  }

  // Resolves a field type, recursing into container element types.
  def apply(t: FieldType): FieldType = t match {
    case ReferenceType(id) => resolveFieldType(id)
    case m @ MapType(k, v, _) => m.copy(keyType = apply(k), valueType = apply(v))
    case s @ SetType(e, _) => s.copy(eltType = apply(e))
    case l @ ListType(e, _) => l.copy(eltType = apply(e))
    case b: BaseType => b
    case e: EnumType => e
    case s: StructType => s
  }

  // Resolves a right-hand-side (constant/default) value against its expected
  // field type, rewriting it into a typed RHS node.
  def apply(c: RHS, fieldType: FieldType): RHS = c match {
    // list values and map values look the same in Thrift, but different in Java and Scala
    // So we need type information in order to generated correct code.
    case l @ ListRHS(elems) =>
      fieldType match {
        case ListType(eltType, _) => l.copy(elems = elems.map(e => apply(e, eltType)))
        case SetType(eltType, _) => SetRHS(elems.map(e => apply(e, eltType)).toSet)
        case _ => throw new TypeMismatchException("Expecting " + fieldType + ", found " + l, c)
      }
    case m @ MapRHS(elems) =>
      fieldType match {
        case MapType(keyType, valType, _) =>
          m.copy(elems = elems.map { case (k, v) => (apply(k, keyType), apply(v, valType)) })
        case st @ StructType(structLike: StructLike, _) =>
          // A map literal can also initialize a struct/union: its string keys
          // name the fields being set.
          val fieldMultiMap: Map[String, Seq[(String, RHS)]] = elems.collect {
            case (StringLiteral(fieldName), value) => (fieldName, value)
          }.groupBy { case (fieldName, _) => fieldName }
          val fieldMap: Map[String, RHS] = fieldMultiMap.collect {
            case (fieldName: String, values: Seq[(String, RHS)]) if values.length == 1 =>
              values.head
            case (fieldName: String, _: Seq[(String, RHS)]) =>
              throw new TypeMismatchException(s"Duplicate default values for ${fieldName} found for $fieldType", m)
            // Can't have 0 elements here because fieldMultiMap is built by groupBy.
          }
          structLike match {
            case u: Union =>
              // A union must be initialized with exactly one of its fields.
              val definedFields = u.fields.collect {
                case field if fieldMap.contains(field.sid.name) =>
                  (field, fieldMap(field.sid.name))
              }
              if (definedFields.length == 0)
                throw new UndefinedConstantException(s"Constant value missing for union ${u.originalName}", m)
              if (definedFields.length > 1)
                throw new UndefinedConstantException(s"Multiple constant values for union ${u.originalName}", m)
              val (field, rhs) = definedFields.head
              val resolvedRhs = apply(rhs, field.fieldType)
              UnionRHS(sid = st.sid, field = field, initializer = resolvedRhs)
            case struct: StructLike =>
              // For a struct, every non-optional field without a declared
              // default must be supplied by the literal.
              val structMap = Map.newBuilder[Field, RHS]
              struct.fields.foreach { field =>
                val fieldName = field.sid.name
                if (fieldMap.contains(fieldName)) {
                  val resolvedRhs = apply(fieldMap(fieldName), field.fieldType)
                  structMap += field -> resolvedRhs
                } else if (!field.requiredness.isOptional && field.default.isEmpty) {
                  throw new TypeMismatchException(s"Value required for ${fieldName} in $fieldType", m)
                }
              }
              StructRHS(sid = st.sid, elems = structMap.result())
          }
        case _ => throw new TypeMismatchException("Expecting " + fieldType + ", found " + m, m)
      }
    case i @ IdRHS(id) => {
      val (constFieldType, constRHS) = id match {
        case sid: SimpleID =>
          // When the rhs value is a simpleID, it can only be a constant
          // defined in the same file
          resolveConst(sid)
        case qid @ QualifiedID(names) =>
          fieldType match {
            case EnumType(enum, _) =>
              // Qualified ID against an enum type: resolve it as an enum value.
              val resolvedFieldType = resolveFieldType(qid.qualifier)
              val value = enum.values.find(_.sid.name == names.last).getOrElse(
                throw new UndefinedSymbolException(qid.fullName, qid))
              (resolvedFieldType, EnumRHS(enum, value))
            case t => resolveConst(qid)
          }
      }
      if (constFieldType != fieldType)
        throw new TypeMismatchException(
          s"Type mismatch: Expecting $fieldType, found ${id.fullName}: $constFieldType",
          id
        )
      constRHS
    }
    // Literals (numbers, strings, booleans, ...) need no resolution.
    case _ => c
  }
}
| thirstycrow/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/frontend/TypeResolver.scala | Scala | apache-2.0 | 13,958 |
package reactivemongo
import reactivemongo.bson.{ BSONArray, BSONBinary, BSONDocument }
import reactivemongo.core.protocol.MongoWireVersion
import reactivemongo.api.{
BSONSerializationPack,
NodeSetSession,
SessionTransaction,
WriteConcern
}
import reactivemongo.api.commands.{
UpdateCommand,
ResolvedCollectionCommand,
WriteConcern => WC
}
/**
 * Unit spec for the BSON serialization of the MongoDB `update` command,
 * exercising the writer with and without a logical session / transaction.
 */
final class UpdateCommandSpec extends org.specs2.mutable.Specification {
  "Update command" title

  // Writer under test: serializes a resolved update command to a BSON document.
  private val writer = UpdateCommand.writer(BSONSerializationPack)(Command)

  section("unit")
  "Update command" should {
    "be written" >> {
      // Baseline command document expected for `update1` (two update ops).
      val base = BSONDocument(
        "update" -> "foo",
        "ordered" -> true,
        "updates" -> BSONArray(
          BSONDocument(
            "q" -> BSONDocument("_id" -> 1),
            "u" -> BSONDocument(f"$$set" -> BSONDocument("value" -> 1)),
            "upsert" -> true,
            "multi" -> false),
          BSONDocument(
            "q" -> BSONDocument("value" -> 2),
            "u" -> BSONDocument(f"$$set" -> BSONDocument("label" -> "two")),
            "upsert" -> false,
            "multi" -> true)))

      lazy val session = new NodeSetSession(java.util.UUID.randomUUID())

      // Logical-session document appended to commands run within a session.
      val lsid = BSONDocument(
        "lsid" -> BSONDocument(
          "id" -> BSONBinary(session.lsid)))

      val writeConcern = BSONDocument(
        "writeConcern" -> BSONDocument(
          "w" -> 1,
          "j" -> false))

      // ---

      "without session" in {
        writer(None, MongoWireVersion.V26)(
          update1) must_=== (base ++ writeConcern)
      }

      "with session" in {
        val write = writer(Some(session), MongoWireVersion.V26)

        // w/o transaction started
        write(update1) must_=== (base ++ lsid ++ writeConcern) and {
          session.startTransaction(WriteConcern.Default, None).
            aka("transaction") must beSuccessfulTry[(SessionTransaction, Boolean)].which { _ =>
              // w/ transaction started
              write(update1) must_=== (base ++ lsid ++ BSONDocument(
                "txnNumber" -> 1L,
                "startTransaction" -> true, // as first command in tx
                "autocommit" -> false))
            }
        } and {
          // w/o 'startTransaction' flag after first command in tx
          write(update1) must_=== (base ++ lsid ++ BSONDocument(
            "txnNumber" -> 1L, "autocommit" -> false))
        }
      }
    }
  }
  section("unit")

  // ---

  // First update operation: upsert a $set on _id 1, single document.
  private lazy val elements1 = Command.UpdateElement(
    q = BSONDocument("_id" -> 1),
    u = BSONDocument(f"$$set" -> BSONDocument("value" -> 1)),
    upsert = true,
    multi = false)

  // Second update operation: multi-document $set, no upsert.
  private lazy val elements2 = Command.UpdateElement(
    q = BSONDocument("value" -> 2),
    u = BSONDocument(f"$$set" -> BSONDocument("label" -> "two")),
    upsert = false,
    multi = true)

  // Fully resolved update command against collection "foo".
  private lazy val update1 = ResolvedCollectionCommand(
    collection = "foo",
    command = Command.Update(
      updates = Seq(elements1, elements2),
      ordered = true,
      writeConcern = WC.Default))

  // Concrete command instance bound to the BSON serialization pack.
  private object Command extends UpdateCommand[BSONSerializationPack.type] {
    val pack = BSONSerializationPack
  }
}
| ornicar/ReactiveMongo | driver/src/test/scala/UpdateCommandSpec.scala | Scala | apache-2.0 | 3,188 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.orc
import java.nio.file.Files
import java.util.UUID
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileContext, Path}
import org.geotools.data.Query
import org.geotools.util.factory.Hints
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.fs.storage.api.FileSystemStorage.FileSystemWriter
import org.locationtech.geomesa.fs.storage.api.StorageMetadata.{PartitionMetadata, StorageFile}
import org.locationtech.geomesa.fs.storage.api.{FileSystemContext, FileSystemStorage, Metadata, NamedOptions}
import org.locationtech.geomesa.fs.storage.common.StorageKeys
import org.locationtech.geomesa.fs.storage.common.metadata.FileBasedMetadataFactory
import org.locationtech.geomesa.fs.storage.common.partitions.DateTimeScheme
import org.locationtech.geomesa.fs.storage.common.utils.PathCache
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class OrcFileSystemStorageTest extends Specification with LazyLogging {
val config = new Configuration()
// 8 bits resolution creates 3 partitions with our test data
val scheme = NamedOptions("z2-8bits")
"OrcFileSystemWriter" should {
"read and write features" in {
val sft = SimpleFeatureTypes.createType("orc-test", "*geom:Point:srid=4326,name:String,age:Int,dtg:Date")
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
sf.setAttribute(1, s"name$i")
sf.setAttribute(2, s"$i")
sf.setAttribute(3, f"2014-01-${i + 1}%02dT00:00:01.000Z")
sf.setAttribute(0, s"POINT(4$i 5$i)")
sf
}
withTestDir { dir =>
val context = FileSystemContext(FileContext.getFileContext(dir.toUri), config, dir)
val metadata =
new FileBasedMetadataFactory()
.create(context, Map.empty, Metadata(sft, "orc", scheme, leafStorage = true))
val storage = new OrcFileSystemStorageFactory().apply(context, metadata)
storage must not(beNull)
val writers = scala.collection.mutable.Map.empty[String, FileSystemWriter]
features.foreach { f =>
val partition = storage.metadata.scheme.getPartitionName(f)
val writer = writers.getOrElseUpdate(partition, storage.getWriter(partition))
writer.write(f)
}
writers.foreach(_._2.close())
logger.debug(s"wrote to ${writers.size} partitions for ${features.length} features")
val partitions = storage.getPartitions.map(_.name)
partitions must haveLength(writers.size)
val transformsList = Seq(null, Array("geom"), Array("geom", "dtg"), Array("geom", "name"))
val doTest = testQuery(storage, sft) _
foreach(transformsList) { transforms =>
doTest("INCLUDE", transforms, features)
doTest("IN('0', '2')", transforms, Seq(features(0), features(2)))
doTest("bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, features.dropRight(2))
doTest("bbox(geom,42,48,52,62) and dtg DURING 2013-12-15T00:00:00.000Z/2014-01-15T00:00:00.000Z", transforms, features.drop(2))
doTest("bbox(geom,42,48,52,62)", transforms, features.drop(2))
doTest("dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, features.dropRight(2))
doTest("name = 'name5' and bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, features.slice(5, 6))
doTest("name < 'name5'", transforms, features.take(5))
doTest("name = 'name5'", transforms, features.slice(5, 6))
doTest("age < 5", transforms, features.take(5))
}
// verify we can load an existing storage
val loaded = new FileBasedMetadataFactory().load(context)
loaded must beSome
testQuery(new OrcFileSystemStorageFactory().apply(context, loaded.get), sft)("INCLUDE", null, features)
}
}
"read and write complex features" in {
val sft = SimpleFeatureTypes.createType("orc-test-complex",
"name:String,age:Int,time:Long,height:Float,weight:Double,bool:Boolean," +
"uuid:UUID,bytes:Bytes,list:List[Int],map:Map[String,Long]," +
"line:LineString,mpt:MultiPoint,poly:Polygon,mline:MultiLineString,mpoly:MultiPolygon,g:Geometry," +
"dtg:Date,*geom:Point:srid=4326")
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
sf.setAttribute("name", s"name$i")
sf.setAttribute("age", s"$i")
sf.setAttribute("time", s"$i")
sf.setAttribute("height", s"$i")
sf.setAttribute("weight", s"$i")
sf.setAttribute("bool", Boolean.box(i < 5))
sf.setAttribute("uuid", UUID.fromString(s"00000000-0000-0000-0000-00000000000$i"))
sf.setAttribute("bytes", Array.tabulate[Byte](i)(i => i.toByte))
sf.setAttribute("list", Seq.tabulate[Integer](i)(i => Int.box(i)))
sf.setAttribute("map", (0 until i).map(i => i.toString -> Long.box(i)).toMap)
sf.setAttribute("line", s"LINESTRING(0 $i, 2 $i, 8 ${10 - i})")
sf.setAttribute("mpt", s"MULTIPOINT(0 $i, 2 3)")
sf.setAttribute("poly",
if (i == 5) {
// multipolygon example from wikipedia
"POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10),(20 30, 35 35, 30 20, 20 30))"
} else {
s"POLYGON((40 3$i, 42 3$i, 42 2$i, 40 2$i, 40 3$i))"
}
)
sf.setAttribute("mline", s"MULTILINESTRING((0 2, 2 $i, 8 6),(0 $i, 2 $i, 8 ${10 - i}))")
sf.setAttribute("mpoly", s"MULTIPOLYGON(((-1 0, 0 $i, 1 0, 0 -1, -1 0)), ((-2 6, 1 6, 1 3, -2 3, -2 6)), ((-1 5, 2 5, 2 2, -1 2, -1 5)))")
sf.setAttribute("g", sf.getAttribute(Seq("line", "mpt", "poly", "mline", "mpoly").drop(i % 5).head))
sf.setAttribute("dtg", f"2014-01-${i + 1}%02dT00:00:01.000Z")
sf.setAttribute("geom", s"POINT(4$i 5$i)")
sf
}
withTestDir { dir =>
val context = FileSystemContext(FileContext.getFileContext(dir.toUri), config, dir)
val metadata =
new FileBasedMetadataFactory()
.create(context, Map.empty, Metadata(sft, "orc", scheme, leafStorage = true))
val storage = new OrcFileSystemStorageFactory().apply(context, metadata)
storage must not(beNull)
val writers = scala.collection.mutable.Map.empty[String, FileSystemWriter]
features.foreach { f =>
val partition = storage.metadata.scheme.getPartitionName(f)
val writer = writers.getOrElseUpdate(partition, storage.getWriter(partition))
writer.write(f)
}
writers.foreach(_._2.close())
logger.debug(s"wrote to ${writers.size} partitions for ${features.length} features")
val partitions = storage.getPartitions.map(_.name)
partitions must haveLength(writers.size)
val transformsList = Seq(null, Array("geom"), Array("geom", "dtg"), Array("geom", "name"))
val doTest = testQuery(storage, sft) _
foreach(transformsList) { transforms =>
doTest("INCLUDE", transforms, features)
doTest("IN('0', '2')", transforms, Seq(features(0), features(2)))
doTest("bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, features.dropRight(2))
doTest("bbox(geom,42,48,52,62) and dtg DURING 2013-12-15T00:00:00.000Z/2014-01-15T00:00:00.000Z", transforms, features.drop(2))
doTest("bbox(geom,42,48,52,62)", transforms, features.drop(2))
doTest("dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, features.dropRight(2))
doTest("name = 'name5' and bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z", transforms, features.slice(5, 6))
doTest("name < 'name5'", transforms, features.take(5))
doTest("name = 'name5'", transforms, features.slice(5, 6))
doTest("age < 5", transforms, features.take(5))
doTest("age > 5", transforms, features.drop(6))
}
}
}
"modify and delete features" in {
val sft = SimpleFeatureTypes.createType("orc-test", "*geom:Point:srid=4326,name:String,age:Int,dtg:Date")
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
sf.setAttribute(1, s"name$i")
sf.setAttribute(2, s"$i")
sf.setAttribute(3, f"2014-01-${i + 1}%02dT00:00:01.000Z")
sf.setAttribute(0, s"POINT(4$i 5$i)")
sf
}
withTestDir { dir =>
val context = FileSystemContext(FileContext.getFileContext(dir.toUri), config, dir)
val metadata =
new FileBasedMetadataFactory()
.create(context, Map.empty, Metadata(sft, "orc", scheme, leafStorage = true))
val storage = new OrcFileSystemStorageFactory().apply(context, metadata)
storage must not(beNull)
val writers = scala.collection.mutable.Map.empty[String, FileSystemWriter]
features.foreach { f =>
val partition = storage.metadata.scheme.getPartitionName(f)
val writer = writers.getOrElseUpdate(partition, storage.getWriter(partition))
writer.write(f)
}
writers.foreach(_._2.close())
logger.debug(s"wrote to ${writers.size} partitions for ${features.length} features")
testQuery(storage, sft)("INCLUDE", null, features)
val updater = storage.getWriter(Filter.INCLUDE)
updater.hasNext must beTrue
while (updater.hasNext) {
val feature = updater.next
if (feature.getID == "0") {
updater.remove()
} else if (feature.getID == "1") {
feature.setAttribute(1, "name-updated")
updater.write()
}
}
updater.close()
val updates = features.drop(2) :+ {
val mod = ScalaSimpleFeature.copy(features.drop(1).head)
mod.setAttribute("name", "name-updated")
mod
}
testQuery(storage, sft)("INCLUDE", null, updates)
}
}
"use custom file observers" in {
val userData = s"${StorageKeys.ObserversKey}=${classOf[TestObserverFactory].getName}"
val sft = SimpleFeatureTypes.createType("orc-test",
s"*geom:Point:srid=4326,name:String,age:Int,dtg:Date;$userData")
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, i.toString)
sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
sf.setAttribute(1, s"name$i")
sf.setAttribute(2, s"$i")
sf.setAttribute(3, f"2014-01-${i + 1}%02dT00:00:01.000Z")
sf.setAttribute(0, s"POINT(4$i 5$i)")
sf
}
withTestDir { dir =>
val context = FileSystemContext(FileContext.getFileContext(dir.toUri), config, dir)
val metadata =
new FileBasedMetadataFactory()
.create(context, Map.empty, Metadata(sft, "orc", scheme, leafStorage = true))
val storage = new OrcFileSystemStorageFactory().apply(context, metadata)
storage must not(beNull)
val writers = scala.collection.mutable.Map.empty[String, FileSystemWriter]
features.foreach { f =>
val partition = storage.metadata.scheme.getPartitionName(f)
val writer = writers.getOrElseUpdate(partition, storage.getWriter(partition))
writer.write(f)
}
TestObserverFactory.observers must haveSize(3) // 3 partitions due to our data and scheme
forall(TestObserverFactory.observers)(_.closed must beFalse)
writers.foreach(_._2.close())
forall(TestObserverFactory.observers)(_.closed must beTrue)
TestObserverFactory.observers.flatMap(_.features) must containTheSameElementsAs(features)
TestObserverFactory.observers.clear()
logger.debug(s"wrote to ${writers.size} partitions for ${features.length} features")
val updater = storage.getWriter(Filter.INCLUDE)
updater.hasNext must beTrue
while (updater.hasNext) {
val feature = updater.next
if (feature.getID == "0") {
updater.remove()
} else if (feature.getID == "1") {
feature.setAttribute(1, "name-updated")
updater.write()
}
}
TestObserverFactory.observers must haveSize(2) // 2 partitions were updated
forall(TestObserverFactory.observers)(_.closed must beFalse)
updater.close()
forall(TestObserverFactory.observers)(_.closed must beTrue)
TestObserverFactory.observers.flatMap(_.features) must haveLength(2)
}
}
"transition old metadata files" in {
withTestDir { dir =>
val context = FileSystemContext(FileContext.getFileContext(dir.toUri), config, dir)
val meta = new Path(dir, "metadata.json")
context.fc.util.copy(new Path(getClass.getClassLoader.getResource("metadata-old.json").toURI), meta)
context.fc.util.exists(meta) must beTrue
PathCache.invalidate(context.fc, meta)
val metadata = new FileBasedMetadataFactory().load(context)
metadata must beSome
val storage = new OrcFileSystemStorageFactory().apply(context, metadata.get)
storage.metadata.encoding mustEqual "orc"
storage.metadata.sft.getTypeName mustEqual "example-csv"
storage.metadata.scheme must beAnInstanceOf[DateTimeScheme]
storage.getPartitions must containTheSameElementsAs(
Seq(
PartitionMetadata("2015/05/06", Seq(StorageFile("06_Wb48cb7293793447480c0885f3f4bb56a.orc", 0L)), None, 0L),
PartitionMetadata("2015/06/07", Seq(StorageFile("07_W25d311113f0b4bad819f209f00a58173.orc", 0L)), None, 0L),
PartitionMetadata("2015/10/23", Seq(StorageFile("23_Weedeb59bad0d4521b2ae46189eac4a4d.orc", 0L)), None, 0L)
)
)
}
}
}
def withTestDir[R](code: Path => R): R = {
val file = new Path(Files.createTempDirectory("gm-orc-test").toUri)
try { code(file) } finally {
file.getFileSystem(new Configuration).delete(file, true)
}
}
def testQuery(storage: FileSystemStorage,
sft: SimpleFeatureType)
(filter: String,
transforms: Array[String],
results: Seq[SimpleFeature]): MatchResult[Any] = {
import scala.collection.JavaConversions._
val query = new Query(sft.getTypeName, ECQL.toFilter(filter), transforms)
val features = {
val iter = SelfClosingIterator(storage.getReader(query))
// note: need to copy features in iterator as same object is re-used
iter.map(ScalaSimpleFeature.copy).toList
}
val attributes = Option(transforms).getOrElse(sft.getAttributeDescriptors.map(_.getLocalName).toArray)
features.map(_.getID) must containTheSameElementsAs(results.map(_.getID))
forall(features) { feature =>
feature.getAttributes must haveLength(attributes.length)
forall(attributes.zipWithIndex) { case (attribute, i) =>
feature.getAttribute(attribute) mustEqual feature.getAttribute(i)
feature.getAttribute(attribute) mustEqual results.find(_.getID == feature.getID).get.getAttribute(attribute)
}
}
}
}
| locationtech/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-orc/src/test/scala/org/locationtech/geomesa/fs/storage/orc/OrcFileSystemStorageTest.scala | Scala | apache-2.0 | 16,560 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package interpreter
import scala.tools.reflect.StdTags
import scala.reflect.runtime.{ universe => ru }
/**
 * Type tags for REPL-specific classes, built via `tagOfStaticClass` from the
 * mixed-in [[StdTags]]. Lazy so tags are only materialized when first used.
 */
trait StdReplTags extends StdTags {
  lazy val tagOfStdReplVals = tagOfStaticClass[StdReplVals]
  lazy val tagOfIMain = tagOfStaticClass[IMain]
  lazy val tagOfRepl = tagOfStaticClass[Repl]
}
/**
 * Default instance of [[StdReplTags]] bound to the runtime universe and a
 * mirror for this class's class loader.
 */
object StdReplTags extends StdTags with StdReplTags {
  val u: ru.type = ru
  val m = u.runtimeMirror(getClass.getClassLoader)
}
| scala/scala | src/repl/scala/tools/nsc/interpreter/StdReplTags.scala | Scala | apache-2.0 | 741 |
/*
* Copyright 2016 Nikolay Tatarinov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.rockjam.iqnotes.http
import com.typesafe.config.Config
import scala.util.Try
object HttpConfig {

  /**
   * Reads the HTTP settings from the `iqnotes.http` section of the given
   * configuration. Any missing key or wrong type is captured as a `Failure`.
   */
  def load(config: Config): Try[HttpConfig] = Try {
    val http = config.getConfig("iqnotes.http")
    HttpConfig(
      interface = http.getString("interface"),
      port = http.getInt("port")
    )
  }
}

/** Bind address and port for the HTTP server. */
final case class HttpConfig(interface: String, port: Int)
| rockjam/iq-notes | src/main/scala/com/github/rockjam/iqnotes/http/HttpConfig.scala | Scala | apache-2.0 | 942 |
package slick.compiler
import slick.ast._
import Util._
/** Expand paths of record types to reference all fields individually and
  * recreate the record structure at the call site. */
class ExpandRecords extends Phase {
  val name = "expandRecords"

  /** Rewrite every path element in the AST, then re-infer types. */
  def apply(state: CompilerState) = state.map(_.replace({ case n: PathElement => expandPath(n) }).infer())

  /** Recursively expand a node whose type is a record (struct or product):
    * build a Struct/Product node whose children select each field of `n`.
    * Scalar (non-record) nodes collapse back to the plain untyped path. */
  def expandPath(n: Node): Node = n.nodeType.structural match {
    case StructType(ch) =>
      StructNode(ch.map { case (s, t) => (s, expandPath(n.select(s) :@ t)) })
    case p: ProductType =>
      ProductNode(p.elements.zipWithIndex.map { case (t, i) => expandPath(n.select(new ElementSymbol(i+1)) :@ t) })
    // Was `case t =>` — the binding was never used, so drop it.
    case _ => n.asInstanceOf[PathElement].untypedPath
  }
}
| AtkinsChang/slick | slick/src/main/scala/slick/compiler/ExpandRecords.scala | Scala | bsd-2-clause | 733 |
package com.twitter.finagle.http2
import com.twitter.finagle.Status
import com.twitter.finagle.transport.{SimpleTransportContext, Transport, TransportContext}
import com.twitter.util.{Future, Time}
import java.net.SocketAddress
/** A [[Transport]] that is permanently dead: every `read`/`write` fails with
  * `exn`, `status` is always `Closed`, and `close` completes immediately.
  * `onClose` is satisfied eagerly with `exn`.
  */
private[http2] final class DeadTransport(exn: Throwable, remote: SocketAddress)
  extends Transport[Any, Any] {
  override type Context = TransportContext

  // Single failed Future shared by both read and write.
  private[this] lazy val opsResult = Future.exception(exn)
  lazy val onClose: Future[Throwable] = Future.value(exn)
  val context: TransportContext = new SimpleTransportContext(remoteAddress = remote)
  def write(req: Any): Future[Unit] = opsResult
  def read(): Future[Any] = opsResult
  def status: Status = Status.Closed
  def close(deadline: Time): Future[Unit] = Future.Done
}
| luciferous/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/DeadTransport.scala | Scala | apache-2.0 | 774 |
package weld
object Weld {

  /** Loads a native library into the Weld runtime.
    *
    * @param filename path of the shared library to load
    * @throws WeldException if the runtime reports a non-zero error code
    */
  def loadLibrary(filename: String): Unit = {
    val error = new WeldError
    try {
      WeldJNI.weld_load_library(filename, error.handle)
      if (error.code != 0) {
        // Build the exception while the handle is still open; it is released
        // in the `finally` below (the original only closed on failure,
        // leaking the error handle on the success path).
        throw new WeldException(error)
      }
    } finally {
      error.close()
    }
  }

  /** Forwards the requested log level to the native Weld runtime. */
  def setLogLevel(level: String): Unit = WeldJNI.weld_set_log_level(level)
}
| hvanhovell/weld-java | src/main/scala/weld/Weld.scala | Scala | bsd-3-clause | 346 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector.catalog
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.connector.expressions.{LogicalExpressions, Transform}
import org.apache.spark.sql.types.StructType
/**
* An implementation of catalog v2 `Table` to expose v1 table metadata.
*/
private[sql] case class V1Table(v1Table: CatalogTable) extends Table {

  // Adds `quoted` to TableIdentifier: dot-separated, back-tick-escaped FQN.
  implicit class IdentifierHelper(identifier: TableIdentifier) {
    def quoted: String = {
      identifier.database match {
        case Some(db) =>
          Seq(db, identifier.table).map(quote).mkString(".")
        case _ =>
          quote(identifier.table)
      }
    }

    // Wrap a name part in back-ticks when it contains `.` or a back-tick;
    // embedded back-ticks are doubled, as in SQL identifier quoting.
    private def quote(part: String): String = {
      if (part.contains(".") || part.contains("`")) {
        s"`${part.replace("`", "``")}`"
      } else {
        part
      }
    }
  }

  def catalogTable: CatalogTable = v1Table

  // Storage properties, with the location URI (if present) exposed as "path".
  lazy val options: Map[String, String] = {
    v1Table.storage.locationUri match {
      case Some(uri) =>
        v1Table.storage.properties + ("path" -> uri.toString)
      case _ =>
        v1Table.storage.properties
    }
  }

  override lazy val properties: util.Map[String, String] = v1Table.properties.asJava

  override lazy val schema: StructType = v1Table.schema

  // V1 partition columns become identity transforms; a bucket spec (if any)
  // becomes a single bucket transform over its bucket columns.
  override lazy val partitioning: Array[Transform] = {
    val partitions = new mutable.ArrayBuffer[Transform]()
    v1Table.partitionColumnNames.foreach { col =>
      partitions += LogicalExpressions.identity(col)
    }
    v1Table.bucketSpec.foreach { spec =>
      partitions += LogicalExpressions.bucket(spec.numBuckets, spec.bucketColumnNames: _*)
    }
    partitions.toArray
  }

  override def name: String = v1Table.identifier.quoted

  override def capabilities: util.Set[TableCapability] = new util.HashSet[TableCapability]()

  override def toString: String = s"UnresolvedTable($name)"
}
| bdrillard/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/connector/catalog/V1Table.scala | Scala | apache-2.0 | 2,824 |
package com.giorgioinf.twtml.spark
import org.apache.spark.{Logging, SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.StreamingLinearRegressionWithSGD
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.twitter.TwitterUtils
object LinearRegression extends Logging {

  /** Entry point: wires a filtered/featurized Twitter stream into a streaming
    * linear regression trained with SGD. Each batch is first scored against
    * the latest model (stats are logged and pushed to `session`), then used
    * for training. Blocks until the streaming context terminates.
    */
  def main(args: Array[String]) {
    log.info("Parsing applications arguments")
    val conf = new ConfArguments()
      .setAppName("twitter-stream-ml-linear-regression")
      .parse(args.toList)

    log.info("Initializing session stats...")
    val session = new SessionStats(conf).open

    log.info("Initializing Spark Machine Learning Model...")
    MllibHelper.reset(conf)
    // Model starts from zero weights sized to the feature space.
    val model = new StreamingLinearRegressionWithSGD()
      .setNumIterations(conf.numIterations)
      .setStepSize(conf.stepSize)
      .setMiniBatchFraction(conf.miniBatchFraction)
      .setInitialWeights(Vectors.zeros(MllibHelper.numFeatures))

    log.info("Initializing Spark Context...")
    val sc = new SparkContext(conf.sparkConf)

    log.info("Initializing Streaming Spark Context... {} sec/batch", conf.seconds)
    val ssc = new StreamingContext(sc, Seconds(conf.seconds))

    log.info("Initializing Twitter stream...")
    // Cached because the stream is consumed twice: scoring and training.
    val stream = TwitterUtils.createStream(ssc, None)
      .filter(MllibHelper.filtrate)
      .map(MllibHelper.featurize)
      .cache()

    log.info("Initializing prediction model...")
    // Running total of examples seen across all batches.
    val count = sc.accumulator(0L, "count")
    stream.foreachRDD({ rdd =>
      if (rdd.isEmpty) log.debug("batch: 0")
      else {
        // Pair each labeled point's real value with the model's prediction.
        val realPred = rdd.map{ lb =>
          (lb.label, Utils.round(model.latestModel.predict(lb.features)))
        }
        val batch = rdd.count
        count += batch
        val real = realPred.map(_._1)
        val pred = realPred.map(_._2)
        val realStdev = Utils.round(real.stdev)
        val predStdev = Utils.round(pred.stdev)
        // Mean squared error of this batch's predictions.
        val mse = Utils.round(realPred.map{case(v, p) => math.pow((v - p), 2)}.mean())
        if (log.isDebugEnabled) {
          log.debug("count: {}", count)
          // batch, mse (training mean squared error)
          log.debug("batch: {}, mse: {}", batch, mse)
          log.debug("stdev (real, pred): ({}, {})", realStdev.toLong,
            predStdev.toLong)
          log.debug("value (real, pred): {} ...", realPred.take(10).toArray)
        }
        session.update(count.value, batch, mse, realStdev, predStdev,
          real.toArray, pred.toArray);
      }
    })

    log.info("Initializing training model...")
    // training after prediction
    model.trainOn(stream)

    // Start the streaming computation
    ssc.start()
    log.info("Initialization complete.")
    ssc.awaitTermination()
  }
}
| giorgioinf/twitter-stream-ml | spark/src/main/scala/com/giorgioinf/twtml/spark/LinearRegression.scala | Scala | gpl-3.0 | 2,829 |
package test.scala.MineSweeper
import main.scala.MineSweeper.MineField
import test.scala.UnitSuite
import main.scala.MineSweeper.MineSolver
/** Unit tests for [[MineSolver]]: solving a field should keep mine cells and
  * replace the remaining cells with their adjacent-mine counts.
  */
class MineSolverUnit extends UnitSuite {

  test("Solving 1 by 1 field with one mine should return the same thing") {
    val lines = Array("*")
    val field = MineField(lines)
    assertResult(field)(MineSolver(field))
  }

  test("Solving 1 by 1 field without a mine should return zero") {
    val lines = Array(".")
    val field = MineField(lines)
    // Expected: the single empty cell has no neighbours, so its count is 0.
    val lines1 = Array("0")
    val field1 = MineField(lines1)
    assertResult(field1)(MineSolver(field))
  }

  test("Solving 1 by 3 field with one mine in the middle should return '1 * 1'") {
    val field = MineField(Array(". * ."))
    // Both empty cells border exactly one mine.
    val solution = MineField(Array("1 * 1"))
    assertResult(solution)(MineSolver(field))
  }
} | ollielo/ScalaKata | src/test/scala/MineSweeper/MineSolverUnit.scala | Scala | mit | 831 |
package ua.kata
import org.scalatest.{FunSuite, Matchers}
/** Tests for [[BinaryTree]]: membership after insertion and the three
  * classic traversal orders on a fixed tree.
  */
class BinaryTreeTest extends FunSuite with Matchers {

  private val emptyTree: BinaryTree = BinaryTree()

  test("created tree has size 0") {
    emptyTree should have size 0
  }

  test("tree contains added value") {
    val tree: BinaryTree = emptyTree.add(1)
    tree should contain(1)
  }

  test("tree has all added items") {
    val tree: BinaryTree = emptyTree.add(2).add(1).add(3)
    tree should contain(1)
    tree should contain(2)
    tree should contain(3)
  }

  // Fixed tree built by inserting in this exact order; the traversal
  // expectations below depend on that insertion order.
  private val traversable: BinaryTree = BinaryTree(6, 2, 1, 4, 3, 5, 7, 9, 8)

  test("traverse tree in order") {
    traversable.inOrder should contain theSameElementsInOrderAs List(1, 2, 3, 4, 5, 6, 7, 8, 9)
  }

  test("traverse tree preorder") {
    traversable.preOrder should contain theSameElementsInOrderAs List(6, 2, 1, 4, 3, 5, 7, 9, 8)
  }

  test("traverse tree postorder") {
    traversable.postOrder should contain theSameElementsInOrderAs List(1, 3, 5, 4, 2, 8, 9, 7, 6)
  }
}
| Alex-Diez/Scala-TDD-Katas | binary_tree_kata/iteration_01/binary_tree_day_07/src/test/scala/ua/kata/BinaryTreeTest.scala | Scala | mit | 1,029 |
package org.jetbrains.plugins.dotty.codeInspection.deprecated
import com.intellij.codeInspection.ProblemHighlightType.LIKE_DEPRECATED
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.dotty.codeInspection.deprecated.WithTypeInspection._
import org.jetbrains.plugins.dotty.lang.psi.impl.base.types.DottyAndTypeElementImpl
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection, InspectionBundle}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes.{kWITH, tAND}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createElement
/**
* @author adkozlov
*/
/** Flags the `with` keyword inside Dotty intersection types as deprecated
  * and offers a quick fix that replaces it with `&`.
  */
class WithTypeInspection extends AbstractInspection(id, name) {

  override def actionFor(implicit holder: ProblemsHolder): PartialFunction[PsiElement, Unit] = {
    case typeElement: DottyAndTypeElementImpl =>
      // One problem (with its own quick fix) per `with` keyword token.
      typeElement.findChildrenByType(kWITH).foreach { token =>
        holder.registerProblem(token, message, LIKE_DEPRECATED, new ReplaceWithTypeQuickFix(token))
      }
  }
}
/** Quick fix replacing a deprecated `with` keyword token with `&`. */
class ReplaceWithTypeQuickFix(token: PsiElement) extends AbstractFixOnPsiElement(name, token) {

  override def doApplyFix(project: Project): Unit = getElement match {
    case element if element.isValid =>
      element.replace(createElement(tAND.toString, _ => {})(element.getManager))
    case _ =>
      // Element was invalidated (e.g. the file changed under us): nothing to
      // fix. Previously this match was non-exhaustive and threw a MatchError.
  }
}
/** Inspection id, display name, and problem message shared by the inspection
  * and its quick fix.
  */
object WithTypeInspection {
  private[codeInspection] val id = "WithTypeDeprecated"
  private[codeInspection] val name = InspectionBundle.message("replace.with.ampersand")
  private[codeInspection] val message = s"With type is deprecated in Dotty. $name"
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/dotty/codeInspection/deprecated/WithTypeInspection.scala | Scala | apache-2.0 | 1,677 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import anorm._
import views._
import models._
import play.api.data.validation.Constraints
import play.api.i18n._
/**
* Manage a phone numbers database
*/
object Application extends Controller {

  /**
   * This result redirects to the home page (first page, default ordering,
   * no name filter).
   */
  val Home = Redirect(routes.Application.list(0, 2, ""))

  /**
   * The phone-number form: id is assigned by the database, name and
   * phoneNumber are required and validated by the Number model constraints.
   */
  val numberForm = Form(
    mapping(
      "id" -> ignored(NotAssigned:Pk[Long]),
      "name" -> nonEmptyText.verifying(Number.nameCheckConstraint),
      "phoneNumber" -> nonEmptyText.verifying(Number.phoneNumberIsUniqueConstraint, Number.phoneNumberIsRealConstraint)
    )(Number.apply)(Number.unapply)
  )

  /**
   * Redirect to numbers list
   */
  def index = Action { Home }

  /**
   * Display the paginated list of phone numbers.
   *
   * @param page Current page number (starts from 0)
   * @param orderBy Column to be sorted
   * @param filter Filter applied on names (wrapped in SQL LIKE wildcards)
   */
  def list(page: Int, orderBy: Int, filter: String) = Action { implicit request =>
    Ok(html.list(
      Number.list(page = page, orderBy = orderBy, filter = ("%"+filter+"%")),
      orderBy, filter
    ))
  }

  /**
   * Display the 'new number form'.
   */
  def create = Action {
    Ok(html.createForm(numberForm))
  }

  /**
   * Handle the 'new number form' submission: re-render the form on
   * validation errors, otherwise insert and redirect home with a flash.
   */
  def save = Action { implicit request =>
    numberForm.bindFromRequest.fold(
      formWithErrors => BadRequest(html.createForm(formWithErrors)),
      number => {
        Number.insert(number)
        Home.flashing("success" -> Messages("number.created", number.name))
      }
    )
  }

  /**
   * Display the 'edit form' of a existing number.
   *
   * @param id Id of the number to edit
   */
  def edit(id: Long) = Action {
    Number.findById(id).map { number =>
      Ok(html.editForm(id, numberForm.fill(number)))
    }.getOrElse(NotFound)
  }

  /**
   * Handle the 'edit form' submission.
   *
   * @param id Id of the number to edit
   */
  def update(id: Long) = Action { implicit request =>
    numberForm.bindFromRequest.fold(
      formWithErrors => BadRequest(html.editForm(id, formWithErrors)),
      number => {
        Number.update(id, number)
        Home.flashing("success" -> Messages("number.updated", number.name))
      }
    )
  }

  /**
   * Handle number deletion.
   *
   * @param id Id of the number to delete
   */
  def delete(id: Long) = Action {
    Number.delete(id)
    Home.flashing("success" -> Messages("number.deleted"))
  }
} | meln1k/phoneBook | app/controllers/Application.scala | Scala | mit | 2,603 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.bloomberg
import java.util
import com.datamountaineer.streamreactor.connect.bloomberg.avro.AvroSchemaGenerator
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericRecord}
import org.apache.avro.io.DecoderFactory
import org.codehaus.jackson.JsonNode
import org.scalatest.{Matchers, WordSpec}
import scala.collection.JavaConverters._
/** Tests mapping Scala/Java values to Avro schemas via [[AvroSchemaGenerator]]. */
class AvroSchemaGeneratorTest extends WordSpec with Matchers {
  val namespace = "io.confluent.connect.avro"
  val schemaGenerator = new AvroSchemaGenerator(namespace)

  // Marks a STRING schema to use java.lang.String at runtime, matching what
  // the generator produces for string-like inputs.
  def setString(schema: Schema): Schema = {
    GenericData.setStringType(schema, GenericData.StringType.String)
    schema
  }

  "AvroSchema" should {
    "handle boolean input" in {
      schemaGenerator.create("ConnectDefault", true) shouldBe Schema.create(Schema.Type.BOOLEAN)
      schemaGenerator.create("ConnectDefault", false) shouldBe Schema.create(Schema.Type.BOOLEAN)
    }
    "handle char input" in {
      schemaGenerator.create("ConnectDefault", 'a') shouldBe setString(Schema.create(Schema.Type.STRING))
    }
    "handle string input" in {
      schemaGenerator.create("ConnectDefault", "cosmic gate") shouldBe setString(Schema.create(Schema.Type.STRING))
    }
    "handle long input" in {
      schemaGenerator.create("ConnectDefault", 1L) shouldBe Schema.create(Schema.Type.LONG)
    }
    "handle float input" in {
      schemaGenerator.create("ConnectDefault", 34.5f) shouldBe Schema.create(Schema.Type.FLOAT)
    }
    "handle double input" in {
      schemaGenerator.create("ConnectDefault", -324.23d) shouldBe Schema.create(Schema.Type.DOUBLE)
    }
    "handle List[int] input" in {
      schemaGenerator.create("ConnectDefault", Seq(1, 2, 3).asJava) shouldBe Schema.createArray(Schema.create(Schema.Type.INT))
    }
    "handle LinkedHashMap[String,Any] input" in {
      // LinkedHashMap preserves insertion order, so the generated record's
      // field order is deterministic.
      val map = new java.util.LinkedHashMap[String, Any]
      map.put("k1", 1)
      map.put("k2", "minime")
      val expectedSchema = Schema.createRecord("ConnectDefault", null, namespace, false)
      val default: JsonNode = null
      val fields = Seq(
        new Schema.Field("k1", AvroSchemaGenerator.optionalSchema(Schema.Type.INT), null, default),
        new Schema.Field("k2", AvroSchemaGenerator.optionalSchema(Schema.Type.STRING), null, default)
      ).asJava
      expectedSchema.setFields(fields)
      val actualSchema = schemaGenerator.create("ConnectDefault", map)
      actualSchema shouldBe expectedSchema
    }
    "raise an error if the input is not long, float,char, string,LinkedHashMap[String, Any],List[Any]" in {
      intercept[RuntimeException] {
        schemaGenerator.create("ConnectDefault", BigDecimal(131))
      }
      intercept[RuntimeException] {
        // Plain (non-Linked) HashMap is rejected too.
        schemaGenerator.create("ConnectDefault", Map("s" -> 11).asJava)
      }
    }
    "create the appropriate schema for the given linkedhashmap entry" in {
      // Builds the canonical "person" document (nested record, array of
      // records, nested single-field record) and compares against the
      // reference schema at /person.avsc on the test classpath.
      val map = new util.LinkedHashMap[String, Any]()
      map.put("firstName", "John")
      map.put("lastName", "Smith")
      map.put("age", 25)
      val mapAddress = new util.LinkedHashMap[String, Any]()
      mapAddress.put("streetAddress", "21 2nd Street")
      mapAddress.put("city", "New York")
      mapAddress.put("state", "NY")
      mapAddress.put("postalCode", "10021")
      map.put("address", mapAddress)
      val phoneMap = new util.LinkedHashMap[String, Any]()
      phoneMap.put("type", "home")
      phoneMap.put("number", "212 555-1234")
      val faxMap = new util.LinkedHashMap[String, Any]()
      faxMap.put("type", "fax")
      faxMap.put("number", "646 555-4567")
      map.put("phoneNumber", Seq(phoneMap, faxMap).asJava)
      val genderMap = new java.util.LinkedHashMap[String, Any]()
      genderMap.put("type", "male")
      map.put("gender", genderMap)
      val actualSchema = schemaGenerator.create("ConnectDefault", map)
      val expectedSchema = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream(s"/person.avsc"))
      actualSchema.toString(true) shouldBe expectedSchema.toString(true)
    }
  }
}
object AvroSchemaGeneratorTest {
  /** Test-only helper: decodes Avro binary `data` into a GenericRecord,
    * using `schema` as both writer and reader schema. */
  def deserializeAvroRecord(data: Array[Byte], schema: Schema): GenericRecord = {
    val reader = new GenericDatumReader[GenericRecord](schema)
    val decoder = DecoderFactory.get().binaryDecoder(data, null)
    reader.read(null, decoder)
  }
}
| CodeSmell/stream-reactor | kafka-connect-bloomberg/src/test/scala/com/datamountaineer/streamreactor/connect/bloomberg/AvroSchemaGeneratorTest.scala | Scala | apache-2.0 | 5,004 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
package params
import javax.swing.Icon
import com.intellij.lang.java.lexer.JavaLexer
import com.intellij.pom.java.LanguageLevel
import com.intellij.psi._
import com.intellij.psi.search.{GlobalSearchScope, LocalSearchScope}
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.psi.api.base.ScPrimaryConstructor
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScFunctionExpr, ScUnderScoreSectionUtil}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScMember}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScImportableDeclarationsOwner, ScModifierListOwner, ScTypedDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.types.api.FunctionType
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.types.{ScParameterizedType, ScType, ScTypeExt, ScalaType}
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}
import scala.annotation.tailrec
import scala.collection.immutable.HashSet
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
/** PSI node for a Scala parameter (of a method, constructor, or anonymous
  * function). Bridges Scala parameter semantics (repeated, by-name, implicit,
  * default arguments) onto the Java `PsiParameter` contract.
  */
trait ScParameter extends ScTypedDefinition with ScModifierListOwner with
  PsiParameter with ScAnnotationsHolder with ScImportableDeclarationsOwner {
  def getTypeElement: PsiTypeElement

  // "_" names a wildcard (intentionally unused) parameter.
  def isWildcard: Boolean = "_" == name

  // PsiParameter contract: a repeated parameter (T*) is Java's varargs.
  def isVarArgs = isRepeatedParameter

  // Scala parameters never have Java-style initializers/constant values.
  def computeConstantValue = null

  def normalizeDeclaration() {}

  def hasInitializer = false

  def getInitializer = null

  def typeElement: Option[ScTypeElement]

  def paramType: Option[ScParameterType] = findChild(classOf[ScParameterType])

  override def getTextOffset: Int = nameId.getTextRange.getStartOffset

  override def getIcon(flags: Int): Icon = Icons.PARAMETER

  def isRepeatedParameter: Boolean

  def isCallByNameParameter: Boolean

  // True when a default value is written directly on THIS parameter
  // (isDefaultParam below also consults overridden parameters).
  def baseDefaultParam: Boolean

  def getActualDefaultExpression: Option[ScExpression]

  /** Type as seen at the use site: for a repeated parameter `T*` the element
    * type is wrapped in `scala.collection.Seq[T]`; otherwise same as getType. */
  def getRealParameterType(ctx: TypingContext = TypingContext.empty): TypeResult[ScType] = {
    if (!isRepeatedParameter) return getType(ctx)
    getType(ctx) match {
      case f@Success(tp: ScType, elem) =>
        val seq = ScalaPsiManager.instance(getProject).getCachedClass("scala.collection.Seq", getResolveScope, ScalaPsiManager.ClassCategory.TYPE)
        if (seq != null) {
          Success(ScParameterizedType(ScalaType.designator(seq), Seq(tp)), elem)
        } else f
      case f => f
    }
  }

  def getDeclarationScope = PsiTreeUtil.getParentOfType(this, classOf[ScParameterOwner], classOf[ScFunctionExpr])

  def deprecatedName: Option[String]

  // Nearest enclosing function expression, function, or primary constructor.
  def owner: PsiElement = {
    ScalaPsiUtil.getContextOfType(this, true, classOf[ScFunctionExpr],
      classOf[ScFunction], classOf[ScPrimaryConstructor])
  }

  def remove()

  def isImplicitParameter: Boolean = {
    val clause = PsiTreeUtil.getParentOfType(this, classOf[ScParameterClause])
    if (clause == null) return false
    clause.isImplicit
  }

  // Position of this parameter within its parameter list/clause.
  def index = getParent.getParent match {
    case parameters: ScParameters => parameters.params.indexOf(this)
    case _ => getParent.asInstanceOf[ScParameterClause].parameters.indexOf(this)
  }

  override def getName: String = {
    val res = super.getName
    // Prefix Java keywords so the exposed name stays a legal Java identifier.
    if (JavaLexer.isKeyword(res, LanguageLevel.HIGHEST)) "_" + res
    else res
  }

  // Narrow the search scope: a lambda parameter is only visible inside the
  // lambda; class parameters may be referenced as named arguments.
  abstract override def getUseScope = {
    val specificScope = getDeclarationScope match {
      case null => GlobalSearchScope.EMPTY_SCOPE
      case expr: ScFunctionExpr => new LocalSearchScope(expr)
      case clazz: ScClass if clazz.isCase => clazz.getUseScope
      case clazz: ScClass if this.isInstanceOf[ScClassParameter] => clazz.getUseScope //for named parameters
      case d => d.getUseScope
    }
    specificScope.intersectWith(super.getUseScope)
  }

  def getType: PsiType = getRealParameterType(TypingContext.empty).getOrNothing.toPsiType(getProject, getResolveScope)

  // True when this parameter belongs directly to an anonymous function.
  def isAnonymousParameter: Boolean = getContext match {
    case clause: ScParameterClause => clause.getContext.getContext match {
      case f: ScFunctionExpr => true
      case _ => false
    }
    case _ => false
  }

  /** For a parameter of an anonymous function: the type expected for it,
    * inferred from the function expression's expected (function or SAM) type.
    * Returns `None` when not in a lambda or when the candidates are ambiguous. */
  def expectedParamType: Option[ScType] = getContext match {
    case clause: ScParameterClause => clause.getContext.getContext match {
      // For parameter of anonymous functions to infer parameter's type from an appropriate
      // an. fun's type
      case f: ScFunctionExpr =>
        var flag = false
        var result: Option[ScType] = None //strange logic to handle problems with detecting type
        for (tp <- f.expectedTypes(fromUnderscore = false) if !flag) {
          @tailrec
          def applyForFunction(tp: ScType, checkDeep: Boolean) {
            tp.removeAbstracts match {
              case FunctionType(ret, _) if checkDeep => applyForFunction(ret, checkDeep = false)
              case FunctionType(_, params) if params.length == f.parameters.length =>
                val i = clause.parameters.indexOf(this)
                // Two competing candidates => give up (ambiguous).
                if (result.isDefined) {
                  result = None
                  flag = true
                } else result = Some(params(i))
              case any if ScalaPsiUtil.isSAMEnabled(f)=>
                //infer type if it's a Single Abstract Method
                ScalaPsiUtil.toSAMType(any, f.getResolveScope) match {
                  case Some(FunctionType(_, params)) =>
                    val i = clause.parameters.indexOf(this)
                    if (i < params.length) result = Some(params(i))
                  case _ =>
                }
              case _ =>
            }
          }
          applyForFunction(tp, ScUnderScoreSectionUtil.underscores(f).nonEmpty)
        }
        result
      case _ => None
    }
  }

  def getTypeNoResolve: PsiType = PsiType.VOID

  // Cached: a parameter is "default" if it or any overridden parameter
  // carries a default value.
  @Cached(synchronized = false, ModCount.getBlockModificationCount, this)
  def isDefaultParam: Boolean = calcIsDefaultParam(this, HashSet.empty)

  // `visited` guards against cycles in the super-parameter chain.
  @tailrec
  private def calcIsDefaultParam(param: ScParameter, visited: HashSet[ScParameter]): Boolean = {
    if (param.baseDefaultParam) return true
    if (visited.contains(param)) return false
    getSuperParameter match {
      case Some(superParam) =>
        calcIsDefaultParam(superParam, visited + param)
      case _ => false
    }
  }

  /** Default expression of this parameter, falling back to the overridden
    * (super) parameter's default when this one has none. */
  def getDefaultExpression: Option[ScExpression] = {
    val res = getActualDefaultExpression
    if (res.isEmpty) {
      getSuperParameter.flatMap(_.getDefaultExpression)
    } else res
  }

  /** Like getDefaultExpression, but for compiled files the expression is
    * looked up in the corresponding source mirror (for navigation). */
  def getDefaultExpressionInSource: Option[ScExpression] = {
    val res = getActualDefaultExpression
    if (res.isEmpty) {
      getSuperParameter.flatMap(_.getDefaultExpressionInSource)
    } else {
      getContainingFile match {
        case file: ScalaFile =>
          if (file.isCompiled) {
            val containingMember = PsiTreeUtil.getContextOfType(this, true, classOf[ScMember])
            if (containingMember == null) res
            else {
              // Find the parameter of the same name on the source-side owner.
              def extractFromParameterOwner(owner: ScParameterOwner): Option[ScExpression] = {
                owner.parameters.find(_.name == name) match {
                  case Some(param) => param.getDefaultExpression
                  case _ => res
                }
              }
              containingMember match {
                case c: ScClass =>
                  c.getSourceMirrorClass match {
                    case c: ScClass => extractFromParameterOwner(c)
                    case _ => res
                  }
                case f: ScFunction =>
                  f.getNavigationElement match {
                    case f: ScFunction => extractFromParameterOwner(f)
                    case _ => res
                  }
                case _ => res
              }
            }
          } else res
        case _ => res
      }
    }
  }

  /** The parameter this one overrides: same clause index `j` and same
    * position `i` in the super method's parameter clauses, if both exist. */
  def getSuperParameter: Option[ScParameter] = {
    getParent match {
      case clause: ScParameterClause =>
        val i = clause.parameters.indexOf(this)
        clause.getParent match {
          case p: ScParameters =>
            val j = p.clauses.indexOf(clause)
            p.getParent match {
              case fun: ScFunction =>
                fun.superMethod match {
                  case Some(method: ScFunction) =>
                    val clauses: Seq[ScParameterClause] = method.paramClauses.clauses
                    if (j >= clauses.length) return None
                    val parameters: Seq[ScParameter] = clauses.apply(j).parameters
                    if (i >= parameters.length) return None
                    Some(parameters.apply(i))
                  case _ => None
                }
              case _ => None
            }
          case _ => None
        }
      case _ => None
    }
  }
} | katejim/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/statements/params/ScParameter.scala | Scala | apache-2.0 | 8,983 |
import scala.quoted._
object Test {
  // Compile-only check: Type instances are summonable inside a QuoteContext
  // for a concrete type, an applied type, an abstract type parameter, and an
  // applied type over that parameter.
  def f[T: Type](using QuoteContext) = {
    implicitly[Type[Int]]
    implicitly[Type[List[Int]]]
    implicitly[Type[T]]
    implicitly[Type[List[T]]]
  }
}
| som-snytt/dotty | tests/pos/typetags.scala | Scala | apache-2.0 | 197 |
/*
** Copyright [2013-2016] [Megam Systems]
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
package models.json.team
import scalaz._
import scalaz.NonEmptyList._
import scalaz.Validation
import scalaz.Validation._
import Scalaz._
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.util.Date
import java.nio.charset.Charset
import io.megam.auth.funnel.FunnelErrors._
import controllers.Constants._
import models.team.{ OrganizationsResult }
/**
* @author morpheyesh
*
*/
/** JSON (de)serialization of [[OrganizationsResult]] using lift-json-scalaz.
  * Field names here define the wire format; `json_claz` is a fixed type tag.
  */
class OrganizationsResultSerialization(charset: Charset = UTF8Charset) extends io.megam.json.SerializationBase[OrganizationsResult] {
  protected val JSONClazKey = controllers.Constants.JSON_CLAZ
  protected val IdKey = "id"
  protected val AccountIdKey = "accounts_id"
  protected val NameKey = "name"
  protected val RelatedOrgsKey = "related_orgs"
  protected val CreatedAtKey ="created_at"

  override implicit val writer = new JSONW[OrganizationsResult] {
    // related_orgs uses its own list serializer rather than the default.
    import RelatedOrgsListSerialization.{ writer => RelatedOrgsListWriter}
    override def write(h: OrganizationsResult): JValue = {
      JObject(
        JField(IdKey, toJSON(h.id)) ::
          JField(AccountIdKey, toJSON(h.accounts_id)) ::
          JField(JSONClazKey, toJSON("Megam::Organizations")) ::
          JField(NameKey, toJSON(h.name)) ::
          // JField(RelatedOrgsKey, toJSON(h.related_orgs)) ::
          JField(RelatedOrgsKey, toJSON(h.related_orgs)(RelatedOrgsListWriter)) ::
          JField(CreatedAtKey, toJSON(h.created_at)) ::
          Nil)
    }
  }

  override implicit val reader = new JSONR[OrganizationsResult] {
    import RelatedOrgsListSerialization.{ reader => RelatedOrgsListReader}
    override def read(json: JValue): Result[OrganizationsResult] = {
      val idField = field[String](IdKey)(json)
      val accountIdField = field[String](AccountIdKey)(json)
      val nameField = field[String](NameKey)(json)
      // val relatedOrgsField = field[List[String]](RelatedOrgsKey)(json)
      val relatedOrgsField= field[List[String]](RelatedOrgsKey)(json)(RelatedOrgsListReader)
      val createdAtField = field[String](CreatedAtKey)(json)
      // Applicative composition: all field validations must succeed.
      (idField |@|accountIdField |@| nameField |@| relatedOrgsField |@| createdAtField) {
        (id: String, accountId: String, name: String, related_orgs: List[String], created_at: String) =>
          new OrganizationsResult(id, accountId, name, related_orgs, created_at)
      }
    }
  }
}
| meglytics/bidi | app/models/json/team/OrganizationsResultSerializations.scala | Scala | mit | 2,999 |
/*
* Copyright (c) 2015, Nightfall Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package moe.nightfall.instrumentality.editor
import java.io.{File, FileInputStream, FileOutputStream, IOException}
import java.net.URL
import java.util.zip.{ZipEntry, ZipInputStream}
import moe.nightfall.instrumentality.ModelCache.{DownloadingPMXFilenameLocator, IPMXFilenameLocator}
import moe.nightfall.instrumentality.RecommendedInfoCache.DownloadableEntry
import moe.nightfall.instrumentality.{ModelCache}
/**
* Used for downloading models by DownloaderElement.
* Created on 09/10/15.
*/
/** Background task that downloads a model ZIP, then feeds its entries into
  * the ModelCache. Progress layout: first half of `progress` is the HTTP
  * download, second half is per-file extraction. `fakeIt`/`fakeIt2` are
  * developer switches that simulate the download for UI testing.
  */
class ModelDownloadTask(val n: DownloadableEntry, val downloadName: String) extends MeasurableTask with Runnable {
    // Scala is magic...
    var generalTask = "Starting download thread..."
    var subTask = ""
    var progress = 0.0d
    var state = TaskState.Prestart

    override def run(): Unit = {
        progress = 0
        generalTask = "Downloading ZIP..."
        subTask = n.download
        state = TaskState.Running
        val fakeIt = false
        try {
            if (fakeIt) {
                Thread.sleep(2500)
                progress = 0.5d
                Thread.sleep(2500)
                progress = 1
            } else {
                val fakeIt2 = false
                var f = new File("nodos.zip")
                if (!fakeIt2) {
                    // Download the archive into a temp file, tracking progress
                    // against Content-Length.
                    // NOTE(review): if the server omits Content-Length (-1),
                    // the read loop is skipped and nothing is downloaded —
                    // TODO confirm intended.
                    val uc = new URL(n.download).openConnection()
                    uc.addRequestProperty("User-Agent", "MikuMikuCraft")
                    uc.connect()
                    val contentLength = uc.getContentLengthLong
                    f = File.createTempFile("MMCdl", ".dat")
                    val fos = new FileOutputStream(f)
                    val fis = uc.getInputStream
                    var todoLen = contentLength
                    val buffer = new Array[Byte](1024)
                    while (todoLen > 0) {
                        // Download occupies the first half of the progress bar.
                        progress = 0.5d - (todoLen / (contentLength * 2d))
                        var readLen = fis.read(buffer)
                        // Not sure if this case will ever happen, but just to be sure...
                        if (readLen < 0)
                            throw new IOException("Download failure")
                        todoLen -= readLen
                        fos.write(buffer, 0, readLen)
                    }
                    fis.close()
                    fos.close()
                } else {
                    progress = 0.5d
                    Thread.sleep(1000)
                }
                generalTask = "Loading ZIP..."
                // First pass: collect the ZIP's entry table.
                val fis = new FileInputStream(f)
                val zais = new ZipInputStream(fis)
                var za = Seq[ZipEntry]()
                var entry: ZipEntry = zais.getNextEntry
                while (entry != null) {
                    println(entry.getName)
                    za :+= entry
                    zais.closeEntry()
                    entry = zais.getNextEntry
                }
                fis.close()
                var root = n.downloadDir.toLowerCase
                if (root == "/")
                    root = ""
                var fileProgress = 0.1d
                // Locator that serves files out of the downloaded ZIP,
                // relative to `root`, with case-insensitive matching.
                val filenameLocator = new IPMXFilenameLocator {
                    override def listFiles(): Seq[String] = {
                        return za.filter(_.getName.toLowerCase.startsWith(root))
                                .filter(!_.isDirectory)
                                .map(af => af.getName.toLowerCase.substring(root.length))
                    }

                    override def apply(filename: String): Array[Byte] = {
                        subTask = filename
                        progress += fileProgress
                        val s = root + filename.toLowerCase.replace('\\\\', '/')
                        // Ok, now "seek"
                        // ZipInputStream cannot seek, so re-open and scan from
                        // the start for every requested file (O(n) per lookup).
                        val fis = new FileInputStream(f)
                        val zais = new ZipInputStream(fis)
                        var entry: ZipEntry = zais.getNextEntry
                        while (entry != null) {
                            if (entry.getName.toLowerCase.equals(s)) {
                                // OK
                                val buf = new Array[Byte](entry.getSize.toInt)
                                var remaining = buf.length
                                while (remaining > 0) {
                                    val len = zais.read(buf, buf.length - remaining, remaining)
                                    if (len < 0)
                                        throw new IOException("Did not read whole file")
                                    remaining -= len
                                }
                                fis.close()
                                return buf
                            }
                            zais.closeEntry()
                            entry = zais.getNextEntry
                        }
                        fis.close()
                        throw new IOException("Could not find a file")
                    }
                }
                // Remaining half of the progress bar is split evenly per file.
                fileProgress = 0.5d / filenameLocator.listFiles().size
                val name = ModelCache.findFreeName(downloadName)
                ModelCache.getInternal(new DownloadingPMXFilenameLocator(filenameLocator, name, -1), name, true)
            }
            state = TaskState.Success
        } catch {
            case e: IOException => {
                // Surface the failure in the UI instead of crashing the thread.
                generalTask = subTask + " : failed"
                subTask = e.toString
                state = TaskState.Failure
            }
        }
    }
}
| Nightfall/Instrumentality | core/src/main/scala/moe/nightfall/instrumentality/editor/ModelDownloadTask.scala | Scala | bsd-2-clause | 6,832 |
object Solution {
  /** Signed shoelace contribution of the edge from point b to point a. */
  def cal_area(a: Array[Int], b: Array[Int]): Double = {
    val cross = a(1) * b(0) - a(0) * b(1)
    0.5 * cross
  }

  /** Euclidean distance between points a and b. */
  def cal_length(a: Array[Int], b: Array[Int]): Double = {
    val dx: Double = a(0) - b(0)
    val dy: Double = a(1) - b(1)
    Math.sqrt(dx * dx + dy * dy)
  }

  def main(args: Array[String]) {
    // Reads the vertex count, then one "x y" pair per line, and prints the
    // polygon's area via the shoelace formula.
    val in = io.Source.stdin.getLines
    val n = in.next.toInt
    val points = in.map(_.split(" ").map(_.toInt)).toList
    val ans = points.indices
      .map(i => cal_area(points((i + 1) % points.size), points(i)))
      .sum
    println(ans)
  }
}
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.controllers
import javax.inject.{Inject, Singleton}
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.libs.json.Json
import uk.gov.hmrc.bforms.controllers.helpers.FormHelpers._
import uk.gov.hmrc.bforms.models._
import uk.gov.hmrc.bforms.service.SaveService
import uk.gov.hmrc.play.frontend.controller.FrontendController
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class SummaryGen @Inject()(val messagesApi: MessagesApi, val sec: SecuredActions)(implicit ec: ExecutionContext)
    extends FrontendController with I18nSupport {

  // Renders a read-only summary of a previously saved form instance.
  def summaryById(formTypeId: FormTypeId, version: String, formId: FormId) =
    sec.SecureWithTemplateAsync(formTypeId, version) { authContext =>
      implicit request =>
        // Fetch the saved form data and render it against the request's form template.
        SaveService.getFormById(formTypeId, version, formId).map( formData =>
          Summary(request.formTemplate).renderSummary(formDataMap(formData), formId)
        )
    }

  // Handles the summary page's submit button: "Exit" just acknowledges,
  // "Continue" sends the submission; anything else is a bad request.
  def submit(formTypeId: FormTypeId, version: String) = sec.SecureWithTemplateAsync(formTypeId, version) { authContext =>
    implicit request =>
      processResponseDataFromBody(request) { data =>
        // The "save" field carries which button was pressed.
        getActions(data, FieldId("save")) match {
          case "Exit" :: Nil =>
            Future.successful(Ok)
          case "Continue" :: Nil =>
            anyFormId(data) match {
              case Some(formId) =>
                // Submit the form, echoing the envelope body and form id as JSON.
                SaveService.sendSubmission(formTypeId, formId).
                  map( r => Ok(Json.obj("envelope" -> r.body, "formId" -> Json.toJson(formId))))
              case None =>
                Future.successful(BadRequest("No formId"))
            }
          case _ =>
            Future.successful(BadRequest("Cannot determine action"))
        }
      }
  }
}
| VlachJosef/bforms-frontend | app/uk/gov/hmrc/bforms/controllers/SummaryGen.scala | Scala | apache-2.0 | 2,370 |
package com.sksamuel.elastic4s.requests.validate
import com.fasterxml.jackson.annotation.JsonProperty
import com.sksamuel.elastic4s.requests.common.Shards
/**
 * Deserialized response of a validate request. `shards` is mapped from the
 * JSON field `_shards`; `explanations` carries per-item validation details.
 */
case class ValidateResponse(valid: Boolean, @JsonProperty("_shards") shards: Shards, explanations: Seq[Explanation]) {
  // Readable alias for the `valid` flag.
  def isValid: Boolean = valid
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/validate/ValidateResponse.scala | Scala | apache-2.0 | 309 |
/*
* Copyright 2010-2011 Benjamin Lings
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.me.lings.scalaguice
import com.google.inject._
object KeyExtensions {
  import java.lang.annotation.{Annotation => JAnnotation}

  /**
   * Enriches a Guice [[TypeLiteral]] with shortcuts for building [[Key]]s,
   * optionally qualified by a binding annotation.
   */
  implicit def enrichTypeLiteral[T](t: TypeLiteral[T]) = new {
    /** Builds an unannotated key for this type literal. */
    def toKey: Key[T] = Key.get(t)
    /** Builds a key qualified by the given annotation instance. */
    def annotatedWith(annotation: JAnnotation): Key[T] = Key.get(t, annotation)
    /**
     * Builds a key qualified by the annotation type `TAnn`.
     * `annotation[TAnn]` is a helper defined elsewhere in this package —
     * presumably it resolves the annotation class from the manifest; confirm there.
     */
    def annotatedWith[TAnn <: JAnnotation : ClassManifest]: Key[T] =
      Key.get(t, annotation[TAnn])
  }
}
| benlings/scala-guice | src/main/scala/KeyExtensions.scala | Scala | apache-2.0 | 1,068 |
package spatial.models.characterization
object LabeledPairs {

  /**
   * A list of labeled [[Pattern]]s under construction; `toSeq` seals it and
   * `|` separates complete alternatives.
   */
  abstract class PatternList {
    def toSeq: Seq[Pattern]
    def |(that: PatternList) = Seq(this, that)
  }

  /** Pattern list whose trailing, still-growable element is a [[Product]]. */
  case class ListProduct(ps: Seq[Pattern], last: Product) extends PatternList {
    // '*' keeps extending the trailing product; '+' seals it and starts a new term.
    def *(y: (Int,String)) = ListProduct(ps, last * y)
    def +(y: (Int,String)) = ListLinear(ps :+ last, Linear(y))
    def *(y: Product) = ListProduct(ps, last * y)
    def +(y: Product) = ListProduct(ps :+ last, y)
    def +(y: Linear) = ListLinear(ps :+ last, y)
    def toSeq = ps :+ last
  }

  /** Pattern list whose trailing element is a single [[Linear]] term. */
  case class ListLinear(ps: Seq[Pattern], last: Linear) extends PatternList {
    // '*' upgrades the trailing linear term into a product; '+' appends a new term.
    def *(y: (Int,String)) = ListProduct(ps, last*y)
    def +(y: (Int,String)) = ListLinear(ps :+ last, Linear(y))
    def +(y: Product) = ListProduct(ps :+ last, y)
    def +(y: Linear) = ListLinear(ps :+ last, y)
    def toSeq = ps :+ last
  }

  // Implicit lifts so a bare pair/term/product can stand where a sequence of
  // alternative pattern lists is expected.
  implicit def PatternListToSeq(x: PatternList): Seq[PatternList] = Seq(x)
  implicit def LabeledPairToSeq(x: (Int,String)): Seq[PatternList] = Seq(ListLinear(Nil,Linear(x)))
  implicit def LinearToSeq(x: Linear): Seq[PatternList] = Seq(ListLinear(Nil,x))
  implicit def ProductToSeq(x: Product): Seq[PatternList] = Seq(ListProduct(Nil,x))

  /** A single labeled term; `label` is its display name. */
  abstract class Pattern { def label: String }

  /** Product of labeled factors; its label joins the factor names with '*'. */
  case class Product(xs: (Int,String)*) extends Pattern {
    // Note: '*' with a pair PREPENDS the new factor to xs.
    def *(y: (Int,String)) = Product((y +: xs):_*)
    def +(y: (Int,String)) = ListLinear(Seq(this), Linear(y))
    def *(y: Product) = Product((this.xs ++ y.xs):_*)
    def +(y: Product) = ListProduct(Seq(this), y)
    def +(y: ListProduct) = Seq(ListProduct(Nil,this), y)
    // '|' closes this pattern and starts a new alternative.
    def |(that: (Int,String)) = Seq(ListProduct(Nil,this), ListLinear(Nil,Linear(that)))
    def |(that: Product) = Seq(ListProduct(Nil,this), ListProduct(Nil,that))
    def |(that: Linear) = Seq(ListProduct(Nil,this), ListLinear(Nil,that))
    def |(that: ListProduct) = Seq(ListProduct(Nil,this), that)
    def |(that: ListLinear) = Seq(ListProduct(Nil,this), that)
    def label = xs.map(_._2).mkString("*")
    def ins = xs.map(_._1)
  }

  /** A single labeled factor wrapping one (value, name) pair. */
  case class Linear(x: (Int,String)) extends Pattern {
    def *(y: (Int,String)) = Product(x,y)
    def +(y: (Int,String)) = ListLinear(Seq(this), Linear(y))
    def +(y: Product) = ListProduct(Seq(this), y)
    def +(y: ListProduct) = Seq(ListLinear(Nil,this), y)
    def |(that: (Int,String)) = Seq(ListLinear(Nil,this), ListLinear(Nil,Linear(that)))
    def |(that: Product) = Seq(ListLinear(Nil,this), ListProduct(Nil,that))
    def |(that: Linear) = Seq(ListLinear(Nil,this), ListLinear(Nil,that))
    def |(that: ListProduct) = Seq(ListLinear(Nil,this), that)
    def |(that: ListLinear) = Seq(ListLinear(Nil,this), that)
    def label = x._2
  }

  /** DSL entry point: lets a plain (Int, String) pair combine with patterns. */
  implicit class LabeledPairOps(x: (Int,String)) {
    def *(y: (Int,String)) = Product(x, y)
    def +(y: (Int,String)) = ListLinear(Seq(Linear(x)), Linear(y))
    def +(y: Product) = ListProduct(Seq(Linear(x)),y)
    def +(y: Linear) = ListLinear(Seq(Linear(x)),y)
    def +(y: ListProduct) = ListProduct(Linear(x) +: y.ps, y.last)
    def +(y: ListLinear) = ListLinear(Linear(x) +: y.ps, y.last)
    def |(that: (Int,String)) = Seq(ListLinear(Nil,Linear(x)), ListLinear(Nil,Linear(that)))
    def |(that: Product) = Seq(ListLinear(Nil,Linear(x)), ListProduct(Nil,that))
    def |(that: Linear) = Seq(ListLinear(Nil,Linear(x)), ListLinear(Nil,that))
    def |(that: ListProduct) = Seq(ListLinear(Nil,Linear(x)), that)
    def |(that: ListLinear) = Seq(ListLinear(Nil,Linear(x)), that)
  }

  /** Allows chaining further '|' alternatives after a sequence has formed. */
  implicit class PatternListSeqOps(x: Seq[PatternList]) {
    def |(that: (Int,String)) = x ++ ListLinear(Nil,Linear(that))
    def |(that: Product) = x ++ ListProduct(Nil,that)
    def |(that: Linear) = x ++ ListLinear(Nil,that)
    def |(that: ListProduct) = x ++ that
    def |(that: ListLinear) = x ++ that
  }
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/models/characterization/LabeledPairs.scala | Scala | mit | 3,807 |
package skinny.engine.routing
import javax.servlet.http.HttpServletRequest
import skinny.engine.{ RouteTransformer, MultiParams, Action }
import skinny.engine.data.MultiMap
/**
* A route is a set of matchers and an action. A route is considered to match
* if all of its route matchers return Some. If a route matches, its action
* may be invoked. The route parameters extracted by the matchers are made
* available to the action.
*/
case class Route(
  routeMatchers: Seq[RouteMatcher] = Seq.empty,
  action: Action,
  contextPath: HttpServletRequest => String = _ => "",
  metadata: Map[Symbol, Any] = Map.empty) {

  /**
   * Optionally returns this route's action and the multi-map of route
   * parameters extracted from the matchers. Each matcher's returned params
   * are merged into those of the previous. If any matcher returns None,
   * None is returned. If there are no route matchers, some empty map is
   * returned.
   */
  def apply(requestPath: String): Option[MatchedRoute] = {
    // Fold over matchers, accumulating params; the Option in the for-comprehension
    // short-circuits the whole fold to None as soon as one matcher fails.
    routeMatchers.foldLeft(Option(MultiMap())) {
      (acc: Option[MultiParams], routeMatcher: RouteMatcher) =>
        for {
          routeParams <- acc
          matcherParams <- routeMatcher(requestPath)
        } yield routeParams ++ matcherParams
    } map { routeParams => MatchedRoute(action, routeParams) }
  }

  /**
   * The reversible matcher of a route is the first reversible matcher, if
   * any. This matcher may be used to generate URIs.
   */
  lazy val reversibleMatcher: Option[RouteMatcher] = {
    routeMatchers find (_.isInstanceOf[ReversibleRouteMatcher])
  }

  /**
   * Determines whether this is a reversible route.
   */
  lazy val isReversible: Boolean = !reversibleMatcher.isEmpty

  override def toString: String = routeMatchers.mkString(" ")
}
object Route {
  /** Builds a route with an empty context path. */
  def apply(transformers: Seq[RouteTransformer], action: Action): Route =
    apply(transformers, action, (_: HttpServletRequest) => "")

  // TODO: remove HttpServletRequest from contextPath
  /** Builds a base route and threads it through every transformer in order. */
  def apply(transformers: Seq[RouteTransformer], action: Action, contextPath: HttpServletRequest => String): Route = {
    val initial = Route(action = action, contextPath = contextPath)
    transformers.foldLeft(initial) { (current, transform) =>
      transform(current)
    }
  }

  /** Transformer that adds one more matcher to a route's matcher list. */
  def appendMatcher(matcher: RouteMatcher): RouteTransformer = { (route: Route) =>
    route.copy(routeMatchers = route.routeMatchers :+ matcher)
  }
}
| holycattle/skinny-framework | engine/src/main/scala/skinny/engine/routing/Route.scala | Scala | mit | 2,433 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.join
import org.apache.flink.api.common.state._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.co.CoProcessFunction
import org.apache.flink.table.api.StreamQueryConfig
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
/**
* Connect data for left stream and right stream. Only use for left or right join without
* non-equal predicates.
*
* @param leftType the input type of left stream
* @param rightType the input type of right stream
* @param resultType the output type of join
* @param genJoinFuncName the function code without any non-equi condition
* @param genJoinFuncCode the function name without any non-equi condition
* @param isLeftJoin the type of join, whether it is the type of left join
* @param queryConfig the configuration for the query to generate
*/
class NonWindowLeftRightJoin(
    leftType: TypeInformation[Row],
    rightType: TypeInformation[Row],
    resultType: TypeInformation[CRow],
    genJoinFuncName: String,
    genJoinFuncCode: String,
    isLeftJoin: Boolean,
    queryConfig: StreamQueryConfig)
  extends NonWindowOuterJoin(
    leftType,
    rightType,
    resultType,
    genJoinFuncName,
    genJoinFuncCode,
    isLeftJoin,
    queryConfig) {

  override def open(parameters: Configuration): Unit = {
    super.open(parameters)
    // joinType is only used for the debug log line below.
    val joinType = if (isLeftJoin) "Left" else "Right"
    LOG.debug(s"Instantiating NonWindow${joinType}OuterJoin")
  }

  /**
   * Puts or Retract an element from the input stream into state and search the other state to
   * output records meet the condition. The input row will be preserved and appended with null, if
   * there is no match. Records will be expired in state if state retention time has been
   * specified.
   */
  override def processElement(
      value: CRow,
      ctx: CoProcessFunction[CRow, CRow, CRow]#Context,
      out: Collector[CRow],
      timerState: ValueState[Long],
      currentSideState: MapState[Row, JTuple2[Long, Long]],
      otherSideState: MapState[Row, JTuple2[Long, Long]],
      recordFromLeft: Boolean): Unit = {
    val inputRow = value.row
    // Record (or retract) the incoming row on its own side's state first.
    updateCurrentSide(value, ctx, timerState, currentSideState)
    // Configure the shared collector wrapper to emit with the input's change flag.
    cRowWrapper.reset()
    cRowWrapper.setCollector(out)
    cRowWrapper.setChange(value.change)
    // join other side data
    if (recordFromLeft == isLeftJoin) {
      // Input arrives on the preserved (outer) side of the join.
      preservedJoin(inputRow, recordFromLeft, otherSideState)
    } else {
      // Input arrives on the non-preserved side; may require retracting
      // previously emitted results (see retractJoin in the parent class).
      retractJoin(value, recordFromLeft, currentSideState, otherSideState)
    }
  }
}
| zhangminglei/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/NonWindowLeftRightJoin.scala | Scala | apache-2.0 | 3,600 |
package com.tam.cobol_interpreter.parser.schema
import java.io.InputStream
import com.tam.cobol_interpreter.parser.schema.expressions.{ExpressionGenerator, ParserSchemaExpression}
import com.tam.cobol_interpreter.tools.InputStreamTool
/**
* Created by tamu on 1/4/15.
*/
/**
 * Factory for [[ParserSchema]] instances, accepting the schema as a stream,
 * raw text, or an already-parsed expression tree.
 */
object ParserSchemaFactory {
  /** Reads the stream fully, then builds a schema from its text. */
  def createSchema(schemaStream: InputStream): ParserSchema = {
    val schemaText = InputStreamTool.read(schemaStream)
    createSchema(schemaText)
  }

  /** Parses the schema text into expressions, then wraps them in a schema. */
  def createSchema(schemaString: String): ParserSchema = {
    val expressions = ExpressionGenerator.generateExpressionTree(schemaString)
    createSchema(expressions)
  }

  /** Wraps an already-parsed expression tree in a [[ParserSchema]]. */
  def createSchema(expressionList: Array[ParserSchemaExpression]): ParserSchema =
    new ParserSchema(expressionList)
}
| tamsanh/scala-cobol-interpreter | src/main/scala/com/tam/cobol_interpreter/parser/schema/ParserSchemaFactory.scala | Scala | apache-2.0 | 672 |
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi.behavior
import org.goseumdochi.control._
// Scripted-behavior specification driven by the recorded event scripts under
// /scripted; each example replays a script against the named FSM class.
class PerspectiveBehaviorSpec
  extends ScriptedBehaviorSpecification("perspective-test.conf")
{
  "scripted behavior" should
  {
    // Replays the measurement script against the orientation FSM, registered
    // under the orientation actor name with its own config overlay.
    "perform perspective orientation" in ScriptedBehaviorExample(
      "/scripted/measure.json",
      classOf[PerspectiveOrientationFsm],
      ControlActor.ORIENTATION_ACTOR_NAME,
      "perspective-orientation-test.conf")
    // Doze FSM: expects the robot to stay put for the whole script.
    "impersonate a mountain" in ScriptedBehaviorExample(
      "/scripted/doze.json",
      classOf[DozeFsm])
    // Square FSM with its own config, registered as the behavior actor.
    "go round in squares" in ScriptedBehaviorExample(
      "/scripted/square.json",
      classOf[SquareFsm],
      ControlActor.BEHAVIOR_ACTOR_NAME,
      "square-test.conf")
  }
}
| lingeringsocket/goseumdochi | base/src/test/scala/org/goseumdochi/behavior/PerspectiveBehaviorSpec.scala | Scala | apache-2.0 | 1,358 |
package com.teambytes.inflatable.raft.protocol
import com.teambytes.inflatable.raft.model.Term
import akka.actor.ActorRef
private[protocol] trait InternalProtocol extends Serializable {
  // just some types to make it more clear when these messages are sent, not actualy used (could be stripped)
  sealed trait InternalMessage extends Message[Internal]
  sealed trait FollowerResponse extends Message[Internal]
  sealed trait ElectionMessage extends Message[Internal]
  sealed trait LeaderMessage extends Message[Internal]

  // Election lifecycle messages exchanged between candidates and voters.
  case object BeginElection extends ElectionMessage
  // A vote granted / declined for the given term.
  case class VoteCandidate(term: Term) extends ElectionMessage
  case class DeclineCandidate(term: Term) extends ElectionMessage
  case object ElectedAsLeader extends ElectionMessage
  case object ElectionTimeout extends ElectionMessage

  /** When the Leader has sent an append, for an unexpected number, the Follower replies with this */
  sealed trait AppendResponse extends FollowerResponse {
    /** currentTerm for leader to update in the `nextTerm` lookup table */
    def term: Term
  }
  // lastIndex tells the leader where this follower's log currently ends.
  case class AppendRejected(term: Term, lastIndex: Int) extends AppendResponse
  case class AppendSuccessful(term: Term, lastIndex: Int) extends AppendResponse

  /** Internal msg sent to actor which should start a snapshotting process */
  case object InitLogSnapshot extends Message[Internal]

  // Periodic trigger for the leader to broadcast a heartbeat round.
  case object SendHeartbeat extends LeaderMessage

  // State-introspection messages, visible only within the raft package.
  private[raft] case object AskForState extends Message[Internal]
  private[raft] case class IAmInState(state: RaftState) extends Message[Internal]

  // ---- testing and monitoring messages ----
  case class EntryCommitted(idx: Int, on: ActorRef) extends Message[Testing]
  case class SnapshotWritten(initialSize: Int, compactedSize: Int) extends Message[Testing]
  // ---- end of testing and monitoring messages ----
}
| grahamar/inflatable | src/main/scala/com/teambytes/inflatable/raft/protocol/InternalProtocol.scala | Scala | apache-2.0 | 1,886 |
/**
* Copyright 2013 Robert Welin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mooo.nilewapps.bokbytarappen.server.data
import com.mooo.nilewapps.bokbytarappen.server.data.TokenJsonProtocol._
/**
 * Envelope pairing an optional session token with a message payload of type T.
 */
case class SessMess[T](
  sess: Option[Token],
  mess: T)

// JSON format instances for SessMess payload types; jsonFormat2 is brought in
// via the TokenJsonProtocol._ import above (presumably spray-json — confirm).
object SessMessJsonProtocol {
  implicit val SessMessStringFormat = jsonFormat2(SessMess[String])
}
| nilewapp/BokBytarAppenServer | src/main/scala/com/mooo/nilewapps/bokbytarappen/server/data/SessMess.scala | Scala | apache-2.0 | 881 |
package com.twitter.finatra.example
import com.google.inject.Module
import com.twitter.inject.annotations.Flags
import com.twitter.inject.app.App
import com.twitter.inject.modules.StatsReceiverModule
import com.twitter.util.logging.Logger
import com.twitter.util.logging.Slf4jBridge
import scala.collection.mutable.ArrayBuffer
// Runnable entry point; the app logic lives in the SampleApp class so tests
// can instantiate it directly.
object SampleAppMain extends SampleApp

class SampleApp extends App with Slf4jBridge {
  private[this] val log: Logger = Logger("SampleApp")
  // Records the order in which the lifecycle callbacks fired (values 1..6);
  // exposed to tests through getQueue.
  private[this] val queue: ArrayBuffer[Int] = new ArrayBuffer[Int]()

  // Mandatory flag with no default: run() fails if no value is supplied.
  flag[String]("username", "Username to use.", "-username=Bob")

  override val modules: Seq[Module] = Seq(StatsReceiverModule)

  // Main body: runs third in the lifecycle (after init and premain).
  override protected def run(): Unit = {
    queue += 3
    val helloService: HelloService = injector.instance[HelloService]
    // username Flag is mandatory. if it has no value, the app fails here.
    val username: String = injector.instance[String](Flags.named("username"))
    log.debug(s"Input username: $username")
    log.info(helloService.hi(username))
  }

  // Lifecycle hooks below push their ordinal so tests can assert ordering:
  // init(1) -> premain(2) -> run(3) -> postmain(4) -> onExit(5) -> onExitLast(6).
  init {
    queue += 1
  }

  premain {
    queue += 2
  }

  postmain {
    queue += 4
  }

  onExit {
    queue += 5
  }

  onExitLast {
    queue += 6
  }

  // Snapshot of the recorded lifecycle order, for assertions in tests.
  def getQueue: Seq[Int] = this.queue.toSeq
}
| twitter/finatra | examples/injectable-app/scala/src/main/scala/com/twitter/finatra/example/HelloWorldApp.scala | Scala | apache-2.0 | 1,244 |
package test
import org.scalatest._
/**
 * Common base class for unit tests: bundles the FlatSpec style with the
 * usual ScalaTest mixins (matchers, Option/Inside/Inspectors helpers,
 * before/after hooks) and isolates state via OneInstancePerTest.
 */
abstract class UnitSpec
  extends FlatSpec
  with Matchers
  with OptionValues
  with Inside
  with Inspectors
  with BeforeAndAfter
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with OneInstancePerTest {
}
| memsql/streamliner-starter | src/test/scala/test/UnitSpec.scala | Scala | apache-2.0 | 254 |
package org.baz.qux
object Fibs {
  /**
   * Computes the i-th Fibonacci number (fib(0) = 0, fib(1) = 1) by direct
   * recursion. Exponential time — fine for small i; prefer fibsIterative
   * for larger inputs.
   */
  def fibsRecursive(i: Int): Int = i match {
    case 0 | 1 => i
    case _ => fibsRecursive(i - 1) + fibsRecursive(i - 2)
  }

  /**
   * Computes the i-th Fibonacci number iteratively in O(i) time.
   *
   * Bug fix: the original loop tested `i > 0` but never changed `i`
   * (parameters are immutable in Scala), so it spun forever for any
   * positive input. A local countdown now drives the loop; behavior for
   * i <= 0 (returning 0) is unchanged.
   */
  def fibsIterative(i: Int): Int = {
    var fst = 0
    var snd = 1
    var remaining = i
    while (remaining > 0) {
      val next = fst + snd
      fst = snd
      snd = next
      remaining -= 1
    }
    fst
  }
}
| sugakandrey/scalamu | testing/simple/src/main/scala/org/baz/qux/Fibs.scala | Scala | gpl-3.0 | 354 |
package domino.scala_osgi_metatype.builders
import domino.scala_osgi_metatype.interfaces.ObjectClassDefinition
/**
* Adds some convenience methods to the given object class definition.
*/
trait ObjectClassDefinitionConvenience {
  /**
   * Object class definition.
   */
  protected def definition: ObjectClassDefinition

  /**
   * Returns both the attribute definitions of required and optional attributes for this object class definition.
   */
  lazy val allAttributeDefinitions = definition.requiredAttributeDefinitions ++ definition.optionalAttributeDefinitions

  /**
   * Builds a configuration map containing all the default values.
   *
   * Ideal for overlaying a default configuration with an actual one:
   * {{{
   * val finalConfig = actualConfig ++ objectClass.defaultConfig
   * }}}
   */
  lazy val defaultConfig = {
    // Attributes without a default value are skipped entirely.
    // Uses dot notation instead of the original trailing postfix `toMap`,
    // which requires scala.language.postfixOps on modern compilers.
    allAttributeDefinitions.flatMap { definition =>
      definition.defaultValue.map { value =>
        definition.id -> value
      }
    }.toMap
  }
}
| helgoboss/domino | src/main/scala/domino/scala_osgi_metatype/builders/ObjectClassDefinitionConvenience.scala | Scala | mit | 995 |
/*
Copyright (c) 2009, 2010 Hanno Braun <mail@hannobraun.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hannobraun.sd.math
import Scalar._
import org.specs.Specification
import org.specs.runner.JUnit4
// JUnit 4 runner wrapper so ScalarSpec can be executed as a JUnit test.
class ScalarTest extends JUnit4( ScalarSpec )

// Specification for the implicit numeric-to-Scalar conversions imported
// from Scalar._ above.
object ScalarSpec extends Specification {
  "Scalar" should {
    "implicitely convert Double to Scalar if the Double is mutliplied with a Vector2." in {
      val vec = 2.0 * Vector2( 1, 1 )
      vec must beEqualTo ( Vector2( 2, 2 ) )
    }

    "implicitely convert Int to Scalar if the Int is mutliplied with a Vector2." in {
      val vec = 2 * Vector2( 1, 1 )
      vec must beEqualTo( Vector2( 2, 2 ) )
    }
  }
}
| hannobraun/ScalableDynamics | src/test/scala/com/hannobraun/sd/math/ScalarSpec.scala | Scala | apache-2.0 | 1,166 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.sql
import java.util.Properties
import org.apache.spark.sql.SparkSession
// Spark SQL data-source examples; the `$example on/off$` markers delimit
// snippets that are extracted into the official documentation, so the code
// between them is kept exactly as published.
object SQLDataSourceExample {

  case class Person(name: String, age: Long)

  def main(args: Array[String]) {
    val spark = SparkSession
      .builder()
      .appName("Spark SQL data sources example")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()

    runBasicDataSourceExample(spark)
    runBasicParquetExample(spark)
    runParquetSchemaMergingExample(spark)
    runJsonDatasetExample(spark)
    runJdbcDatasetExample(spark)

    spark.stop()
  }

  // Demonstrates load/save with the default format, explicit formats, and SQL-on-files.
  private def runBasicDataSourceExample(spark: SparkSession): Unit = {
    // $example on:generic_load_save_functions$
    val usersDF = spark.read.load("examples/src/main/resources/users.parquet")
    usersDF.select("name", "favorite_color").write.save("namesAndFavColors.parquet")
    // $example off:generic_load_save_functions$
    // $example on:manual_load_options$
    val peopleDF = spark.read.format("json").load("examples/src/main/resources/people.json")
    peopleDF.select("name", "age").write.format("parquet").save("namesAndAges.parquet")
    // $example off:manual_load_options$
    // $example on:direct_sql$
    val sqlDF = spark.sql("SELECT * FROM parquet.`examples/src/main/resources/users.parquet`")
    // $example off:direct_sql$
  }

  // Demonstrates writing/reading Parquet and querying it through a temp view.
  private def runBasicParquetExample(spark: SparkSession): Unit = {
    // $example on:basic_parquet_example$
    // Encoders for most common types are automatically provided by importing spark.implicits._
    import spark.implicits._

    val peopleDF = spark.read.json("examples/src/main/resources/people.json")

    // DataFrames can be saved as Parquet files, maintaining the schema information
    peopleDF.write.parquet("people.parquet")

    // Read in the parquet file created above
    // Parquet files are self-describing so the schema is preserved
    // The result of loading a Parquet file is also a DataFrame
    val parquetFileDF = spark.read.parquet("people.parquet")

    // Parquet files can also be used to create a temporary view and then used in SQL statements
    parquetFileDF.createOrReplaceTempView("parquetFile")
    val namesDF = spark.sql("SELECT name FROM parquetFile WHERE age BETWEEN 13 AND 19")
    namesDF.map(attributes => "Name: " + attributes(0)).show()
    // +------------+
    // |       value|
    // +------------+
    // |Name: Justin|
    // +------------+
    // $example off:basic_parquet_example$
  }

  // Demonstrates mergeSchema: partitions written with different columns are unioned.
  private def runParquetSchemaMergingExample(spark: SparkSession): Unit = {
    // $example on:schema_merging$
    // This is used to implicitly convert an RDD to a DataFrame.
    import spark.implicits._

    // Create a simple DataFrame, store into a partition directory
    val squaresDF = spark.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
    squaresDF.write.parquet("data/test_table/key=1")

    // Create another DataFrame in a new partition directory,
    // adding a new column and dropping an existing column
    val cubesDF = spark.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cube")
    cubesDF.write.parquet("data/test_table/key=2")

    // Read the partitioned table
    val mergedDF = spark.read.option("mergeSchema", "true").parquet("data/test_table")
    mergedDF.printSchema()

    // The final schema consists of all 3 columns in the Parquet files together
    // with the partitioning column appeared in the partition directory paths
    // root
    //  |-- value: int (nullable = true)
    //  |-- square: int (nullable = true)
    //  |-- cube: int (nullable = true)
    //  |-- key: int (nullable = true)
    // $example off:schema_merging$
  }

  // Demonstrates reading JSON from files and from an in-memory Dataset[String].
  private def runJsonDatasetExample(spark: SparkSession): Unit = {
    // $example on:json_dataset$
    // Primitive types (Int, String, etc) and Product types (case classes) encoders are
    // supported by importing this when creating a Dataset.
    import spark.implicits._

    // A JSON dataset is pointed to by path.
    // The path can be either a single text file or a directory storing text files
    val path = "examples/src/main/resources/people.json"
    val peopleDF = spark.read.json(path)

    // The inferred schema can be visualized using the printSchema() method
    peopleDF.printSchema()
    // root
    //  |-- age: long (nullable = true)
    //  |-- name: string (nullable = true)

    // Creates a temporary view using the DataFrame
    peopleDF.createOrReplaceTempView("people")

    // SQL statements can be run by using the sql methods provided by spark
    val teenagerNamesDF = spark.sql("SELECT name FROM people WHERE age BETWEEN 13 AND 19")
    teenagerNamesDF.show()
    // +------+
    // |  name|
    // +------+
    // |Justin|
    // +------+

    // Alternatively, a DataFrame can be created for a JSON dataset represented by
    // an Dataset[String] storing one JSON object per string
    val otherPeopleDataset = spark.createDataset(
      """{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}""" :: Nil)
    val otherPeople = spark.read.json(otherPeopleDataset)
    otherPeople.show()
    // +---------------+----+
    // |        address|name|
    // +---------------+----+
    // |[Columbus,Ohio]| Yin|
    // +---------------+----+
    // $example off:json_dataset$
  }

  // Demonstrates JDBC reads/writes via both option-style and jdbc() APIs.
  private def runJdbcDatasetExample(spark: SparkSession): Unit = {
    // $example on:jdbc_dataset$
    // Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods
    // Loading data from a JDBC source
    val jdbcDF = spark.read
      .format("jdbc")
      .option("url", "jdbc:postgresql:dbserver")
      .option("dbtable", "schema.tablename")
      .option("user", "username")
      .option("password", "password")
      .load()

    val connectionProperties = new Properties()
    connectionProperties.put("user", "username")
    connectionProperties.put("password", "password")
    val jdbcDF2 = spark.read
      .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties)

    // Saving data to a JDBC source
    jdbcDF.write
      .format("jdbc")
      .option("url", "jdbc:postgresql:dbserver")
      .option("dbtable", "schema.tablename")
      .option("user", "username")
      .option("password", "password")
      .save()

    jdbcDF2.write
      .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties)
    // $example off:jdbc_dataset$
  }
}
| jianran/spark | examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala | Scala | apache-2.0 | 7,270 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.api.dag.Transformation
import org.apache.flink.configuration.MemorySize
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.streaming.api.operators.SimpleOperatorFactory
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.data.RowData
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.CodeGeneratorContext
import org.apache.flink.table.planner.codegen.ProjectionCodeGenerator.generateProjection
import org.apache.flink.table.planner.codegen.sort.SortCodeGenerator
import org.apache.flink.table.planner.delegation.BatchPlanner
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistributionTraitDef
import org.apache.flink.table.planner.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.planner.plan.nodes.exec.ExecNode
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, FlinkRelOptUtil, JoinUtil, SortUtil}
import org.apache.flink.table.runtime.operators.join.{FlinkJoinType, SortMergeJoinOperator}
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.plan._
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollationTraitDef, RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import java.util
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for sort-merge [[Join]].
*/
class BatchExecSortMergeJoin(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    leftRel: RelNode,
    rightRel: RelNode,
    condition: RexNode,
    joinType: JoinRelType,
    // true if LHS is sorted by left join keys, else false
    val leftSorted: Boolean,
    // true if RHS is sorted by right join key, else false
    val rightSorted: Boolean)
  extends BatchExecJoinBase(cluster, traitSet, leftRel, rightRel, condition, joinType) {
  // Join key indices on each side, derived once from the equi-join key pairs.
  protected lazy val (leftAllKey, rightAllKey) =
    JoinUtil.checkAndGetJoinKeys(keyPairs, getLeft, getRight)
  // Sort-merge join here supports only the four standard join types.
  protected def isMergeJoinSupportedType(joinRelType: FlinkJoinType): Boolean = {
    joinRelType == FlinkJoinType.INNER ||
      joinRelType == FlinkJoinType.LEFT ||
      joinRelType == FlinkJoinType.RIGHT ||
      joinRelType == FlinkJoinType.FULL
  }
  // Calcite copy: recreates the node with new inputs/condition while carrying
  // over the leftSorted/rightSorted flags of this instance.
  override def copy(
      traitSet: RelTraitSet,
      conditionExpr: RexNode,
      left: RelNode,
      right: RelNode,
      joinType: JoinRelType,
      semiJoinDone: Boolean): Join = {
    new BatchExecSortMergeJoin(
      cluster,
      traitSet,
      left,
      right,
      conditionExpr,
      joinType,
      leftSorted,
      rightSorted)
  }
  // Only prints the sorted flags when they are true, to keep plans terse.
  override def explainTerms(pw: RelWriter): RelWriter =
    super.explainTerms(pw)
      .itemIf("leftSorted", leftSorted, leftSorted)
      .itemIf("rightSorted", rightSorted, rightSorted)
  // Cost model: pre-sorted inputs only pay a buffering cost (one pass over the
  // rows); unsorted inputs pay an O(n log n) comparison cost plus sort memory.
  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    val leftRowCnt = mq.getRowCount(getLeft)
    val rightRowCnt = mq.getRowCount(getRight)
    if (leftRowCnt == null || rightRowCnt == null) {
      return null
    }
    val numOfSort = joinInfo.leftKeys.size()
    val leftSortCpuCost: Double = if (leftSorted) {
      // cost of writing lhs data to buffer
      leftRowCnt
    } else {
      // sort cost
      FlinkCost.COMPARE_CPU_COST * numOfSort * leftRowCnt * Math.max(Math.log(leftRowCnt), 1.0)
    }
    val rightSortCpuCost: Double = if (rightSorted) {
      // cost of writing rhs data to buffer
      rightRowCnt
    } else {
      // sort cost
      FlinkCost.COMPARE_CPU_COST * numOfSort * rightRowCnt * Math.max(Math.log(rightRowCnt), 1.0)
    }
    // cost of evaluating each join condition
    val joinConditionCpuCost = FlinkCost.COMPARE_CPU_COST * (leftRowCnt + rightRowCnt)
    val cpuCost = leftSortCpuCost + rightSortCpuCost + joinConditionCpuCost
    val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
    // assume memory is big enough, so sort process and mergeJoin process will not spill to disk.
    var sortMemCost = 0D
    if (!leftSorted) {
      sortMemCost += FlinkRelMdUtil.computeSortMemory(mq, getLeft)
    }
    if (!rightSorted) {
      sortMemCost += FlinkRelMdUtil.computeSortMemory(mq, getRight)
    }
    val rowCount = mq.getRowCount(this)
    costFactory.makeCost(rowCount, cpuCost, 0, 0, sortMemCost)
  }
  // Tries to push the required distribution/collation down to the inputs.
  // Returns a converted copy of this node when a hash distribution on the join
  // keys can satisfy the requirement, None otherwise.
  override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    val (canSatisfyDistribution, leftRequiredDistribution, rightRequiredDistribution) =
      satisfyHashDistributionOnNonBroadcastJoin(requiredDistribution)
    if (!canSatisfyDistribution) {
      return None
    }
    val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
    val requiredFieldCollations = requiredCollation.getFieldCollations
    val shuffleKeysSize = leftRequiredDistribution.getKeys.size
    val newLeft = RelOptRule.convert(getLeft, leftRequiredDistribution)
    val newRight = RelOptRule.convert(getRight, rightRequiredDistribution)
    // SortMergeJoin can provide collation trait, check whether provided collation can satisfy
    // required collations
    val canProvideCollation = if (requiredCollation.getFieldCollations.isEmpty) {
      false
    } else if (requiredFieldCollations.size > shuffleKeysSize) {
      // Sort by [a, b] can satisfy [a], but cannot satisfy [a, b, c]
      false
    } else {
      val leftKeys = leftRequiredDistribution.getKeys
      val leftFieldCnt = getLeft.getRowType.getFieldCount
      val rightKeys = rightRequiredDistribution.getKeys.map(_ + leftFieldCnt)
      requiredFieldCollations.zipWithIndex.forall { case (collation, index) =>
        val idxOfCollation = collation.getFieldIndex
        // Full outer join is handled before, so does not need care about it
        if (idxOfCollation < leftFieldCnt && joinType != JoinRelType.RIGHT) {
          val fieldCollationOnLeftSortKey = FlinkRelOptUtil.ofRelFieldCollation(leftKeys.get(index))
          collation == fieldCollationOnLeftSortKey
        } else if (idxOfCollation >= leftFieldCnt &&
          (joinType == JoinRelType.RIGHT || joinType == JoinRelType.INNER)) {
          val fieldCollationOnRightSortKey =
            FlinkRelOptUtil.ofRelFieldCollation(rightKeys.get(index))
          collation == fieldCollationOnRightSortKey
        } else {
          false
        }
      }
    }
    var newProvidedTraitSet = getTraitSet.replace(requiredDistribution)
    if (canProvideCollation) {
      newProvidedTraitSet = newProvidedTraitSet.replace(requiredCollation)
    }
    Some(copy(newProvidedTraitSet, Seq(newLeft, newRight)))
  }
  //~ ExecNode methods -----------------------------------------------------------
  /**
    * Now must be full dam without two input operator chain.
    * TODO two input operator chain will return different value.
    */
  override def getDamBehavior: DamBehavior = DamBehavior.FULL_DAM
  override def getInputNodes: util.List[ExecNode[BatchPlanner, _]] =
    getInputs.map(_.asInstanceOf[ExecNode[BatchPlanner, _]])
  override def replaceInputNode(
      ordinalInParent: Int,
      newInputNode: ExecNode[BatchPlanner, _]): Unit = {
    replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
  }
  // Translates this plan node into a SortMergeJoinOperator transformation:
  // generates the condition function, key projections, sorters and comparators,
  // and sizes the operator's managed memory (sort buffers + external buffers).
  override protected def translateToPlanInternal(
      planner: BatchPlanner): Transformation[RowData] = {
    val config = planner.getTableConfig
    val leftInput = getInputNodes.get(0).translateToPlan(planner)
      .asInstanceOf[Transformation[RowData]]
    val rightInput = getInputNodes.get(1).translateToPlan(planner)
      .asInstanceOf[Transformation[RowData]]
    val leftType = leftInput.getOutputType.asInstanceOf[InternalTypeInfo[RowData]].toRowType
    val rightType = rightInput.getOutputType.asInstanceOf[InternalTypeInfo[RowData]].toRowType
    val keyType = RowType.of(leftAllKey.map(leftType.getChildren.get(_)): _*)
    val condFunc = JoinUtil.generateConditionFunction(
      config,
      cluster.getRexBuilder,
      getJoinInfo,
      leftType,
      rightType)
    val externalBufferMemory = MemorySize.parse(config.getConfiguration.getString(
      ExecutionConfigOptions.TABLE_EXEC_RESOURCE_EXTERNAL_BUFFER_MEMORY)).getBytes
    val sortMemory = MemorySize.parse(config.getConfiguration.getString(
      ExecutionConfigOptions.TABLE_EXEC_RESOURCE_SORT_MEMORY)).getBytes
    // FULL joins buffer both sides, hence two external buffers; otherwise one.
    val externalBufferNum = if (flinkJoinType == FlinkJoinType.FULL) 2 else 1
    val managedMemory = externalBufferMemory * externalBufferNum + sortMemory * 2
    // Builds a sort code generator over the given key indices (ascending order,
    // duplicate keys removed).
    def newSortGen(originalKeys: Array[Int], t: RowType): SortCodeGenerator = {
      val originalOrders = originalKeys.map(_ => true)
      val (keys, orders, nullsIsLast) = SortUtil.deduplicateSortKeys(
        originalKeys,
        originalOrders,
        SortUtil.getNullDefaultOrders(originalOrders))
      val types = keys.map(t.getTypeAt)
      new SortCodeGenerator(config, keys, types, orders, nullsIsLast)
    }
    val leftSortGen = newSortGen(leftAllKey, leftType)
    val rightSortGen = newSortGen(rightAllKey, rightType)
    val operator = new SortMergeJoinOperator(
      externalBufferMemory.toDouble / managedMemory,
      flinkJoinType,
      estimateOutputSize(getLeft) < estimateOutputSize(getRight),
      condFunc,
      generateProjection(
        CodeGeneratorContext(config), "SMJProjection", leftType, keyType, leftAllKey),
      generateProjection(
        CodeGeneratorContext(config), "SMJProjection", rightType, keyType, rightAllKey),
      leftSortGen.generateNormalizedKeyComputer("LeftComputer"),
      leftSortGen.generateRecordComparator("LeftComparator"),
      rightSortGen.generateNormalizedKeyComputer("RightComputer"),
      rightSortGen.generateRecordComparator("RightComparator"),
      newSortGen(leftAllKey.indices.toArray, keyType).generateRecordComparator("KeyComparator"),
      filterNulls)
    ExecNode.createTwoInputTransformation(
      leftInput,
      rightInput,
      getRelDetailedDescription,
      SimpleOperatorFactory.of(operator),
      InternalTypeInfo.of(FlinkTypeFactory.toLogicalRowType(getRowType)),
      rightInput.getParallelism,
      managedMemory)
  }
  // Rough output size in bytes: average row size times estimated row count.
  private def estimateOutputSize(relNode: RelNode): Double = {
    val mq = relNode.getCluster.getMetadataQuery
    mq.getAverageRowSize(relNode) * mq.getRowCount(relNode)
  }
}
| darionyaphet/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecSortMergeJoin.scala | Scala | apache-2.0 | 11,385 |
package org.jetbrains.plugins.scala
package codeInsight
package intention
package controlFlow
import com.intellij.testFramework.EditorTestUtil
/**
* @author Ksenia.Sautina
* @since 6/6/12
*/
class MergeElseIfIntentionTest extends intentions.ScalaIntentionTestBase {

  import EditorTestUtil.{CARET_TAG => CARET}

  override def familyName = ScalaCodeInsightBundle.message("family.name.merge.else.if")

  // Nested if/else fully braced: braces are kept after merging into `else if`.
  def testMergeElseIf1(): Unit = {
    val before =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) {
         |     System.out.println("if1")
         |   } el${CARET}se {
         |     if (a == 8) {
         |       System.out.println("if2")
         |     } else {
         |       System.out.println("else")
         |     }
         |   }
         | }
         |}""".stripMargin
    val after =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) {
         |     System.out.println("if1")
         |   } el${CARET}se if (a == 8) {
         |     System.out.println("if2")
         |   } else {
         |     System.out.println("else")
         |   }
         | }
         |}""".stripMargin
    doTest(before, after)
  }

  // Brace-less then-branch with a braced else: inner branches stay brace-less.
  def testMergeElseIf2(): Unit = {
    val before =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) System.out.println("if1")
         |   el${CARET}se {
         |     if (a == 8)
         |       System.out.println("if2")
         |     else
         |       System.out.println("else")
         |   }
         | }
         |}""".stripMargin
    val after =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) System.out.println("if1")
         |   el${CARET}se if (a == 8)
         |     System.out.println("if2")
         |   else
         |     System.out.println("else")
         | }
         |}""".stripMargin
    doTest(before, after)
  }

  // Mixed braces: only the inner else is braced; that brace survives the merge.
  def testMergeElseIf3(): Unit = {
    val before =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) {
         |     System.out.println("if1")
         |   } el${CARET}se {
         |     if (a == 8)
         |       System.out.println("if2")
         |     else {
         |       System.out.println("else")
         |     }
         |   }
         | }
         |}""".stripMargin
    val after =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) {
         |     System.out.println("if1")
         |   } el${CARET}se if (a == 8)
         |     System.out.println("if2")
         |   else {
         |     System.out.println("else")
         |   }
         | }
         |}""".stripMargin
    doTest(before, after)
  }

  // Braced outer then-branch with a fully brace-less inner if/else.
  def testMergeElseIf4(): Unit = {
    val before =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) {
         |     System.out.println("if1")
         |   } el${CARET}se {
         |     if (a == 8)
         |       System.out.println("if2")
         |     else
         |       System.out.println("else")
         |   }
         | }
         |}""".stripMargin
    val after =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) {
         |     System.out.println("if1")
         |   } el${CARET}se if (a == 8)
         |     System.out.println("if2")
         |   else
         |     System.out.println("else")
         | }
         |}""".stripMargin
    doTest(before, after)
  }

  // Caret placed after the `else` keyword; then-branch is folded onto one line.
  def testMergeElseIf5(): Unit = {
    val before =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9)
         |     System.out.println("if1")
         |   else${CARET} {
         |     if (a == 8)
         |       System.out.println("if2")
         |     else
         |       System.out.println("else")
         |   }
         | }
         |}""".stripMargin
    val after =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) System.out.println("if1")
         |   else${CARET} if (a == 8)
         |     System.out.println("if2")
         |   else
         |     System.out.println("else")
         | }
         |}""".stripMargin
    doTest(before, after)
  }

  // Inner if without an else branch still merges into `else if`.
  def testMergeElseIf6(): Unit = {
    val before =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9)
         |     System.out.println("if1")
         |   else${CARET} {
         |     if (a == 8)
         |       System.out.println("if2")
         |   }
         | }
         |}""".stripMargin
    val after =
      s"""class MergeElseIf {
         | def mthd() {
         |   val a: Int = 0
         |   if (a == 9) System.out.println("if1")
         |   else${CARET} if (a == 8)
         |     System.out.println("if2")
         | }
         |}""".stripMargin
    doTest(before, after)
  }
}
package gateway.restapi
import scala.concurrent.ExecutionContext
import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import gateway.restapi.domain.storagecontext.StorageContext
import gateway.restapi.http.HttpService
import gateway.restapi.services.{ClientsService, TransactionsService, WalletsService}
import gateway.restapi.utils.Config
object Main extends App with Config {
  implicit val actorSystem = ActorSystem("gateway-sketch-rest-api")
  implicit val executor: ExecutionContext = actorSystem.dispatcher
  implicit val log: LoggingAdapter = Logging(actorSystem, getClass)
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Wire the service layer against the production storage context.
  val clientsService = new ClientsService(StorageContext.instanceProd)
  val walletsService = new WalletsService(StorageContext.instanceProd)
  val transactionService = new TransactionsService(StorageContext.instanceProd, walletsService)
  val httpService = new HttpService(clientsService, transactionService, walletsService)

  // Bind the HTTP server. Previously the returned Future was discarded, so a
  // failed bind (e.g. port already in use) was silently swallowed and the
  // actor system kept running with no server. Log the outcome and shut down
  // on failure instead.
  Http().bindAndHandle(httpService.routes, httpHost, httpPort).onComplete {
    case scala.util.Success(binding) =>
      log.info("Server bound to {}", binding.localAddress)
    case scala.util.Failure(cause) =>
      log.error(cause, "Failed to bind to {}:{}; terminating actor system", httpHost, httpPort)
      actorSystem.terminate()
  }
}
| kartavtcev/gateway-sketch | src/main/scala/gateway/restapi/Main.scala | Scala | mit | 1,137 |
case object Pos {
  /** The origin position (0, 0). */
  val Mid = Pos(0, 0)

  /**
   * Parses an "x:y" string (the format produced by `Pos.toString`) into a Pos.
   * Malformed input fails at runtime, exactly as the original pattern binding did.
   */
  def apply(s: String): Pos =
    s.split(":").map(_.toInt) match {
      case Array(x, y) => Pos(x, y)
    }
}
case class Pos(x: Int, y: Int) {
  /** Index of this cell in a row-major grid of width `size`. */
  def linear(size: Int) = y * size + x

  /** Straight-line (Euclidean) distance to `other`. */
  def euclideanDistance(other: Pos) = {
    val dx = other.x - x
    val dy = other.y - y
    math.sqrt(dx * dx + dy * dy)
  }

  /** Chebyshev distance: the larger of the absolute axis deltas. */
  def distance(other: Pos) = math.max((other.x - x).abs, (other.y - y).abs)

  def +(other: Pos) = Pos(x + other.x, y + other.y)
  def +(dir: Direction): Pos = this + dir.offset
  def -(other: Pos) = Pos(x - other.x, y - other.y)
  def *(value: Int) = Pos(x * value, y * value)
  def /(value: Int) = Pos(x / value, y / value)

  /** Renders as "x:y", the format accepted by the companion's `apply(String)`. */
  override def toString = s"$x:$y"

  override def equals(that: Any) = that match {
    case p: Pos => p.x == x && p.y == y
    case _ => false
  }

  override def hashCode = x * 31 + y
}
package com.hindog.grid.launch
import com.hindog.grid.{ClasspathUtils, GridConfig, GridExecutor}
import com.hindog.grid.repo.{Resource, _}
import com.typesafe.scalalogging.Logger
import io.github.lukehutch.fastclasspathscanner.FastClasspathScanner
import org.apache.commons.io.FileUtils
import scala.collection.{mutable, Iterable}
import scala.collection.mutable.ListBuffer
import java.io.File
import java.lang.management.ManagementFactory
import java.util.Properties
import java.util.concurrent.Callable
trait RemoteLauncher[C] {
  // Concrete launcher's self type used when building submit arguments.
  type Repr
  import RemoteLauncher._
  /**
   * Build remote submit command
   */
  // def submitCommand(args: Array[String]): Array[String]
  def buildProcess(args: Array[String]): ProcessBuilder
  // True when we are already running inside a submitted process (see main()).
  protected def isSubmitted: Boolean
  protected def shellCommand: Iterable[String]
  // Read/write access to the launcher-specific configuration object of type C.
  protected def confGetOption(conf: C, key: String): Option[String]
  protected def confSetIfMissing(conf: C, key: String, value: String): C
  // Registered argument builders, applied in order when the submit command is assembled.
  @transient protected val arguments: ListBuffer[ArgumentBuilder[Repr, C]] = mutable.ListBuffer[ArgumentBuilder[Repr, C]]()
  // Registers a "--arg value" pair sourced from a configuration key.
  protected def arg(submitArg: String, confKey: String): Unit = arguments += Argument[Repr, C](Option(submitArg), Option(confKey), None, (_, conf) => confGetOption(conf, confKey))
  // Registers a "--arg value" pair sourced from an arbitrary accessor.
  protected def arg(submitArg: String, accessor: (Repr, C) => Option[String]): Unit = arguments += Argument[Repr, C](Option(submitArg), None, None, accessor)
  // Registers a bare flag emitted when the configuration key evaluates to "true".
  protected def flag(submitFlag: String, confKey: String): Unit = arguments += Argument[Repr, C](None, Option(confKey), Option(submitFlag), (_, conf) => confGetOption(conf, confKey).map(_.toLowerCase))
  // Registers a flag that is always emitted.
  protected def flag(submitFlag: String): Unit = arguments += Argument[Repr, C](None, None, Option(submitFlag), (_, _) => Some("true"))
  // Registers a flag emitted when the boolean accessor returns true.
  protected def flag(submitFlag: String, accessor: (Repr, C) => Boolean): Unit = arguments += Argument[Repr, C](None, None, Option(submitFlag), (instance, conf) => if (accessor(instance, conf)) Some("true") else None)
  // Grid configuration keyed by the concrete class's simple name.
  protected def grid: GridConfig = RemoteLaunch.gridConfig(getClass.getSimpleName.stripSuffix("$"))
  // Hook for subclasses to customize the grid configuration; identity by default.
  protected def onGridConfig(grid: GridConfig): GridConfig = grid
  // Optional jar-cache repository, instantiated reflectively from launch args.
  protected def repository: Option[Repository] = {
    // TODO: source props from configuration file
    val props = new Properties()
    RemoteLaunch.launchArgs.jarCacheRepositoryClass.toOption.map(cls => Class.forName(cls).getConstructor(classOf[Properties]).newInstance(props).asInstanceOf[Repository])
  }
  // Applies launch-time conf overrides, but only on the local (pre-submit) side.
  protected def configure(args: Array[String], conf: C): C = {
    if (!isSubmitted) {
      RemoteLaunch.launchArgs.conf.foldLeft(conf)((acc, cur) => confSetIfMissing(acc, cur._1, cur._2))
    } else conf
  }
  protected def loadDefaults: Boolean = RemoteLaunch.launchArgs.loadDefaults()
  // Fully-qualified main class name (object suffix "$" stripped).
  protected def mainClass: String = getClass.getName.stripSuffix("$")
  // Locates the classpath element (jar) that contains this launcher class.
  def applicationJar: String = {
    import scala.collection.JavaConverters._
    val result = new FastClasspathScanner(this.getClass.getName).ignoreFieldVisibility().ignoreMethodVisibility().suppressMatchProcessorExceptions().scan(10)
    val classInfoMap = result.getClassNameToClassInfo.asScala.filter(_._1 == this.getClass.getName)
    if (classInfoMap.isEmpty) throw new IllegalStateException(s"Unable to find classpath jar containing main-class: $mainClass")
    else {
      classInfoMap.head._2.getClasspathElementFile.toString
    }
  }
  // Uploads non-JDK classpath entries to the repository (when configured) and
  // returns the resources the cluster should use; JDK jars are excluded.
  protected def buildClusterClasspath(classpath: Iterable[Resource]): Iterable[Resource] = filterClusterClasspath {
    import scala.collection.JavaConverters._
    repository match {
      case Some(repo) => try {
        val jdkJars = FileUtils.listFiles(new File(System.getProperty("java.home")), Array(".jar"), true).asScala.map { f =>
          f.toURI.toURL.toString
        }.toSet
        classpath.filter { cp => !jdkJars.contains(cp.uri.toURL.toString) }.map(repo.put)
      } finally {
        //repo.close()
      }
      case None => classpath
    }
  }
  // Hook for subclasses to prune the cluster classpath; identity by default.
  protected def filterClusterClasspath(classpath: Iterable[Resource]): Iterable[Resource] = classpath
  def main(args: Array[String]): Unit = {
    /*
      Detect if we are running via submit command, if so, run as normal, otherwise invoke remote launch...
     */
    if (isSubmitted) {
      run(args)
    } else {
      val gridConfig = grid.withInheritedSystemPropertiesFilter(_.startsWith("grid."))
      val retCode = GridExecutor.withInstance(gridConfig) { executor =>
        val task = executor.submit(new Callable[Int] with Serializable {
          override def call(): Int = {
            import scala.collection.JavaConverters._
            val logger = Logger(mainClass)
            logger.info(s"Running $mainClass remotely under process: ${ManagementFactory.getRuntimeMXBean.getName}")
            val builder = buildProcess(args)
            logger.info(s"Submit Command: \\n\\n" + builder.command().asScala.mkString(" ") + "\\n")
            val env = builder.environment()
            env.put(submitEnvFlag, "true")
            val process = builder.inheritIO().start()
            val ret = process.waitFor()
            ret
          }
        })
        try {
          task.get()
        } finally {
          // pause a bit to wait for StdOut/StdErr streams
          Thread.sleep(1000)
        }
      }
      System.exit(retCode)
    }
  }
  // Implemented by the concrete launcher: the actual application entry point.
  def run(args: Array[String])
}
object RemoteLauncher {

  /** Environment variable marking a process as having been submitted by the launcher. */
  val submitEnvFlag = "GRID_EXECUTOR_SUBMIT"

  /** A function producing zero or more command-line tokens from a launcher instance and its conf. */
  type ArgumentBuilder[T, C] = (T, C) => Iterable[String]

  /**
   * A single submit-command argument. Depending on which fields are set it renders
   * either as a bare flag (when the accessor yields "true") or as an "--arg value"
   * pair; an absent accessor value renders nothing.
   */
  case class Argument[T, C](submitArg: Option[String], confKey: Option[String], flag: Option[String], accessor: (T, C) => Option[String]) extends ((T, C) => Iterable[String]) with Serializable {
    def apply(instance: T, conf: C): Iterable[String] =
      accessor(instance, conf) match {
        case None => Iterable.empty
        case Some(value) =>
          flag match {
            case Some(f) =>
              // Flag form: emit the bare flag only when the (re-evaluated)
              // accessor yields "true", ignoring case.
              if (accessor(instance, conf).map(_.toLowerCase).contains("true")) Iterable(f)
              else Iterable.empty
            case None =>
              // Pair form: emit "arg value" when a submit argument name exists.
              submitArg.fold(Iterable.empty[String])(arg => Iterable(arg, value))
          }
      }
  }
}
package com.twitter.concurrent
/**
* Token representing an interest in a resource and a way to release that interest.
*/
trait Permit {
  /**
   * Indicate that you are done with your Permit, releasing the interest in
   * the underlying resource that this Permit represents.
   *
   * @note calling this multiple times will result in undefined behavior.
   */
  def release(): Unit
}
| twitter/util | util-core/src/main/scala/com/twitter/concurrent/Permit.scala | Scala | apache-2.0 | 305 |
package controllers
import models.UserData
import play.api.mvc.{Action, Controller}
/**
* Created by knoldus on 7/3/17.
*/
class ProfileController extends Controller {

  /**
   * Shows the profile page of the currently logged-in user.
   * No "connected" session entry -> signup form; unknown user -> home page.
   */
  def showProfile = Action { request =>
    request.session.get("connected") match {
      case Some(user) =>
        val userData = UserData.getUser(user)
        if (userData.isEmpty) Redirect(routes.HomeController.index())
        else Ok(views.html.userprofile(userData("name"))(userData))
      case None =>
        Redirect(routes.SignupController.showSignupForm())
    }
  }
}
| ashishknoldus/play-scala-assignment | app/controllers/ProfileController.scala | Scala | apache-2.0 | 569 |
package org.abhijitsarkar.feign.service
import org.abhijitsarkar.feign.api.model.Request
import org.abhijitsarkar.feign.api.persistence.IdGenerator
/**
* @author Abhijit Sarkar
*/
final class DefaultIdGenerator extends IdGenerator {
  // Captures the first path segment, tolerating an optional leading slash.
  val pattern = """^(?:/?)([^/]+)(?:.*)$""".r

  /**
   * Derives an id of the form "<firstSegment>-<pathHashCode>"; the segment
   * falls back to "unknown" when the path has no match.
   */
  override def id(request: Request): String = {
    val prefix = pattern.findFirstMatchIn(request.path) match {
      case Some(m) => m.group(1)
      case None => "unknown"
    }
    s"$prefix-${request.path.hashCode}"
  }
}
| abhijitsarkar/feign | app/org/abhijitsarkar/feign/service/DefaultIdGenerator.scala | Scala | apache-2.0 | 489 |
package edu.neu.coe.csye._7200.enums.enumeration
/**
* @author scalaprof
*/
import Rank.{Ace, Deuce, Eight, Five, Four, King, Knave, Nine, Queen, Seven, Six, Ten, Trey}
import Suit.{Clubs, Diamonds, Hearts, Spades}
import org.scalatest._
class CardsSpec_Enumeration extends FlatSpec with Matchers with Inside {
  "ranks" should "be ordered properly" in {
    assert(Ace > King)
    // Fixed: "Ace" is a Rank value, not a Suit value. Suit.withName("Ace")
    // throws NoSuchElementException (Suit only has Clubs/Diamonds/Hearts/Spades),
    // and the subsequent equality with Rank's Ace could never hold.
    val ace = Rank.withName("Ace")
    assert(ace == Ace)
    val rankList = List(Ace,Trey,Four,Queen, Knave, Nine, Seven, Six,Deuce,Five,King,Ten,Eight)
    rankList.sorted.reverse shouldBe List(Ace,King,Queen,Knave,Ten,Nine,Eight,Seven,Six,Five,Four,Trey,Deuce)
  }
  it should "distinguish honors" in {
    assert(Ace.isHonor)
    assert(Deuce.isSpot)
  }
  "suits" should "be ordered properly" in {
    assert(Spades > Clubs)
    val suitList = List(Clubs,Hearts,Spades,Diamonds)
    suitList.sorted.reverse shouldBe List(Spades,Hearts,Diamonds,Clubs)
  }
  it should "know the color" in {
    assert(Hearts.isRed)
    assert(Spades.isBlack)
  }
}
| rchillyard/Scalaprof | FunctionalProgramming/src/test/scala/edu/neu/coe/csye/_7200/enums/enumeration/CardsSpec_Enumeration.scala | Scala | gpl-2.0 | 1,041 |
package scala.meta
package ui
import scala.annotation.implicitNotFound
import org.scalameta.show._
import Show.{sequence => s, repeat => r, indent => i, newline => n, meta => m, adorn => a, function => fn}
// Typeclass marker for rendering a value as source code; instances live in the companion.
@implicitNotFound(msg = "don't know how to show[Code] for ${T} (if you're prettyprinting a tree, be sure to import a dialect, e.g. scala.meta.dialects.Scala211)")
trait Code[T] extends Show[T]
object Code {
  // Builds a Code instance from a plain rendering function.
  def apply[T](f: T => Show.Result): Code[T] = new Code[T] { def apply(input: T) = f(input) }
  // Trees print by rendering each of their tokens; tokenization requires an
  // implicit Dialect in scope (hence the implicitNotFound hint on the trait).
  implicit def codeTree[T <: Tree](implicit dialect: Dialect): Code[T] = Code { (x: Tree) => s(x.tokens.map(_.show[Code]).mkString) }
  // A token prints as its literal source text (`code`).
  implicit def codeToken[T <: Token]: Code[T] = Code { x => s(x.code) }
}
| mdemarne/scalameta | scalameta/src/main/scala/scala/meta/ui/ShowCode.scala | Scala | bsd-3-clause | 717 |
package org.scalaide.core.semantichighlighting.classifier
import org.scalaide.core.internal.decorators.semantichighlighting.classifier.SymbolTypes._
import org.junit._
class MethodParamTest extends AbstractSymbolClassifierTest {
  // A plain method parameter is classified as Param at both its declaration
  // and its use inside the method body.
  @Test
  def basic_parameter(): Unit = {
    checkSymbolClassification("""
      object A {
        {
          def method(ppppppp: Int) = ppppppp
        }
      }""", """
      object A {
        {
          def method($PARAM$: Int) = $PARAM$
        }
      }""",
      Map("PARAM" -> Param))
  }
  // An anonymous-function parameter is classified as Param as well.
  @Test
  def function_param(): Unit = {
    checkSymbolClassification("""
      object A {
        {
          List(3) map { ppppppp => ppppppp * 2 }
        }
      }""", """
      object A {
        {
          List(3) map { $PARAM$ => $PARAM$ * 2 }
        }
      }""",
      Map("PARAM" -> Param))
  }
  // Named arguments at a call site are classified as Param, matching the declaration.
  @Test
  def named_arguments(): Unit = {
    checkSymbolClassification("""
      object A {
        def foo(ppppppp: String) = 42
        {
          foo(ppppppp = "wibble")
        }
      }""", """
      object A {
        def foo($PARAM$: String) = 42
        {
          foo($PARAM$ = "wibble")
        }
      }""",
      Map("PARAM" -> Param))
  }
  // Named arguments inside annotations are classified as Param too.
  @Test
  def annotation_arguments(): Unit = {
    checkSymbolClassification("""
      @SuppressWarnings(value = Array("all"))
      class A
      """, """
      @SuppressWarnings($PRM$ = Array("all"))
      class A
      """,
      Map("PRM" -> Param))
  }
  // Named arguments of a case-class constructor call are classified as Param
  // (note: only the call-site name is expected to be highlighted here).
  @Test
  def case_constructor_arguments(): Unit = {
    checkSymbolClassification("""
      case class Bob(param: Int) {
        Bob(param = 42)
      }""", """
      case class Bob(param: Int) {
        Bob($PRM$ = 42)
      }""",
      Map("PRM" -> Param))
  }
}
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.kafka.tools.status
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.kafka.tools.{KafkaDataStoreCommand, OptionalZkPathParams}
import org.locationtech.geomesa.kafka10.KafkaUtils10
import org.locationtech.geomesa.tools.Command
class KafkaGetTypeNamesCommand extends KafkaDataStoreCommand {
  override val name = "get-names"
  override val params = new KafkaGetTypeNamesParams()

  /**
   * Lists GeoMesa feature type names by delegating to the runtime-loaded executor.
   */
  override def execute(): Unit = {
    // We instantiate the class at runtime to avoid classpath dependencies from commands that are not being used.
    // Fixed: previously this invoked run(), which the executor overrides as an
    // empty no-op -- the command silently did nothing. The listing logic lives
    // in the executor's execute() method.
    new KafkaGetTypeNamesCommandExecutor(params).execute()
  }
}
@Parameters(commandDescription = "List GeoMesa features for a given zkPath")
class KafkaGetTypeNamesParams extends OptionalZkPathParams {
  // Listing runs as a consumer; partitions/replication only apply to producers,
  // so they are left unset here.
  override val isProducer: Boolean = false
  override var partitions: String = null
  override var replication: String = null
}
class KafkaGetTypeNamesCommandExecutor(override val params: KafkaGetTypeNamesParams) extends Runnable with KafkaDataStoreCommand {
  import org.I0Itec.zkclient.ZkClient
  import org.I0Itec.zkclient.exception.ZkNoNodeException
  override val name = ""
  // Runnable.run is intentionally a no-op; the listing logic is in execute().
  override def run(): Unit = {}

  /**
   * Lists feature type names. With an explicit zkPath, queries the data store
   * directly; without one, scans all Kafka topics and reports those that look
   * like GeoMesa-created topics (name contains '-').
   */
  override def execute(): Unit = {
    if (params.zkPath == null) {
      Command.user.info(s"Running List Features without zkPath...")
      val zkUtils = KafkaUtils10.createZkUtils(params.zookeepers, Int.MaxValue, Int.MaxValue)
      try {
        zkUtils.getAllTopics.filter(_.contains('-')).foreach(printZkPathAndTopicString(zkUtils.zkClient, _))
      } finally {
        // Always release the zookeeper connection, even if the scan fails.
        zkUtils.close()
      }
    } else {
      Command.user.info(s"Running List Features using zkPath ${params.zkPath}...")
      withDataStore(_.getTypeNames.foreach(Command.output.info))
    }
  }

  /**
   * Fetches schema info from zookeeper to check if the topic is one created by GeoMesa.
   * Prints zkPath and SFT name if valid.
   *
   * Works by progressively re-splitting the topic name on '-' with fewer and
   * fewer segments, probing each candidate "<zkPath>/<sft>/Topic" node until
   * one round-trips back to the original topic name.
   *
   * @param zkClient zookeeper client used for the lookups
   * @param topic    the kafka topic
   */
  def printZkPathAndTopicString(zkClient: ZkClient, topic: String): Unit = {
    // Fixed: removed an unused `val sb = new StringBuilder()` local.
    var tokenizedTopic = topic.split("-")
    var tokenizedTopicCount = tokenizedTopic.length
    while (tokenizedTopicCount > 1) {
      try {
        val topicName = zkClient.readData[String](getTopicNamePath(tokenizedTopic)) // throws ZkNoNodeException if not valid
        if (topicName.equals(topic)) {
          Command.user.info(s"/${tokenizedTopic.take(tokenizedTopicCount-1).mkString("/")} - ${tokenizedTopic.last}")
          return
        }
      } catch {
        case _: ZkNoNodeException =>
          // wrong zkPath and schema name combo
      } finally {
        // Try the next candidate split: one fewer '-' segment each iteration.
        tokenizedTopicCount -= 1
        tokenizedTopic = topic.split("-", tokenizedTopicCount)
      }
    }
  }

  // Candidate zookeeper node holding the topic name for a tokenized zkPath/sft.
  private def getTopicNamePath(tokenizedTopic: Array[String]): String = {
    s"/${tokenizedTopic.mkString("/")}/Topic"
  }
}
| MutahirKazmi/geomesa | geomesa-kafka/geomesa-kafka-tools/geomesa-kafka-10-tools/src/main/scala/org/locationtech/geomesa/kafka/tools/status/GetNamesCommand.scala | Scala | apache-2.0 | 3,339 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder
import java.nio.file.Path
import io.gatling.recorder.cli.ArgsParser
import io.gatling.recorder.config.RecorderConfiguration
import io.gatling.recorder.controller.RecorderController
object GatlingRecorder {

  /** JVM entry point: delegates straight to [[fromArgs]]. */
  def main(args: Array[String]): Unit = fromArgs(args)

  /** Parses command-line arguments and, when parsing succeeds, starts the recorder. */
  def fromArgs(args: Array[String]): Unit =
    new ArgsParser(args).parseArguments.foreach(overrides => initRecorder(overrides, None))

  /** Starts the recorder from programmatic overrides and an optional config file. */
  def fromMap(props: ConfigOverrides, recorderConfigFile: Option[Path] = None) =
    initRecorder(props, recorderConfigFile)

  // Applies the configuration and boots the recorder controller.
  private def initRecorder(props: ConfigOverrides, recorderConfigFile: Option[Path]) = {
    RecorderConfiguration.initialSetup(props, recorderConfigFile)
    new RecorderController
  }
}
| ryez/gatling | gatling-recorder/src/main/scala/io/gatling/recorder/GatlingRecorder.scala | Scala | apache-2.0 | 1,397 |
package chapter11
object Exercise1 {

  /** Monad instance for Option: unit wraps in Some; flatMap delegates to Option. */
  def optionMonad: Monad[Option] = new Monad[Option] {
    def unit[A](a: => A): Option[A] = Some(a)

    def flatMap[A, B](fa: Option[A])(f: A => Option[B]): Option[B] = fa flatMap f
  }

  /** Monad instance for Stream. */
  def streamMonad: Monad[Stream] = new Monad[Stream] {
    def unit[A](a: => A): Stream[A] = Stream(a)

    def flatMap[A, B](fa: Stream[A])(f: A => Stream[B]): Stream[B] = fa flatMap f
  }

  /** Monad instance for List. */
  def listMonad: Monad[List] = new Monad[List] {
    def unit[A](a: => A): List[A] = List(a)

    def flatMap[A, B](fa: List[A])(f: A => List[B]): List[B] = fa flatMap f
  }

  /** Smoke-tests unit/map/map2 for all three instances via assertions. */
  def main(args: Array[String]): Unit = {
    val optM = optionMonad
    val listM = listMonad
    val streamM = streamMonad

    // unit lifts a plain value into the monad
    assert(optM.unit(1) == Some(1))
    assert(listM.unit(1) == List(1))
    assert(streamM.unit(1) == Stream(1))

    // map on present and empty values
    assert(optM.map(Option(1))(_ + 1) == Some(2))
    assert(optM.map(None: Option[Int])(_ + 1) == None)
    assert(listM.map(List(1))(_ + 1) == List(2))
    assert(listM.map(Nil: List[Int])(_ + 1) == Nil)
    assert(streamM.map(Stream(1))(_ + 1) == Stream(2))
    assert(streamM.map(Stream.empty[Int])(_ + 1) == Stream.empty[Int])

    // map2 combines two values; any empty operand yields empty
    assert(optM.map2(Some(1), Some(2))(_ + _) == Some(3))
    assert(optM.map2(None: Option[Int], Some(2))(_ + _) == None)
    assert(optM.map2(Some(1), None)(_ + _) == None)
    assert(listM.map2(List(1), List(2))(_ + _) == List(3))
    assert(listM.map2(Nil: List[Int], List(2))(_ + _) == Nil)
    assert(listM.map2(List(1), Nil)(_ + _) == Nil)

    val emptyStream = Stream.empty[Int]
    assert(streamM.map2(Stream(1), Stream(2))(_ + _) == Stream(3))
    assert(streamM.map2(emptyStream, Stream(2))(_ + _) == emptyStream)
    assert(streamM.map2(Stream(1), emptyStream)(_ + _) == emptyStream)

    println("All tests successful")
  }
}
package top.myetl.lucenerdd
import com.holdenkarau.spark.testing.SharedSparkContext
import org.apache.lucene.document.Document
import org.apache.lucene.search.ScoreDoc
import org.apache.spark.sql.catalyst.InternalRow
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Ignore, Matchers}
import top.myetl.lucenerdd.convert.DocToBean
import top.myetl.lucenerdd.util.{Constants, LuceneRDDKryoRegistrator}
import top.myetl.lucenerdd.query.MyQuery._
import top.myetl.lucenerdd.rdd.LuceneRDD
/**
* Created by pengda on 17/1/5.
*/
// Integration test for LuceneRDD backed by a Lucene index stored in HDFS.
// Marked @Ignore because it depends on an external HDFS instance
// (hdfs://ubuntu:9000) that is not available in a normal CI run.
@Ignore
class SparkContextTest extends FlatSpec
  with Matchers
  with BeforeAndAfterEach
  with SharedSparkContext {

  // RDD under test; initialised once in beforeAll and shared by all cases.
  var rdd : LuceneRDD[_] = _

  override def beforeAll(): Unit = {
    // Point the library at the HDFS directory holding the Lucene indices and
    // register the custom Kryo serializers before the SparkContext starts.
    conf.set(Constants.HdfsBaseDirKey, "hdfs://ubuntu:9000/sparklu/")
    conf.setAppName("test1app")
    LuceneRDDKryoRegistrator.registerKryoClasses(conf)
    super.beforeAll()
    // Renders each Lucene hit as "docId -> <_all field>" for easy inspection.
    val convert = new DocToBean[String] {
      override def toBean(score: ScoreDoc, doc: Document): String = score.doc.toString+" -> "+doc.get("_all")
    }
    rdd = sc.luceneRDD("test")(convert)
    rdd.setName("rdd")
  }

  // Term and boolean (should) queries against the "_all" field.
  "sparkContext" should "SparkContext functions" in {
    val queryRdd = rdd.query(term("_all", "测试9999"))
    println(queryRdd.take(2).map(println))
    println(rdd.query(should(term("_all","测试9999"), term("_all", "测试12345"))).collect().map(println))
    // queryRdd.take(5).map(println)
    // println("---------------------------------- query result "+queryRdd.query(term("_all", "123")).count())
    // println("---------------------------------- query result "+queryRdd.query(term("_all", "456")).count())
  }

  // Lists the field names present in the index.
  "get fields" should "show fields" in {
    rdd.fields().map(println)
  }

  // Sanity check that sqlContext implicits are usable alongside LuceneRDD.
  "just test" should "sqlContext.implicits" in {
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    import sqlContext.implicits._
    Seq(1,2,3).toDS()
    InternalRow
  }
}
| myetl/sparkLu | sparklu-core/src/test/scala/top/myetl/lucenerdd/SparkContextTest.scala | Scala | apache-2.0 | 1,920 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence.slick
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcPersistentEntityRegistry
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence._
import scala.concurrent.Future
import scala.concurrent.duration.DurationDouble
// Runs the shared read-side test suite (AbstractReadSideSpec) against the
// Slick-based read-side implementation, backed by the JDBC persistent entity
// registry.
class SlickReadSideSpec
    extends SlickPersistenceSpec(TestEntitySerializerRegistry)
    with AbstractReadSideSpec {
  import system.dispatcher

  override protected lazy val persistentEntityRegistry = new JdbcPersistentEntityRegistry(system, slick)

  // Processor under test: projects TestEntity events into a Slick-managed table.
  override def processorFactory(): ReadSideProcessor[Evt] =
    new SlickTestEntityReadSide.TestEntityReadSideProcessor(slickReadSide, slick.db, slick.profile)

  lazy val readSide = new SlickTestEntityReadSide(slick.db, slick.profile)

  // Number of appended events recorded on the read side for the given entity id.
  override def getAppendCount(id: String): Future[Long] = readSide.getAppendCount(id)

  override def afterAll(): Unit = {
    // Let in-flight persistence operations drain before tearing everything down.
    persistentEntityRegistry.gracefulShutdown(5.seconds)
    super.afterAll()
  }
}
| rstento/lagom | persistence-jdbc/scaladsl/src/test/scala/com/lightbend/lagom/scaladsl/persistence/slick/SlickReadSideSpec.scala | Scala | apache-2.0 | 1,113 |
/*
* Copyright (C) 2014 - 2016 Softwaremill <http://softwaremill.com>
* Copyright (C) 2016 - 2019 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.kafka.benchmarks
import akka.Done
import akka.actor.ActorSystem
import akka.kafka.ConsumerMessage.{Committable, CommittableMessage}
import akka.kafka.ProducerMessage.Envelope
import akka.kafka.benchmarks.app.RunTestCommand
import akka.kafka.scaladsl.Consumer.{Control, DrainingControl}
import akka.kafka.scaladsl.{Committer, Consumer, Producer}
import akka.kafka._
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import com.codahale.metrics.Meter
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{
ByteArrayDeserializer,
ByteArraySerializer,
StringDeserializer,
StringSerializer
}
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.util.Success
/**
 * Benchmark fixture: the topic read from, the topic written to, the number of
 * messages expected, the committable source, and the sink under measurement.
 */
case class AlpakkaCommittableSinkTestFixture[SOut, FIn](sourceTopic: String,
                                                        sinkTopic: String,
                                                        msgCount: Int,
                                                        source: Source[SOut, Control],
                                                        sink: Sink[FIn, Future[Done]])
/**
 * Builds benchmark fixtures for committable-sink scenarios. The two public
 * factories differ only in the sink they install, so fixture construction is
 * shared in a single private helper.
 */
object AlpakkaCommittableSinkFixtures extends PerfFixtureHelpers {
  type Key = Array[Byte]
  type Val = String
  type Message = CommittableMessage[Key, Val]
  type ProducerMessage = Envelope[Key, Val, Committable]

  // Consumer reads byte-array keys / string values from the beginning of the
  // topic, with fresh group/client ids per run so fixtures never share offsets.
  private def createConsumerSettings(kafkaHost: String)(implicit actorSystem: ActorSystem) =
    ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
      .withBootstrapServers(kafkaHost)
      .withGroupId(randomId())
      .withClientId(randomId())
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  private def createProducerSettings(
      kafkaHost: String
  )(implicit actorSystem: ActorSystem): ProducerSettings[Array[Byte], String] =
    ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
      .withBootstrapServers(kafkaHost)

  // Shared fixture construction: fills the source topic, wires a committable
  // consumer source, and pairs it with the supplied sink. `sink` is by-name so
  // it is only built when the fixture is actually generated (same timing as
  // before, where the sink was created inside the generator closure).
  private def fixtureWithSink(
      c: RunTestCommand
  )(sink: => Sink[ProducerMessage, Future[Done]])(implicit actorSystem: ActorSystem) =
    FixtureGen[AlpakkaCommittableSinkTestFixture[Message, ProducerMessage]](
      c,
      msgCount => {
        fillTopic(c.filledTopic, c.kafkaHost)
        val sinkTopic = randomId()
        val source: Source[Message, Control] =
          Consumer.committableSource(createConsumerSettings(c.kafkaHost), Subscriptions.topics(c.filledTopic.topic))
        AlpakkaCommittableSinkTestFixture[Message, ProducerMessage](c.filledTopic.topic,
                                                                   sinkTopic,
                                                                   msgCount,
                                                                   source,
                                                                   sink)
      }
    )

  /** Fixture whose sink is Alpakka's built-in `Producer.committableSink`. */
  def producerSink(c: RunTestCommand)(implicit actorSystem: ActorSystem) =
    fixtureWithSink(c) {
      Producer.committableSink(createProducerSettings(c.kafkaHost), CommitterSettings(actorSystem))
    }

  /** Fixture whose sink is a hand-composed `flexiFlow` + `Committer.sink` pipeline. */
  def composedSink(c: RunTestCommand)(implicit actorSystem: ActorSystem) =
    fixtureWithSink(c) {
      Producer
        .flexiFlow[Key, Val, Committable](createProducerSettings(c.kafkaHost))
        .map(_.passThrough)
        .toMat(Committer.sink(CommitterSettings(actorSystem)))(Keep.right)
    }
}
/**
 * Runs a committable-sink benchmark: copies `msgCount` records from the
 * fixture's source topic to its sink topic, marking a meter per element, and
 * blocks until the expected offset has been reached.
 */
object AlpakkaCommittableSinkBenchmarks extends LazyLogging {
  import AlpakkaCommittableSinkFixtures.{Message, ProducerMessage}
  // Upper bound on how long we wait for the stream to process all messages.
  val streamingTimeout: FiniteDuration = 30.minutes
  type Fixture = AlpakkaCommittableSinkTestFixture[Message, ProducerMessage]
  def run(fixture: Fixture, meter: Meter)(implicit mat: Materializer): Unit = {
    logger.debug("Creating and starting a stream")
    val msgCount = fixture.msgCount
    val sinkTopic = fixture.sinkTopic
    val source = fixture.source
    // Completed from inside the stream once the last expected offset is seen.
    val promise = Promise[Unit]
    // NOTE(review): despite the name, `logPercentStep` is compared against the
    // message count and `loggedStep` is an absolute offset interval — progress
    // is logged every 100 offsets (or every offset for tiny runs), not per 1%.
    val logPercentStep = 1
    val loggedStep = if (msgCount > logPercentStep) 100 else 1
    val control = source
      .map { msg =>
        // Re-publish each consumed record to the sink topic, carrying the
        // committable offset through as pass-through for the committing sink.
        ProducerMessage.single(new ProducerRecord[Array[Byte], String](sinkTopic, msg.record.value()),
                               msg.committableOffset)
      }
      .map { msg =>
        meter.mark()
        val offset = msg.passThrough.partitionOffset.offset
        if (offset % loggedStep == 0)
          logger.info(s"Transformed $offset elements to Kafka (${100 * offset / msgCount}%)")
        // Signal completion when the final expected offset flows past.
        if (offset >= fixture.msgCount - 1)
          promise.complete(Success(()))
        msg
      }
      .toMat(fixture.sink)(Keep.both)
      .mapMaterializedValue(DrainingControl.apply)
      .run()
    // Block until the last message was observed, then drain and stop cleanly.
    Await.result(promise.future, streamingTimeout)
    control.drainAndShutdown()(mat.executionContext)
    logger.debug("Stream finished")
  }
}
| softwaremill/reactive-kafka | benchmarks/src/main/scala/akka/kafka/benchmarks/AlpakkaCommittableSinkFixtures.scala | Scala | apache-2.0 | 5,807 |
package com.crockeo.clasp
// Evaluation of language constructs.
object Eval {
  import Implicits._
  import Language._
  import Result._

  ////
  // Applying a function.

  // Performing a function application: binds the evaluated arguments to the
  // function's formal parameters in a pushed (child) context, evaluates the
  // body there, and returns the result paired with the *caller's* context so
  // local bindings do not leak out.
  private def applyFn(t: Token, c: Context): ClaspResult = t match {
    case TList(TFunction(argNames, body) :: args) =>
      if (argNames.length != args.length)
        Left(new ClaspError(ClaspError.SyntaxError, s"Too ${if (argNames.length < args.length) "many" else "few"} arguments, expected ${argNames.length}, got ${args.length}."))
      else {
        // NOTE(review): this inner match is non-exhaustive — if a formal
        // parameter is not a TAtom this throws scala.MatchError instead of
        // returning a ClaspError; confirm the parser guarantees TAtom params.
        val ec = argNames.zip(args).foldLeft(c.push)((c, as) => as match {
          case (TAtom(name), a) => c + (name -> a)
        })
        (body match {
          // Executing built-ins.
          case TList(TAtom(name) :: xs) if (Builtin.contains(name)) =>
            Eval(body, ec)
          // Executing functions.
          case TList(TFunction(args, body) :: xs) =>
            Eval(body, ec)
          // Executing lists of commands: threads the context through each
          // statement, keeping the value of the last one.
          case TList(b) => b.foldLeft(Right(t, ec): ClaspResult)((p, t) => for {
            ep <- p
            et <- Eval(t, ep._2)
          } yield et)
          // Executing anything else.
          case _ => Eval(body, ec)
        }) match {
          case Left(err) => Left(err)
          case Right((t, _)) => Right(t, c) // Escaping out of the context.
        }
      }
    case _ => Left(new ClaspError(ClaspError.SyntaxError, "Malformed function application."))
  }

  ////
  // General evaluation.

  // Evaluating a list of arguments left-to-right, threading the context;
  // stops at the first error.
  def evalList(l: List[Token], c: Context): Either[ClaspError, (List[Token], Context)] =
    l.foldRight(Right(List(), c): Either[ClaspError, (List[Token], Context)])((v, ep) => for {
      p <- ep
      e <- Eval(v, p._2)
    } yield (e._1 :: p._1, e._2))

  // Evaluating a token into its reduced form.
  def apply(t: Token, c: Context): ClaspResult = t match {
    // Variable replacement.
    case TAtom(s) if (c.contains(s)) => Right((c(s), c))
    // We have to single out defn out of list application because it can't have
    // the rest of the arguments be applied yet.
    case TList(TAtom("defn") :: xs) =>
      Builtin("defn")(xs, c)
    // Singling out 'if' as well (its branches must stay unevaluated).
    case TList(TAtom("if") :: xs) =>
      Builtin("if")(xs, c)
    // Working with lists: evaluate all elements first, then dispatch on the
    // head — builtin call, function application, or plain data list.
    case TList(l) => evalList(l, c) match {
      case Left(err) => Left(err)
      case Right(t) => t._1 match {
        case TAtom(name) :: xs if (Builtin.contains(name)) => Builtin(name)(xs, t._2)
        case TFunction(_, _) :: xs => applyFn(TList(t._1), t._2)
        case _ => Right(TList(t._1), t._2)
      }
    }
    // Not evaluating everything else (self-evaluating literals).
    case _ => Right((t, c))
  }
}
| crockeo/clasp | src/main/scala/Eval.scala | Scala | mit | 2,817 |
package org.scalaide.core.internal.decorators.semantichighlighting.classifier
import scala.reflect.internal.util.SourceFile
import scala.tools.refactoring.common.CompilerAccess
import scala.tools.refactoring.common.EnrichedTrees
import org.scalaide.core.internal.decorators.semantichighlighting.classifier.SymbolTypes._
import org.scalaide.core.compiler.IScalaPresentationCompiler
import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits._
/**
* Return the Symbols corresponding to this `Tree`, if any.
*
* Scala trees contain symbols when they define or reference a definition. We
* need to special-case `TypeTree`, because it does not return `true` on `hasSymbol`,
* but nevertheless it may refer to a symbol through its type. Examples of `TypeTree`s
* are the type of `ValDef`s after type checking.
*
* Another special case is `Import`, because it's selectors are not trees, therefore
* they do not have a symbol. However, it is desirable to color them, so the symbols are
* looked up on the fly.
*
* A single tree may define more than one symbol, usually with the same name. For instance:
* - a 'val' defines both a private field and a getter
* - a 'var' defines a private field, and getter/setter methods
* - a class parameter defines a constructor parameter, possibly a field and a getter
* - Import will generate one per selector
*
* Lazy values are special-cased because the underlying local var has a different
* name and there are no trees for the getter yet (added by phase lazyvals). We need
* to return the accessor, who can later be classified as `lazy`.
*/
private[classifier] trait SafeSymbol extends CompilerAccess with EnrichedTrees {

  override val global: IScalaPresentationCompiler

  // The source file the positions produced here refer to.
  protected def sourceFile: SourceFile

  import global._

  /**
   * Trees that have a direct correspondence in the source code have a RangePosition.
   * TransparentPositions come into play for trees that don't have a source-code
   * correspondence but still have children that are visible in the source.
   */
  protected def isSourceTree(t: Tree): Boolean = hasSourceCodeRepresentation(t) && !t.pos.isTransparent

  // Maps the compiler-generated method names of scala.Dynamic calls to the
  // corresponding highlighting symbol type.
  private object DynamicName {
    def unapply(dynamicName: Name): Option[SymbolType] = dynamicName.toString() match {
      case "selectDynamic" => Some(DynamicSelect)
      case "updateDynamic" => Some(DynamicUpdate)
      case "applyDynamic" => Some(DynamicApply)
      case "applyDynamicNamed" => Some(DynamicApplyNamed)
      case _ => None
    }
  }

  /**
   * Finds out if a tree is a dynamic method call. Because such method calls are
   * transformed by the compiler, no symbols exist for them. Thus, this method
   * returns the SymbolType directly.
   */
  protected def findDynamicMethodCall(t: Tree): Option[(SymbolType, Position)] = t match {
    case Apply(Select(_, DynamicName(sym)), List(name)) =>
      Some(sym -> name.pos)
    case Apply(TypeApply(Select(_, DynamicName(sym)), _), List(name)) =>
      Some(sym -> name.pos)
    case _ =>
      None
  }

  // Returns the (symbol, name-position) pairs for `t` without tripping over
  // trees that have no symbol or no source representation; see the trait-level
  // scaladoc for the special cases handled here.
  protected def safeSymbol(t: Tree): List[(Symbol, Position)] = t match {

    case tpeTree: TypeTree =>
      val originalSym =
        if ((tpeTree.original eq null) || tpeTree.original == tpeTree) Nil
        else safeSymbol(tpeTree.original)

      // if the original tree did not find anything, we need to call
      // symbol, which may trigger type checking of the underlying tree, so we
      // wrap it in 'ask'
      if (originalSym.isEmpty && hasSourceCodeRepresentation(tpeTree)) {
        val tpeSym = global.asyncExec(Option(t.symbol)).getOption().flatten.toList
        tpeSym.zip(List(tpeTree.namePosition))
      } else originalSym

    case Import(expr, selectors) =>
      (for (ImportSelector(name, namePos, _, _) <- selectors) yield {
        // create a range position for this selector.
        // TODO: remove special casing once scalac is fixed, and ImportSelectors are proper trees,
        // with real positions, instead of just an Int
        val pos = rangePos(sourceFile, namePos, namePos, namePos + name.length)

        // try the type member first, fall back to the term member of the
        // qualifier's type
        val sym1 = if (expr.tpe ne null) global.asyncExec {
          val typeSym = expr.tpe.member(name.toTypeName)
          if (typeSym.exists) typeSym
          else expr.tpe.member(name.toTermName)
        }.getOrElse(NoSymbol)()
        else NoSymbol
        if (sym1 eq NoSymbol) List()
        else if (sym1.isOverloaded) global.asyncExec(sym1.alternatives.take(1).zip(List(pos))).getOrElse(Nil)()
        else List((sym1, pos))
      }).flatten

    case AppliedTypeTree(tpe, args) =>
      if(!hasSourceCodeRepresentation(tpe)) args.flatMap(safeSymbol)
      else (tpe :: args).flatMap(safeSymbol)

    case tpe @ SelectFromTypeTree(qualifier, _) =>
      global.asyncExec(tpe.symbol -> tpe.namePosition).getOption().toList ::: safeSymbol(qualifier)

    case CompoundTypeTree(Template(parents, _, body)) =>
      (if (isStructuralType(parents)) body else parents).flatMap(safeSymbol)

    case TypeBoundsTree(lo, hi) =>
      List(lo, hi).flatMap(safeSymbol)

    // synthetic evidence parameters: recover the user-visible bound from the
    // original (pre-desugaring) type tree
    case ValDef(_, name, tpt: TypeTree, _) if isProbableTypeBound(name) =>
      tpt.original match {
        case AppliedTypeTree(_, args) if isViewBound(args) =>
          safeSymbol(args(1))
        case AppliedTypeTree(tpe, args) if isContextBound(args) =>
          global.asyncExec(tpe.symbol -> tpe.namePosition).getOption().toList
        case tpt =>
          safeSymbol(tpt)
      }

    case ExistentialTypeTree(tpt, whereClauses) =>
      (tpt :: whereClauses).flatMap(safeSymbol)

    case _: LabelDef =>
      Nil

    case tpe @ Select(qualifier, _) =>
      val tpeSym = if (hasSourceCodeRepresentation(tpe)) global.asyncExec(tpe.symbol -> tpe.namePosition).getOption().toList else Nil
      val qualiSym = if(hasSourceCodeRepresentation(qualifier)) safeSymbol(qualifier) else Nil
      tpeSym ::: qualiSym

    case SingletonTypeTree(ref) =>
      safeSymbol(ref)

    case _ =>
      // the local variable backing a lazy value is called 'originalName$lzy'. We swap it here for its
      // accessor, otherwise this symbol would fail the test in `getNameRegion`
      val sym1 = Option(t.symbol).map { sym =>
        if (sym.isLazy && sym.isMutable) sym.lazyAccessor
        else sym
      }.toList

      if (!hasSourceCodeRepresentation(t)) Nil
      else sym1.zip(List(t.namePosition))
  }

  // A view bound `A <% B` desugars to an applied type with two arguments.
  private def isViewBound(args: List[Tree]): Boolean =
    args.size == 2

  // Synthetic evidence parameters all carry the compiler's evidence prefix.
  private def isProbableTypeBound(name: Name): Boolean =
    name.startsWith(nme.EVIDENCE_PARAM_PREFIX)

  // A refinement like `AnyRef { def f: Int }` has a single parent.
  private def isStructuralType(ts: List[Tree]): Boolean =
    ts.size == 1

  // A context bound `A : TC` desugars to one type argument with no
  // source-code representation.
  private def isContextBound(args: List[Tree]): Boolean =
    args.size == 1 && !hasSourceCodeRepresentation(args.head)

  /*
   * Sometimes the compiler enrich the AST with some trees not having a source
   * code representation. This is true for tuple or function literals, for
   * view and context bounds and others.
   *
   * The problem is that such trees don't have a `Range` because these trees are
   * generated by the compiler to have a representation for symbols not written
   * directly into the source code (but written in form of there corresponding
   * literals).
   *
   * Thus, calling the position of such a tree results in an exception. To avoid
   * this exception one needs to call this method to be on the safe side.
   */
  private def hasSourceCodeRepresentation(tpt: Tree): Boolean =
    tpt.pos.isRange
}
| dragos/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/decorators/semantichighlighting/classifier/SafeSymbol.scala | Scala | bsd-3-clause | 7,497 |
package org.mrgeo.spark.job
import java.io.{IOException, FileInputStream, InputStreamReader, File}
import java.util.Properties
import org.apache.spark.{Logging, SparkException, SparkConf, SparkContext}
import org.mrgeo.core.{MrGeoConstants, MrGeoProperties}
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConversions._
/**
 * Builds the SparkConf for an MrGeo job, merging spark-defaults.conf with
 * job-specific settings for local, standalone ("spark") and YARN clusters.
 */
object PrepareJob extends Logging {

  // These 3 methods are taken almost verbatim from Spark's Utils class, but they are all
  // private, so we needed to copy them here

  /** Load properties present in the given file. */
  private def getPropertiesFromFile(filename: String): Map[String, String] = {
    val file = new File(filename)
    require(file.exists(), s"Properties file $file does not exist")
    require(file.isFile(), s"Properties file $file is not a normal file")

    val inReader = new InputStreamReader(new FileInputStream(file), "UTF-8")
    try {
      val properties = new Properties()
      properties.load(inReader)
      properties.stringPropertyNames().map(k => (k, properties(k).trim)).toMap
    }
    catch {
      case e: IOException =>
        throw new SparkException(s"Failed when loading Spark properties from $filename", e)
    }
    finally {
      inReader.close()
    }
  }

  // Locates spark-defaults.conf via SPARK_CONF_DIR, then SPARK_HOME/conf;
  // returns null when neither yields an existing file.
  private def getDefaultPropertiesFile(env: Map[String, String] = sys.env): String = {
    env.get("SPARK_CONF_DIR")
      .orElse(env.get("SPARK_HOME").map { t => s"$t${File.separator}conf"})
      .map { t => new File(s"$t${File.separator}spark-defaults.conf")}
      .filter(_.isFile)
      .map(_.getAbsolutePath)
      .orNull
  }

  // Merges "spark."-prefixed properties from the defaults file into `conf`
  // (without overwriting values already set) and mirrors them into sys.props.
  // Returns the path of the file used, or null if none was found.
  private[job] def loadDefaultSparkProperties(conf: SparkConf, filePath: String = null): String = {
    val path = Option(filePath).getOrElse(getDefaultPropertiesFile())
    Option(path).foreach { confFile =>
      getPropertiesFromFile(confFile).filter { case (k, v) =>
        k.startsWith("spark.")
      }.foreach { case (k, v) =>
        conf.setIfMissing(k, v)
        sys.props.getOrElseUpdate(k, v)
      }
    }
    path
  }

  // Builds the SparkConf for the job: defaults file first, then app name,
  // master, jars and per-cluster-type memory/core settings.
  final def prepareJob(job: JobArguments): SparkConf = {

    val conf: SparkConf = new SparkConf()
    loadDefaultSparkProperties(conf)

    logInfo("spark.app.name: " + conf.get("spark.app.name", "<not set>") + " job.name: " + job.name)
    conf.setAppName(job.name)
        .setMaster(job.cluster)
        .setJars(job.jars)
    //.registerKryoClasses(registerClasses())

    // .set("spark.driver.extraClassPath", "")
    // .set("spark.driver.extraJavaOptions", "")
    // .set("spark.driver.extraLibraryPath", "")
        .set("spark.storage.memoryFraction", "0.25")

    if (job.isYarn) {
      conf.set("spark.yarn.am.cores", if (job.cores > 0) { job.cores.toString } else { "1" })
          .set("spark.executor.memory", if (job.memory != null) { job.memory } else { "128m" })
          .set("spark.executor.cores", if (job.cores > 0) { job.cores.toString } else { "1" })
          .set("spark.cores.max", if (job.cores > 0) { job.cores.toString } else { "1" })
          .set("spark.yarn.preserve.staging.files", "true")
          // running in "cluster" mode, the driver runs within a YARN process
          .setMaster(job.YARN + "-cluster")
    }
    else if (job.isSpark) {
      conf.set("spark.driver.memory", if (job.memory != null) {
        job.memory
      }
      else {
        "128m"
      })
          .set("spark.driver.cores", if (job.cores > 0) {
            job.cores.toString
          }
          else {
            "1"
          })
    }

    conf
  }

  // Configures the serializer on `conf`, preferring Kryo when enabled in the
  // MrGeo properties and supported by the running Spark version.
  def setupSerializer(mrgeoJob: MrGeoJob, job:JobArguments, conf:SparkConf) = {
    // we need to check the serializer property, there is a bug in the registerKryoClasses in Spark < 1.3.0 that
    // causes a ClassNotFoundException. So we need to add a config property to use/ignore kryo
    if (MrGeoProperties.getInstance().getProperty(MrGeoConstants.MRGEO_USE_KRYO, "false").equals("true")) {
      // Check and invoke for registerKryoClasses() with reflection, because isn't in pre Spark 1.2.0
      try {
        val method = conf.getClass.getMethod("registerKryoClasses", classOf[Array[Class[_]]])
        method.invoke(conf, mrgeoJob.registerClasses())
      }
      catch {
        case nsme: NoSuchMethodException => conf.set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
        case e: Exception => e.printStackTrace()
      }
    }
    else {
      conf.set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
    }
  }
}
| tjkrell/MrGEO | mrgeo-core/src/main/scala/org/mrgeo/spark/job/PrepareJob.scala | Scala | apache-2.0 | 4,532 |
//
// Copyright 2013, Martin Pokorny <martin@truffulatree.org>
//
// This Source Code Form is subject to the terms of the Mozilla Public License,
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
//
package org.truffulatree.scampi2
import scala.collection.mutable
import _root_.org.bridj.{Pointer, CLong}
trait PredefDatatypeComponent {
mpi2: Scampi2 with Mpi2LibraryComponent =>
  // Base class for the predefined MPI datatypes: binds the native handle,
  // registers the instance with the datatype registry, and provides the
  // single-element layout shared by all predefined types.
  sealed abstract class PredefDatatype[V](dt: mpi2.lib.MPI_Datatype)
      extends mpi2.SeqDatatype[V] {
    handlePtr.set(dt)
    mpi2.Datatype.register(this)
    override protected[scampi2] lazy val handle = super.handle
    // One logical value per datatype element.
    val multiplicity = 1
    // No-op: predefined datatypes are never freed by user code.
    def free() {}
    // Byte offset of element `idx` within a buffer of this datatype.
    def offsetTo(idx: Int): Long = idx * extent.range
    override def toString() = getClass.getSimpleName
  }

  // MPI_CHAR mapped to a single byte; only byte-representable chars are valid.
  object MpiChar extends PredefDatatype[Char](mpi2.lib.MPI_CHAR) {
    assume(extent.range == AlignHelper.byteByteSize)
    val alignment = AlignHelper.byteAlignment
    def load(p: Pointer[_], idx: Int): Char =
      p.as(classOf[Byte])(idx).toChar
    def store(p: Pointer[_], idx: Int, elem: Char) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[Byte])(idx) = elem.toByte
    }
    def isValid(ch: Char): Boolean = ch.isValidByte
  }
  // Signed 8-bit types, represented directly as JVM Byte.
  sealed abstract class Int8(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Byte](dt) {
    assume(extent.range == AlignHelper.byteByteSize)
    val alignment = AlignHelper.byteAlignment
    def load(p: Pointer[_], idx: Int): Byte =
      p.as(classOf[Byte])(idx)
    def store(p: Pointer[_], idx: Int, elem: Byte) {
      p.as(classOf[Byte])(idx) = elem
    }
  }

  object MpiSignedChar extends Int8(mpi2.lib.MPI_SIGNED_CHAR)
  object MpiInt8 extends Int8(mpi2.lib.MPI_INT8_T)
  implicit object MpiByte extends Int8(mpi2.lib.MPI_BYTE)

  // Unsigned 8-bit types, widened to JVM Short (valid range 0 until 256).
  sealed abstract class Uint8(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Short](dt) {
    assume(extent.range == AlignHelper.byteByteSize)
    val alignment = AlignHelper.byteAlignment
    // 256: one past the largest unsigned byte value.
    val maxVal = (1 << java.lang.Byte.SIZE).toShort
    def load(p: Pointer[_], idx: Int): Short = {
      val b = p.as(classOf[Byte])(idx).toShort
      // Undo two's-complement sign extension to recover the unsigned value.
      if (b < 0) (maxVal + b).toShort else b
    }
    def store(p: Pointer[_], idx: Int, elem: Short) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[Byte])(idx) = elem.toByte
    }
    def isValid(sh: Short) = 0 <= sh && sh < maxVal
  }

  object MpiUnsignedChar extends Uint8(mpi2.lib.MPI_UNSIGNED_CHAR)
  object MpiUint8 extends Uint8(mpi2.lib.MPI_UINT8_T)
  // Signed 16-bit types, represented directly as JVM Short.
  sealed abstract class Int16(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Short](dt) {
    assume(extent.range == AlignHelper.shortByteSize)
    val alignment = AlignHelper.shortAlignment
    def load(p: Pointer[_], idx: Int): Short =
      p.as(classOf[Short])(idx)
    def store(p: Pointer[_], idx: Int, elem: Short) {
      p.as(classOf[Short])(idx) = elem
    }
  }

  implicit object MpiShort extends Int16(mpi2.lib.MPI_SHORT)
  object MpiInt16 extends Int16(mpi2.lib.MPI_INT16_T)

  // Unsigned 16-bit types, widened to JVM Int (valid range 0 until 65536).
  sealed abstract class Uint16(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Int](dt) {
    assume(extent.range == AlignHelper.shortByteSize)
    val alignment = AlignHelper.shortAlignment
    // 65536: one past the largest unsigned 16-bit value.
    val maxVal = 2 * (Short.MaxValue + 1)
    def load(p: Pointer[_], idx: Int): Int = {
      val v = p.as(classOf[Short])(idx).toInt
      // Undo sign extension to recover the unsigned value.
      if (v < 0) maxVal + v else v
    }
    def store(p: Pointer[_], idx: Int, elem: Int) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[Short])(idx) = elem.toShort
    }
    def isValid(int: Int): Boolean = 0 <= int && int < maxVal
  }

  object MpiUnsignedShort extends Uint16(mpi2.lib.MPI_UNSIGNED_SHORT)
  object MpiUint16 extends Uint16(mpi2.lib.MPI_UINT16_T)

  // Signed 32-bit types, represented directly as JVM Int.
  sealed abstract class Int32(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Int](dt) {
    assume(extent.range == AlignHelper.intByteSize)
    val alignment = AlignHelper.intAlignment
    def load(p: Pointer[_], idx: Int): Int =
      p.as(classOf[Int])(idx)
    def store(p: Pointer[_], idx: Int, elem: Int) {
      p.as(classOf[Int])(idx) = elem
    }
  }

  implicit object MpiInt extends Int32(mpi2.lib.MPI_INT)
  object MpiInt32 extends Int32(mpi2.lib.MPI_INT32_T)
  // Unsigned 32-bit types, widened to JVM Long (valid range 0 until 2^32).
  sealed abstract class Uint32(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Long](dt) {
    assume(extent.range == AlignHelper.intByteSize)
    val alignment = AlignHelper.intAlignment
    // 2^32: one past the largest unsigned 32-bit value.
    val maxVal = 2 * (Int.MaxValue + 1L)
    def load(p: Pointer[_], idx: Int): Long = {
      val l = p.as(classOf[Int])(idx).toLong
      // Undo sign extension to recover the unsigned value.
      if (l < 0) maxVal + l else l
    }
    def store(p: Pointer[_], idx: Int, elem: Long) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[Int])(idx) = elem.toInt
    }
    def isValid(lg: Long): Boolean = 0 <= lg && lg < maxVal
  }

  object MpiUnsigned extends Uint32(mpi2.lib.MPI_UNSIGNED)
  object MpiUint32 extends Uint32(mpi2.lib.MPI_UINT32_T)

  // MPI_LONG maps to the platform's C long, whose width may be narrower than
  // a JVM Long; the valid range is derived from CLong.SIZE at runtime.
  object MpiLong extends PredefDatatype[Long](mpi2.lib.MPI_LONG) {
    assume(extent.range == AlignHelper.cLongByteSize)
    val alignment = AlignHelper.cLongAlignment
    // NOTE(review): when the C long is 64 bits wide, isValid rejects
    // Long.MaxValue because the bound check is exclusive — confirm intended.
    val maxVal = {
      val cLongSize = 8 * CLong.SIZE
      if (cLongSize < java.lang.Long.SIZE) (1L << (cLongSize - 1))
      else Long.MaxValue
    }
    val minVal = {
      val cLongSize = 8 * CLong.SIZE
      if (cLongSize < java.lang.Long.SIZE) -(1L << (cLongSize - 1))
      else Long.MinValue
    }
    def load(p: Pointer[_], idx: Int): Long =
      p.as(classOf[CLong])(idx).longValue
    def store(p: Pointer[_], idx: Int, elem: Long) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[CLong])(idx) = CLong.valueOf(elem)
    }
    def isValid(lg: Long): Boolean = minVal <= lg && lg < maxVal
  }

  // Signed 64-bit types, represented directly as JVM Long.
  sealed abstract class Int64(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Long](dt) {
    assume(extent.range == AlignHelper.longByteSize)
    val alignment = AlignHelper.longAlignment
    def load(p: Pointer[_], idx: Int): Long =
      p.as(classOf[Long])(idx)
    def store(p: Pointer[_], idx: Int, elem: Long) {
      p.as(classOf[Long])(idx) = elem
    }
  }

  implicit object MpiLongLong extends Int64(mpi2.lib.MPI_LONG_LONG)
  object MpiInt64 extends Int64(mpi2.lib.MPI_INT64_T)
  // MPI_UNSIGNED_LONG backed by the platform C long; unsigned values are
  // recovered into a JVM Long, so only widths below 64 bits round-trip fully.
  object MpiUnsignedLong
      extends PredefDatatype[Long](mpi2.lib.MPI_UNSIGNED_LONG) {
    assume(extent.range == AlignHelper.cLongByteSize)
    val alignment = AlignHelper.cLongAlignment
    // 2^(C long width), or Long.MaxValue when the C long is 64 bits — in the
    // latter case values >= 2^63 cannot be represented in a JVM Long.
    val maxVal = {
      val cLongSize = 8 * CLong.SIZE
      if (cLongSize < java.lang.Long.SIZE) (1L << cLongSize)
      else Long.MaxValue
    }
    def load(p: Pointer[_], idx: Int): Long = {
      val l = p.as(classOf[CLong])(idx).longValue
      // Undo sign extension for sub-64-bit C longs.
      if (l < 0) l + maxVal else l
    }
    def store(p: Pointer[_], idx: Int, elem: Long) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[CLong])(idx) = CLong.valueOf(elem)
    }
    def isValid(lg: Long): Boolean = 0 <= lg && lg < maxVal
  }

  sealed abstract class Uint64(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[Long](dt) {
    // The Scala representation of this type is clearly insufficiently wide; it
    // should perhaps be replaced with something else.
    // NOTE(review): `load` adds Long.MaxValue (not 2^64) to negative raw
    // values, so unsigned values >= 2^63 are not recovered correctly — this is
    // a known limitation of mapping uint64 onto a signed JVM Long.
    assume(extent.range == AlignHelper.longByteSize)
    val alignment = AlignHelper.longAlignment
    val maxVal = Long.MaxValue
    def load(p: Pointer[_], idx: Int): Long = {
      val l = p.as(classOf[Long])(idx)
      if (l < 0) l + maxVal else l
    }
    def store(p: Pointer[_], idx: Int, elem: Long) {
      require(isValid(elem), mpi2.invalidValueErrorMsg)
      p.as(classOf[Long])(idx) = elem
    }
    def isValid(lg: Long): Boolean = 0 <= lg
  }

  object MpiUnsignedLongLong extends Uint64(mpi2.lib.MPI_UNSIGNED_LONG_LONG)
  object MpiUint64 extends Uint64(mpi2.lib.MPI_UINT64_T)
  // IEEE single-precision float, stored directly.
  implicit object MpiFloat extends PredefDatatype[Float](mpi2.lib.MPI_FLOAT) {
    assume(extent.range == AlignHelper.floatByteSize)
    val alignment = AlignHelper.floatAlignment
    def load(p: Pointer[_], idx: Int): Float =
      p.as(classOf[Float])(idx)
    def store(p: Pointer[_], idx: Int, elem: Float) {
      p.as(classOf[Float])(idx) = elem
    }
  }

  // IEEE double-precision float, stored directly.
  implicit object MpiDouble extends PredefDatatype[Double](mpi2.lib.MPI_DOUBLE) {
    assume(extent.range == AlignHelper.doubleByteSize)
    val alignment = AlignHelper.doubleAlignment
    def load(p: Pointer[_], idx: Int): Double =
      p.as(classOf[Double])(idx)
    def store(p: Pointer[_], idx: Int, elem: Double) {
      p.as(classOf[Double])(idx) = elem
    }
  }

  // MPI_WCHAR mapped to a 32-bit code unit, represented as JVM Int.
  object MpiWchar extends PredefDatatype[Int](mpi2.lib.MPI_WCHAR) {
    assume(extent.range == AlignHelper.intByteSize)
    val alignment = AlignHelper.intAlignment
    def load(p: Pointer[_], idx: Int): Int =
      p.as(classOf[Int])(idx)
    def store(p: Pointer[_], idx: Int, elem: Int) {
      p.as(classOf[Int])(idx) = elem
    }
  }

  // MPI_C_BOOL as a single byte: non-zero reads as true, true stores as 1.
  implicit object MpiBoolean extends PredefDatatype[Boolean](mpi2.lib.MPI_C_BOOL) {
    assume(extent.range == AlignHelper.byteByteSize)
    val alignment = AlignHelper.byteAlignment
    def load(p: Pointer[_], idx: Int): Boolean =
      p.as(classOf[Byte])(idx) != 0
    def store(p: Pointer[_], idx: Int, elem: Boolean) {
      p.as(classOf[Byte])(idx) = if (elem) 1 else 0
    }
  }
  // Single-precision complex: two consecutive floats (real, imaginary),
  // represented as a (Float, Float) pair.
  sealed abstract class FloatComplex(dt: mpi2.lib.MPI_Datatype)
      extends PredefDatatype[(Float, Float)](dt) {
    assume(extent.range == 2 * AlignHelper.floatByteSize)
    val alignment = AlignHelper.floatAlignment
    def load(p: Pointer[_], idx: Int): (Float,Float) = {
      val f = p.as(classOf[Float])
      // Element idx occupies float slots 2*idx (real) and 2*idx+1 (imaginary).
      (f(2 * idx), f(2 * idx + 1))
    }
    def store(p: Pointer[_], idx: Int, elem: (Float,Float)) {
      val f = p.as(classOf[Float])
      elem match {
        case (re, im) => {
          f(2 * idx) = re
          f(2 * idx + 1) = im
        }
      }
    }
  }

  object MpiComplex extends FloatComplex(mpi2.lib.MPI_C_COMPLEX)
  object MpiFloatComplex extends FloatComplex(mpi2.lib.MPI_C_FLOAT_COMPLEX)

  // Double-precision complex: two consecutive doubles (real, imaginary),
  // represented as a (Double, Double) pair.
  object MpiDoubleComplex
      extends PredefDatatype[(Double, Double)](mpi2.lib.MPI_C_DOUBLE_COMPLEX) {
    assume(extent.range == 2 * AlignHelper.doubleByteSize)
    val alignment = AlignHelper.doubleAlignment
    def load(p: Pointer[_], idx: Int): (Double,Double) = {
      val f = p.as(classOf[Double])
      (f(2 * idx), f(2 * idx + 1))
    }
    def store(p: Pointer[_], idx: Int, elem: (Double,Double)) {
      val f = p.as(classOf[Double])
      elem match {
        case (re, im) => {
          f(2 * idx) = re
          f(2 * idx + 1) = im
        }
      }
    }
  }
  // MPI_FLOAT_INT: a C struct { float; int } as used by MAXLOC/MINLOC
  // reductions; the int is placed at its aligned offset after the float.
  object MpiFloatInt
      extends PredefDatatype[(Float, Int)](mpi2.lib.MPI_FLOAT_INT) {
    val alignment = AlignHelper.floatAlignment
    // Offset of the int field, aligned after the leading float.
    private val intOffset =
      AlignHelper.align(AlignHelper.floatByteSize, AlignHelper.intAlignment)
    assume(
      extent.range ==
        AlignHelper.align(intOffset + AlignHelper.intByteSize, alignment))
    def load(p: Pointer[_], idx: Int): (Float,Int) = {
      val pf = p.offset(idx * extent.range)
      val pi = pf.offset(intOffset)
      (pf.as(classOf[Float])(0), pi.as(classOf[Int])(0))
    }
    def store(p: Pointer[_], idx: Int, elem: (Float,Int)) {
      val pf = p.offset(idx * extent.range)
      val pi = pf.offset(intOffset)
      elem match {
        case (flt, int) => {
          pf.as(classOf[Float])(0) = flt
          pi.as(classOf[Int])(0) = int
        }
      }
    }
  }

  // MPI_DOUBLE_INT: a C struct { double; int }, laid out like MpiFloatInt but
  // with a leading double.
  object MpiDoubleInt
      extends PredefDatatype[(Double, Int)](mpi2.lib.MPI_DOUBLE_INT) {
    val alignment = AlignHelper.doubleAlignment
    // Offset of the int field, aligned after the leading double.
    private val intOffset =
      AlignHelper.align(AlignHelper.doubleByteSize, AlignHelper.intAlignment)
    assume(
      extent.range ==
        AlignHelper.align(intOffset + AlignHelper.intByteSize, alignment))
    def load(p: Pointer[_], idx: Int): (Double,Int) = {
      val pf = p.offset(idx * extent.range)
      val pi = pf.offset(intOffset)
      (pf.as(classOf[Double])(0), pi.as(classOf[Int])(0))
    }
    def store(p: Pointer[_], idx: Int, elem: (Double,Int)) {
      val pf = p.offset(idx * extent.range)
      val pi = pf.offset(intOffset)
      elem match {
        case (dbl, int) => {
          pf.as(classOf[Double])(0) = dbl
          pi.as(classOf[Int])(0) = int
        }
      }
    }
  }
object MpiLongInt
extends PredefDatatype[(Long, Int)](mpi2.lib.MPI_LONG_INT) {
val alignment = AlignHelper.cLongAlignment
private val intOffset =
AlignHelper.align(AlignHelper.cLongByteSize, AlignHelper.intAlignment)
assume(
extent.range ==
AlignHelper.align(intOffset + AlignHelper.intByteSize, alignment))
val maxLongVal = {
val cLongSize = 8 * CLong.SIZE
if (cLongSize < java.lang.Long.SIZE) (1L << (cLongSize - 1))
else Long.MaxValue
}
val minLongVal = {
val cLongSize = 8 * CLong.SIZE
if (cLongSize < java.lang.Long.SIZE) -(1L << (cLongSize - 1))
else Long.MinValue
}
def load(p: Pointer[_], idx: Int): (Long,Int) = {
val pl = p.offset(idx * extent.range)
val pi = pl.offset(intOffset)
(pl.as(classOf[CLong])(0).longValue, pi.as(classOf[Int])(0))
}
def store(p: Pointer[_], idx: Int, elem: (Long,Int)) {
require(isValid(elem), mpi2.invalidValueErrorMsg)
val pl = p.offset(idx * extent.range)
val pi = pl.offset(intOffset)
elem match {
case (lg, int) => {
pl.as(classOf[CLong])(0) = CLong.valueOf(lg)
pi.as(classOf[Int])(0) = int
}
}
}
def isValid(li: (Long, Int)): Boolean =
minLongVal <= li._1 && li._1 < maxLongVal
}
  /** MPI_2INT reduction pair type: two consecutive ints (no padding needed). */
  object MpiIntInt
      extends PredefDatatype[(Int, Int)](mpi2.lib.MPI_2INT) {
    assume(extent.range == 2 * AlignHelper.intByteSize)
    val alignment = AlignHelper.intAlignment
    // Element idx occupies int slots 2*idx and 2*idx+1.
    def load(p: Pointer[_], idx: Int): (Int,Int) = {
      val pi = p.as(classOf[Int])
      (pi(2 * idx), pi(2 * idx + 1))
    }
    def store(p: Pointer[_], idx: Int, elem: (Int,Int)) {
      val pi = p.as(classOf[Int])
      elem match {
        case (i1, i2) => {
          pi(2 * idx) = i1
          pi(2 * idx + 1) = i2
        }
      }
    }
  }
  /**
   * MPI_SHORT_INT reduction pair type: a struct of { short; int }, with the
   * int member placed after the short at its natural alignment.
   */
  object MpiShortInt
      extends PredefDatatype[(Short, Int)](mpi2.lib.MPI_SHORT_INT) {
    val alignment = AlignHelper.shortAlignment
    // Byte offset of the int member within the struct.
    private val intOffset =
      AlignHelper.align(AlignHelper.shortByteSize, AlignHelper.intAlignment)
    assume(
      extent.range ==
        AlignHelper.align(intOffset + AlignHelper.intByteSize, alignment))
    // Address element idx by byte offset (pointer offsets are in bytes), then read each member.
    def load(p: Pointer[_], idx: Int): (Short,Int) = {
      val ps = p.offset(idx * extent.range).as(classOf[Short])
      val pi = ps.offset(intOffset).as(classOf[Int])
      (ps(0), pi(0))
    }
    def store(p: Pointer[_], idx: Int, elem: (Short,Int)) {
      val ps = p.offset(idx * extent.range)
      val pi = ps.offset(intOffset)
      elem match {
        case (sh, int) => {
          ps.as(classOf[Short])(0) = sh
          pi.as(classOf[Int])(0) = int
        }
      }
    }
  }
// implicit object MpiAint
// extends PredefDatatype[mpi2.lib.MPI_Aint](mpi2.lib.MPI_AINT) {
// val alignment = mpi2.aintAlignment
// def load(p: Pointer[_], idx: Int): mpi2.lib.MPI_Aint =
// p.as(classOf[mpi2.lib.MPI_Aint])(idx)
// def store(p: Pointer[_], idx: Int, elem: mpi2.lib.MPI_Aint) {
// p.as(classOf[mpi2.lib.MPI_Aint])(idx) = elem
// }
// }
// object MpiOffset
// extends PredefDatatype[mpi2.lib.MPI_Offset](mpi2.lib.MPI_OFFSET) {
// val alignment = mpi2.offsetAlignment
// def load(p: Pointer[_], idx: Int): mpi2.lib.MPI_Offset =
// p.as(classOf[mpi2.lib.MPI_Offset])(idx)
// def store(p: Pointer[_], idx: Int, elem: mpi2.lib.MPI_Offset) {
// p.as(classOf[mpi2.lib.MPI_Offset])(idx) = elem
// }
// }
  /**
   * MPI_PACKED pseudo-datatype. It wraps the predefined handle directly (no
   * element type, so multiplicity/alignment are 1) and must never be freed.
   */
  object MpiPacked extends mpi2.Datatype[Any] {
    handlePtr.set(mpi2.lib.MPI_PACKED)
    mpi2.Datatype.register(this)
    // Force-cache the handle now that handlePtr has been set.
    override lazy val handle = super.handle
    // Predefined MPI datatypes are never freed.
    def free() {}
    val multiplicity = 1
    val alignment = 1
    override def toString(): String = getClass.getSimpleName
  }
}
| mpokorny/scampi | src/main/scala/org/truffulatree/scampi2/PredefDatatypeComponent.scala | Scala | mpl-2.0 | 16,229 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cs.ucla.edu.bwaspark.fastq
import java.io.{File, FileReader, InputStreamReader, BufferedReader, IOException, FileNotFoundException}
import java.nio.{ByteBuffer, CharBuffer}
import java.nio.charset.{Charset, CharsetEncoder, CharacterCodingException}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import scala.util.control.Breaks._
import scala.List
import scala.collection.parallel.mutable.ParArray
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.util.{Success, Failure}
import scala.concurrent.duration._
import cs.ucla.edu.avro.fastq._
import org.apache.hadoop.mapreduce.Job
//import org.apache.parquet.hadoop.ParquetOutputFormat
//import org.apache.parquet.avro.AvroParquetOutputFormat
//import org.apache.parquet.hadoop.util.ContextUtil
//import org.apache.parquet.hadoop.metadata.CompressionCodecName
// NOTE: Currently Parquet 1.8.1 is not compatible with Spark 1.5.1
// At the current time, we use the old Parquet 1.6.0 for uploading data to HDFS
import parquet.hadoop.ParquetOutputFormat
import parquet.avro.AvroParquetOutputFormat
import parquet.hadoop.util.ContextUtil
import parquet.hadoop.metadata.CompressionCodecName
import java.util.logging.{Level, Logger}
// batchedLineNum: the number of reads processed each time
class FASTQLocalFileLoader(batchedLineNum: Int) {
var isEOF = false
var ioWaitingTime = 0
  /**
   * Read the FASTQ file from a local directory
   * This function reads only partial FASTQs and should be called several times to read the whole FASTQ file
   *
   * @param reader the Java BufferedReader object to read a file line by line
   * @param sc the spark context
   * @param batchedNum the number of lines to read per batch
   * @param filePartitionNum the number of partitions in HDFS of this batch. We suggest to set this number equal to the number of core in the cluster.
   * @return the FASTQ records parsed in this batch; the member `isEOF` is set when the end of the stream is reached
   */
  def batchedRDDReader(reader: BufferedReader, sc: SparkContext, batchedNum: Int, filePartitionNum: Int): Vector[FASTQRecord] = {
    val charset = Charset.forName("ASCII")
    val encoder = charset.newEncoder()
    var records: Vector[FASTQRecord] = scala.collection.immutable.Vector.empty
    var lineNum = 0
    var isBreak = false
    // Each FASTQ record spans 4 lines: name, sequence, separator ('+'), quality.
    while(lineNum < batchedNum && !isBreak) {
      val line = reader.readLine()
      if(line != null) {
        // First token is the read name; remaining tokens form an optional comment.
        val lineFields = line.split(" ")
        var tmpStr = new String
        if(lineFields.length <= 0) {
          println("Error: Input format not handled")
          System.exit(1);
        }
        val n = lineFields(0).size
        // Strip the leading '@' (if present) and a trailing "/1" or "/2" pair-end suffix.
        // NOTE(review): substring(n-2) throws for names shorter than 2 chars — TODO confirm inputs are well-formed.
        if(lineFields(0).charAt(0) == '@') {
          if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
            tmpStr = lineFields(0).substring(1, n-2)
          else
            tmpStr = lineFields(0).substring(1)
        }
        else {
          if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
            tmpStr = lineFields(0).dropRight(2)
          else
            tmpStr = lineFields(0)
        }
        val name = encoder.encode( CharBuffer.wrap(tmpStr) )
        // Re-join the remaining whitespace-separated tokens into the comment string.
        var comment: String = ""
        if(lineFields.length >= 2) {
          var i: Int = 1
          while(i < lineFields.length - 1) {
            comment += (lineFields(i) + " ")
            i += 1
          }
          comment += lineFields(lineFields.length - 1)
        }
        // NOTE(review): a truncated file (null from readLine mid-record) would NPE here — TODO confirm acceptable.
        val seqString = reader.readLine()
        val seqLength = seqString.size
        val seq = encoder.encode( CharBuffer.wrap(seqString) )
        // read out the third line
        reader.readLine()
        val quality = encoder.encode( CharBuffer.wrap(reader.readLine()) )
        if(lineFields.length >= 2) {
          val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap(comment) ))
          records = records :+ record
        }
        else {
          val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap("") ))
          records = records :+ record
        }
        lineNum += 4
      }
      else {
        // End of input: signal the caller via the isEOF member and stop this batch.
        isEOF = true
        isBreak = true
      }
    }
    records
  }
  /**
   * Read the FASTQ file from the local file system and store it in HDFS
   * The FASTQ is encoded and compressed in the Parquet+Avro format in HDFS
   * Note that there will be several directories since the local large FASTQ file is read and stored in HDFS with several batches
   *
   * @param sc the spark context
   * @param inFile the input FASTQ file in the local file system
   * @param outFileRootPath the root path of the output FASTQ files in HDFS.
   * @param filePartitionNum the number of partitions in HDFS of this batch. We suggest to set this number equal to the number of core in the cluster.
   */
  def storeFASTQInHDFS(sc: SparkContext, inFile: String, outFileRootPath: String, filePartitionNum: Int) {
    val conf = new Configuration
    val fs = FileSystem.get(conf)
    val path = new Path(inFile)
    var reader: BufferedReader = null
    // Prefer the HDFS copy of the input if it exists; fall back to the local file system.
    if (fs.exists(path)) {
      reader = new BufferedReader(new InputStreamReader(fs.open(path)))
    }
    else {
      reader = new BufferedReader(new FileReader(inFile)) //file reader
    }
    // Silence noisy parquet.hadoop INFO/WARN logging.
    val parquetHadoopLogger = Logger.getLogger("parquet.hadoop")
    parquetHadoopLogger.setLevel(Level.SEVERE)
    // i numbers the per-batch output subdirectories under outFileRootPath.
    var i: Int = 0
    while(!isEOF) {
      val serializedRecords = batchedRDDReader(reader, sc, batchedLineNum, filePartitionNum).map(new SerializableFASTQRecord(_))
      if(serializedRecords.length > 0) {
        // Key with null: ParquetOutputFormat only consumes the value side.
        val pairRDD = sc.parallelize(serializedRecords, filePartitionNum).map(rec => (null, rec))
        val job = new Job(pairRDD.context.hadoopConfiguration)
        // Configure the ParquetOutputFormat to use Avro as the serialization format
        //ParquetOutputFormat.setCompression(job, CompressionCodecName.GZIP)
        ParquetOutputFormat.setCompression(job, CompressionCodecName.UNCOMPRESSED)
        ParquetOutputFormat.setEnableDictionary(job, true)
        ParquetOutputFormat.setBlockSize(job, 128 * 1024 * 1024)
        ParquetOutputFormat.setPageSize(job, 1 * 1024 * 1024)
        // Pass the Avro Schema
        AvroParquetOutputFormat.setSchema(job, cs.ucla.edu.avro.fastq.FASTQRecord.SCHEMA$)
        // Save the RDD to a Parquet file in our temporary output directory
        val outputPath = outFileRootPath + "/" + i.toString();
        //pairRDD.saveAsNewAPIHadoopFile(outputPath, classOf[Void], classOf[FASTQRecord], classOf[AvroParquetOutputFormat[FASTQRecord]], ContextUtil.getConfiguration(job))
        pairRDD.saveAsNewAPIHadoopFile(outputPath, classOf[Void], classOf[FASTQRecord], classOf[AvroParquetOutputFormat], ContextUtil.getConfiguration(job))
        i += 1
      }
    }
  }
  /**
   * Read the FASTQ files (pair-end, 2 files) from a local directory
   * This function reads only a batch of FASTQs and should be called several times to read the whole FASTQ files
   *
   * @param reader1 the Java BufferedReader object to read a file line by line (on one end of the read)
   * @param reader2 the Java BufferedReader object to read a file line by line (on the other end of the read)
   * @param sc the spark context
   * @param batchedNum the number of lines to read per batch
   * @param filePartitionNum the number of partitions in HDFS of this batch. We suggest to set this number equal to the number of core in the cluster.
   * @return the pair-end records parsed in this batch; the member `isEOF` is set when reader1 is exhausted
   */
  def batchedPairEndRDDReader(reader1: BufferedReader, reader2: BufferedReader, sc: SparkContext, batchedNum: Int, filePartitionNum: Int): Vector[PairEndFASTQRecord] = {
    val charset = Charset.forName("ASCII")
    val encoder = charset.newEncoder()
    var records: Vector[PairEndFASTQRecord] = scala.collection.immutable.Vector.empty
    var lineNum = 0
    var isBreak = false
    // One pair-end record consumes 4 lines from each file (8 lines total).
    while(lineNum < batchedNum && !isBreak) {
      var line = reader1.readLine()
      if(line != null) {
        val lineFields = line.split(" ")
        var pairEndRecord = new PairEndFASTQRecord
        var tmpStr = new String
        if(lineFields.length <= 0) {
          println("Error: Input format not handled")
          System.exit(1);
        }
        val n = lineFields(0).size
        // Strip the leading '@' (if present) and a trailing "/1" or "/2" pair-end suffix.
        // NOTE(review): substring(n-2) throws for names shorter than 2 chars — TODO confirm inputs are well-formed.
        if(lineFields(0).charAt(0) == '@') {
          if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
            tmpStr = lineFields(0).substring(1, n-2)
          else
            tmpStr = lineFields(0).substring(1)
        }
        else {
          if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
            tmpStr = lineFields(0).dropRight(2)
          else
            tmpStr = lineFields(0)
        }
        val name = encoder.encode( CharBuffer.wrap(tmpStr) )
        // Re-join remaining tokens into the optional comment string.
        var comment: String = ""
        if(lineFields.length >= 2) {
          var i: Int = 1
          while(i < lineFields.length - 1) {
            comment += (lineFields(i) + " ")
            i += 1
          }
          comment += lineFields(lineFields.length - 1)
        }
        val seqString = reader1.readLine()
        val seqLength = seqString.size
        val seq = encoder.encode( CharBuffer.wrap(seqString) )
        // read out the third line
        reader1.readLine()
        val quality = encoder.encode( CharBuffer.wrap(reader1.readLine()) )
        // First end of the pair comes from reader1.
        if(lineFields.length >= 2) {
          val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap(comment) ))
          pairEndRecord.setSeq0(new SerializableFASTQRecord(record))
        }
        else {
          val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap("") ))
          pairEndRecord.setSeq0(new SerializableFASTQRecord(record))
        }
        // Second end of the pair comes from reader2; both files must have the same length.
        line = reader2.readLine
        if(line == null) {
          println("Error: the number of two FASTQ files are different")
          System.exit(1)
        }
        else {
          // Same parsing as above, applied to the mate read (shadows lineFields/n intentionally).
          val lineFields = line.split(" ")
          if(lineFields.length <= 0) {
            println("Error: Input format not handled")
            System.exit(1);
          }
          val n = lineFields(0).size
          if(lineFields(0).charAt(0) == '@') {
            if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
              tmpStr = lineFields(0).substring(1, n-2)
            else
              tmpStr = lineFields(0).substring(1)
          }
          else {
            if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
              tmpStr = lineFields(0).dropRight(2)
            else
              tmpStr = lineFields(0)
          }
          val name = encoder.encode( CharBuffer.wrap(tmpStr) )
          var comment: String = ""
          if(lineFields.length >= 2) {
            var i: Int = 1
            while(i < lineFields.length - 1) {
              comment += (lineFields(i) + " ")
              i += 1
            }
            comment += lineFields(lineFields.length - 1)
          }
          val seqString = reader2.readLine()
          val seqLength = seqString.size
          val seq = encoder.encode( CharBuffer.wrap(seqString) )
          // read out the third line
          reader2.readLine()
          val quality = encoder.encode( CharBuffer.wrap(reader2.readLine()) )
          if(lineFields.length >= 2) {
            val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap(comment) ))
            pairEndRecord.setSeq1(new SerializableFASTQRecord(record))
          }
          else {
            val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap("") ))
            pairEndRecord.setSeq1(new SerializableFASTQRecord(record))
          }
        }
        records = records :+ pairEndRecord
        lineNum += 8
      }
      else {
        // End of reader1: signal the caller via the isEOF member.
        isEOF = true
        isBreak = true
      }
    }
    records
  }
/**
* Read the FASTQ file and place reads in an array
*
* @param batchedLineNum the number of lines processed each time (equals to # of reads * 4)
* @param reader the Java BufferedReader object to read a file line by line
* @return a ParArray that store these reads
*/
def bufferedReadFASTQ(batchedLineNum: Int, reader: BufferedReader) : ParArray[RawRead] = {
val parArraySize = batchedLineNum / 4
val rawReads: ParArray[RawRead] = new ParArray(parArraySize)
var lineNum = 0
var readIdx = 0
var isBreak = false
while(lineNum < batchedLineNum && !isBreak) {
var line = reader.readLine
if(line != null) {
val read = new RawRead
read.name = line
read.seq = reader.readLine
read.description = reader.readLine
read.qual = reader.readLine
rawReads(readIdx) = read
lineNum += 4
readIdx += 1
}
else {
isEOF = true
isBreak = true
}
}
if(!isBreak) {
println("Raw read #: " + rawReads.size)
rawReads
}
else {
val remainingRawReads: ParArray[RawRead] = new ParArray(readIdx)
println("Remaining raw read #: " + remainingRawReads.size)
var i = 0
while(i < readIdx) {
remainingRawReads(i) = rawReads(i)
i += 1
}
remainingRawReads
}
}
/**
* Transform raw read to read record
*
* @param rawRead the raw read store in the String format
* @return a read record
*/
def RawRead2FASTQRecord(rawRead: RawRead): SerializableFASTQRecord = {
val charset = Charset.forName("ASCII")
val encoder = charset.newEncoder
val lineFields = rawRead.name.split(" ")
var tmpStr = new String
if(lineFields.length <= 0) {
println("Error: Input format not handled")
System.exit(1);
}
val n = lineFields(0).size
if(lineFields(0).charAt(0) == '@') {
if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
tmpStr = lineFields(0).substring(1, n-2)
else
tmpStr = lineFields(0).substring(1)
}
else {
if(lineFields(0).substring(n-2).equals("/1") || lineFields(0).substring(n-2).equals("/2"))
tmpStr = lineFields(0).dropRight(2)
else
tmpStr = lineFields(0)
}
val name = encoder.encode( CharBuffer.wrap(tmpStr) )
var comment: String = ""
if(lineFields.length >= 2) {
var i: Int = 1
while(i < lineFields.length - 1) {
comment += (lineFields(i) + " ")
i += 1
}
comment += lineFields(lineFields.length - 1)
}
val seqLength = rawRead.seq.size
val seq = encoder.encode( CharBuffer.wrap(rawRead.seq) )
val quality = encoder.encode( CharBuffer.wrap(rawRead.qual) )
if(lineFields.length >= 2) {
val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap(comment) ))
val serializedRecord = new SerializableFASTQRecord(record)
serializedRecord
}
else {
val record = new FASTQRecord(name, seq, quality, seqLength, encoder.encode( CharBuffer.wrap("") ))
val serializedRecord = new SerializableFASTQRecord(record)
serializedRecord
}
}
/**
* Read the FASTQ files (pair-end, 2 files) from a local directory in a PARALLEL way
* This function reads only a batch of FASTQs and should be called several times to read the whole FASTQ files
*
* @param reader1 the Java BufferedReader object to read a file line by line (on one end of the read)
* @param reader2 the Java BufferedReader object to read a file line by line (on the other end of the read)
* @param batchedNum the number of lines to read per batch
*/
def parallelBatchedPairEndRDDReader(reader1: BufferedReader, reader2: BufferedReader, batchedNum: Int): Vector[PairEndFASTQRecord] = {
var records: Vector[PairEndFASTQRecord] = scala.collection.immutable.Vector.empty
val readEnds0 = bufferedReadFASTQ(batchedNum / 2, reader1)
val readEnds1 = bufferedReadFASTQ(batchedNum / 2, reader2)
val fastqRecords0 = readEnds0.map( RawRead2FASTQRecord(_) )
val fastqRecords1 = readEnds1.map( RawRead2FASTQRecord(_) )
if(fastqRecords0.size != fastqRecords1.size) {
println("Error: the number of two FASTQ files are different")
System.exit(1)
}
var i: Int = 0
while(i < fastqRecords0.size) {
val pairEndRecord = new PairEndFASTQRecord
pairEndRecord.setSeq0(fastqRecords0(i))
pairEndRecord.setSeq1(fastqRecords1(i))
records = records :+ pairEndRecord
i += 1
}
records
}
  /**
   * Read the FASTQ file from the local file system and store it in HDFS
   * The FASTQ is encoded and compressed in the Parquet+Avro format in HDFS
   * Note that there will be several directories since the local large FASTQ file is read and stored in HDFS with several batches
   *
   * The HDFS write of each batch runs in a background Future so that the next
   * batch can be parsed while the previous one is being written; a shared flag
   * (isHDFSWriteDone) is used to wait for the in-flight write before starting
   * the next one.
   *
   * @param sc the spark context
   * @param inFile1 the input FASTQ file in the local file system (on one end of the read)
   * @param inFile2 the input FASTQ file in the local file system (on the other end of the read)
   * @param outFileRootPath the root path of the output FASTQ files in HDFS.
   * @param filePartitionNum the number of partitions in HDFS of this batch. We suggest to set this number equal to the number of core in the cluster.
   */
  def storePairEndFASTQInHDFS(sc: SparkContext, inFile1: String, inFile2: String, outFileRootPath: String, filePartitionNum: Int) {
    val reader1 = new BufferedReader(new FileReader(inFile1))
    val reader2 = new BufferedReader(new FileReader(inFile2))
    // Silence noisy parquet.hadoop INFO/WARN logging.
    val parquetHadoopLogger = Logger.getLogger("parquet.hadoop")
    parquetHadoopLogger.setLevel(Level.SEVERE)
    // i numbers the per-batch output subdirectories under outFileRootPath.
    var i: Int = 0
    // NOTE(review): this flag is read in unsynchronized busy-wait loops while
    // being written inside synchronized blocks on another thread; it is not
    // @volatile, so cross-thread visibility is not guaranteed — verify.
    var isHDFSWriteDone: Boolean = true // a done signal for writing data to HDFS
    while(!isEOF) {
      //val serializedRecords = batchedPairEndRDDReader(reader1, reader2, sc, batchedLineNum, filePartitionNum).map(new SerializablePairEndFASTQRecord(_)) // old implementation
      val serializedRecords = parallelBatchedPairEndRDDReader(reader1, reader2, batchedLineNum).map(new SerializablePairEndFASTQRecord(_))
      if(serializedRecords.length > 0) {
        println("[DEBUG] Main Thread, Before while loop: isHDFSWriteDone = " + isHDFSWriteDone)
        // Wait for the previous batch's HDFS write to finish before launching another.
        while(!isHDFSWriteDone) {
          try {
            println("Waiting for I/O")
            ioWaitingTime += 1
            Thread.sleep(1000) // sleep for one second
          } catch {
            case e: InterruptedException => Thread.currentThread.interrupt
          }
        }
        println("[DEBUG] Main Thread, After while loop: isHDFSWriteDone = " + isHDFSWriteDone)
        this.synchronized {
          isHDFSWriteDone = false
        }
        println("[DEBUG] Main Thread, Final value: isHDFSWriteDone = " + isHDFSWriteDone)
        // Write this batch asynchronously so parsing of the next batch can proceed.
        // NOTE(review): `i` is incremented inside the Future while the main thread
        // keeps running; looks race-prone if a write were ever still pending when
        // the next batch is prepared — presumably serialized by the flag, verify.
        val f: Future[Int] = Future {
          val pairRDD = sc.parallelize(serializedRecords, filePartitionNum).map(rec => (null, rec))
          val job = new Job(pairRDD.context.hadoopConfiguration)
          // Configure the ParquetOutputFormat to use Avro as the serialization format
          //ParquetOutputFormat.setWriteSupportClass(job, classOf[AvroWriteSupport[PairEndFASTQRecord]])
          //ParquetOutputFormat.setCompression(job, CompressionCodecName.GZIP)
          ParquetOutputFormat.setCompression(job, CompressionCodecName.UNCOMPRESSED)
          ParquetOutputFormat.setEnableDictionary(job, true)
          ParquetOutputFormat.setBlockSize(job, 128 * 1024 * 1024)
          ParquetOutputFormat.setPageSize(job, 1 * 1024 * 1024)
          // Pass the Avro Schema
          AvroParquetOutputFormat.setSchema(job, cs.ucla.edu.avro.fastq.PairEndFASTQRecord.SCHEMA$)
          // Save the RDD to a Parquet file in our temporary output directory
          val outputPath = outFileRootPath + "/" + i.toString();
          pairRDD.saveAsNewAPIHadoopFile(outputPath, classOf[Void], classOf[PairEndFASTQRecord], classOf[AvroParquetOutputFormat], ContextUtil.getConfiguration(job))
          //pairRDD.saveAsNewAPIHadoopFile(outputPath, classOf[Void], classOf[PairEndFASTQRecord], classOf[AvroParquetOutputFormat[PairEndFASTQRecord]], ContextUtil.getConfiguration(job))
          i += 1
          1
        }
        f onComplete {
          case Success(s) => {
            println("[DEBUG] Forked thread, Before: isHDFSWriteDone = " + isHDFSWriteDone)
            println("Successfully write the FASTQ file partitions to HDFS: " + s)
            // Release the main thread's busy-wait loop.
            this.synchronized {
              isHDFSWriteDone = true
            }
            println("[DEBUG] Forked thread, After: isHDFSWriteDone = " + isHDFSWriteDone)
          }
          // NOTE(review): on failure the flag is never reset, so the main thread
          // would wait forever — verify intended behavior.
          case Failure(f) => println("An error has occured: " + f.getMessage)
        }
      }
    }
    // Wait for the final in-flight write before returning.
    println("[DEBUG] Main Thread (Final iteration), Before while loop: isHDFSWriteDone = " + isHDFSWriteDone)
    while(!isHDFSWriteDone) {
      try {
        println("Waiting for I/O")
        Thread.sleep(1000) // sleep for one second
      } catch {
        case e: InterruptedException => Thread.currentThread.interrupt
      }
    }
    println("[DEBUG] Main Thread (Final iteration), After while loop: isHDFSWriteDone = " + isHDFSWriteDone)
    this.synchronized {
      isHDFSWriteDone = false
    }
    println("[DEBUG] Main Thread (Final iteration), Final value: isHDFSWriteDone = " + isHDFSWriteDone)
  }
}
| ytchen0323/cloud-scale-bwamem | src/main/scala/cs/ucla/edu/bwaspark/fastq/FASTQLocalFileLoader.scala | Scala | apache-2.0 | 22,708 |
package io.udash.web.guide.demos.rpc
import io.udash.rpc._
import io.udash.web.guide.rpc.ClientRPC
import scala.concurrent.Future
/** Server-side endpoint of the ping demo, bound to the client identified by `clientId`. */
class PingServer(implicit clientId: ClientId) extends PingServerRPC {
  import io.udash.web.Implicits._

  /** Fire-and-forget ping: notifies the originating client with a pong carrying the same id. */
  override def ping(id: Int): Unit =
    ClientRPC(clientId).demos().pingDemo().pong(id)

  /** Request/response ping: completes immediately, echoing the received id. */
  override def fPing(id: Int): Future[Int] = Future.successful(id)
}
| UdashFramework/udash-core | guide/backend/src/main/scala/io/udash/web/guide/demos/rpc/PingServer.scala | Scala | apache-2.0 | 413 |
package controllers
import javax.inject.{Inject,Named}
import akka.actor.ActorRef
import models.Formatters._
import models._
import play.api.Configuration
import play.api.libs.json.{JsValue, Json}
import play.api.mvc._
import sender.{Messages, CampaignSupervisor}
import services._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* Created by unoedx on 09/04/16.
*/
/**
 * Play controller for submitting bulk-mail campaigns and receiving
 * delivery-feedback callbacks (bounce/complaint/success notifications).
 * Campaigns are handed off to the CampaignSupervisor actor; every campaign
 * additionally includes the configured tester addresses.
 */
class MailerController @Inject()(
                                  authService: AuthService,
                                  mailer: MailService,
                                  configuration: Configuration,
                                  feedbackService: FeedbackService,
                                  @Named(CampaignSupervisor.name) campaignSupervisor: ActorRef
                                ) extends Controller {

  /**
   *
   * Example:
   * {
	"auth": {
		"partnerId": "123",
		"apiKey": "123",
		"campaignId": "123"
	},
	"bulkMail": {
		"subject": "Test",
		"html":"HTML Test",
		"text":"Text Test",
		"fromName":"Dev Wavein",
		"fromEmail":"dev@wavein.ch",
		"mails": [
		  {
			"email": "andrea@wavien.ch",
			"params": {
			  "uuid": "asfkjksd"
			}
		  }
		]
	}
}
   *
   * @return
   */
  // Authenticates the request, prepends the tester addresses (or, for the
  // special "inviotest" campaign, sends ONLY to testers) and forwards the
  // campaign to the supervisor actor. Returns 403 on any failure.
  def send() = Action.async(parse.json(maxLength = 100 * 1024 * 1024)) { r =>
    val sendMailAction = r.body.as[SendMailAction](Formatters.sendMailAction)
    println("Recived:")
    println(r.body)
    val bulkMail = sendMailAction.bulkMail
    val auth = sendMailAction.auth

    val result = for {
      isAuthorized <- authService.isAuthorized(auth) if isAuthorized
      quota <- mailer.quota()
    } yield {
      // Testers always receive a copy, with a placeholder uuid parameter.
      val testerParams = testers.map(m => MailParams(m, Map("uuid" -> "UUID-key")))

      val bm = if(auth.campaignId.contains("inviotest")) {
        bulkMail.copy(mails = testerParams)
      } else {
        bulkMail.copy(mails = testerParams ++ bulkMail.mails)
      }

      campaignSupervisor ! Messages.Campaign(testers,mailer,feedbackService,bm,quota)
      Ok(Json.obj("result" -> true))
    }

    // Failed auth (guard) or any upstream error maps to 403.
    result recover {
      case _ => Forbidden
    }
  }

  // The sandbox endpoints below submit synthetic campaigns of `num` messages
  // that exercise each feedback path (success/bounce/complaint/OOTO/suppression).
  def sandboxSuccess(num:Int) = Action.async{
    mailer.quota().map{ quota =>
      campaignSupervisor ! Messages.Campaign(testers,mailer,feedbackService,Sandbox.bulkSuccess(num),quota)
      Ok("Campaign submitted")
    }
  }
  def sandboxBounce(num:Int) = Action.async{
    mailer.quota().map{ quota =>
      campaignSupervisor ! Messages.Campaign(testers,mailer,feedbackService,Sandbox.bulkBounce(num),quota)
      Ok("Campaign submitted")
    }
  }
  def sandboxComplaint(num:Int) = Action.async{
    mailer.quota().map{ quota =>
      campaignSupervisor ! Messages.Campaign(testers,mailer,feedbackService,Sandbox.bulkComplaint(num),quota)
      Ok("Campaign submitted")
    }
  }
  def sandboxOOTO(num:Int) = Action.async{
    mailer.quota().map{ quota =>
      campaignSupervisor ! Messages.Campaign(testers,mailer,feedbackService,Sandbox.bulkOoto(num),quota)
      Ok("Campaign submitted")
    }
  }
  def sandboxSuppression(num:Int) = Action.async{
    mailer.quota().map{ quota =>
      campaignSupervisor ! Messages.Campaign(testers,mailer,feedbackService,Sandbox.bulkSuppression(num),quota)
      Ok("Campaign submitted")
    }
  }

  // Webhook for bounce notifications (SNS-style envelope, see `message`).
  def bounce() = Action.async{ r =>
    val msg = message(r.body.asText.get)
    val feedback = Feedback.fromMessage(msg,"bounce")
    println("bounce:" + msg)
    feedbackService.bounces(feedback).map{_ => Ok("received") }
  }

  // Webhook for complaint notifications.
  // NOTE(review): this also routes through feedbackService.bounces — looks like
  // it may have been intended to call a complaint-specific method; verify.
  def complaint() = Action.async{ r =>
    val msg = message(r.body.asText.get)
    val feedback = Feedback.fromMessage(msg,"complaint")
    println("complaint:" + msg)
    feedbackService.bounces(feedback).map{_ => Ok("received") }
  }

  // Webhook for successful-delivery notifications.
  def success() = Action.async{ r =>
    val msg = message(r.body.asText.get)
    val feedback = Feedback.fromMessage(msg,"success")
    println("success:" + msg)
    feedbackService.delivery(feedback).map{_ => Ok("received") }
  }

  // Tester addresses configured under "adt.testers".
  private val testers = configuration.getStringSeq("adt.testers").get

  // Unwraps an SNS-style envelope: the outer JSON's "Message" field is itself
  // a JSON document encoded as a string.
  private def message(body:String):JsValue = Json.parse((Json.parse(body) \\ "Message").as[String])

}
| waveinch/ses-transactional | app/controllers/MailerController.scala | Scala | apache-2.0 | 4,388 |
package models.user
import com.artclod.collection.MustHandle
import models.quiz.question.table.{GraphMatchQuestionsTable, TangentQuestionsTable, DerivativeGraphQuestionsTable, DerivativeQuestionsTable}
import play.api.db.slick.Config.driver.simple._
/** Slick TableQuery instances for the user-related tables. */
package object table {
  val usersTable = TableQuery[UsersTable]
  val friendsTable = TableQuery[FriendsTable]

  // ==== Start Alerts
  val gameCompletedAlertsTable = TableQuery[GameCompletedAlertsTable]
  // MustHandle wraps the alert tables so new alert types cannot be silently forgotten.
  val alertTables = MustHandle(gameCompletedAlertsTable)
  // ==== End Alerts
}
| kristiankime/web-education-games | app/models/user/table/package.scala | Scala | mit | 541 |
package akka.contrib.persistence.mongodb
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/** Runs the Akka Persistence snapshot-store TCK against the ReactiveMongo-backed extension. */
@RunWith(classOf[JUnitRunner])
class RxMongoPersistenceSnapshotTckSpec extends SnapshotTckSpec(classOf[RxMongoPersistenceExtension])
| tomzhang/akka-persistence-mongo | rxmongo/src/test/scala/akka/contrib/persistence/mongodb/RxMongoPersistenceSnapshotTckSpec.scala | Scala | apache-2.0 | 247 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package impl
import com.intellij.pom.java.LanguageLevel
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubElement}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
/**
* @author ilyas
*/
class ScTemplateDefinitionStubImpl[ParentPsi <: PsiElement](parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement])
extends StubBaseWrapper[ScTemplateDefinition](parent, elemType) with ScTemplateDefinitionStub {
var myName: String = _
var myQualName: String = _
var myJavaQualName: String = _
var mySourceFileName: String = _
var myMethodNames: Array[String] = Array[String]()
var myJavaName: String = _
var myAdditionalJavaNames: Array[String] = Array.empty
private var _isScriptFileClass: Boolean = _
private var _isPackageObject: Boolean = _
private var _isDeprecated: Boolean = _
private var _isImplicitObject: Boolean = _
private var _isImplicitClass: Boolean = _
private var local: Boolean = false
private var visibleInJava: Boolean = false
def this(parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
name: String,
qualName: String,
javaQualName: String,
sourceFileName: String,
methodNames: Array[String],
isPackageObject: Boolean,
isScriptFileClass: Boolean,
isDeprecated: Boolean,
isImplicitObject: Boolean,
isImplicitClass: Boolean,
javaName: String,
additionalJavaNames: Array[String],
isLocal: Boolean,
visibleInJava: Boolean) {
this(parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
mySourceFileName = sourceFileName
myName = name
myQualName = qualName
myJavaQualName = javaQualName
myMethodNames = methodNames
myJavaName = javaName
myAdditionalJavaNames = additionalJavaNames
this._isPackageObject = isPackageObject
_isScriptFileClass = isScriptFileClass
_isDeprecated = isDeprecated
_isImplicitObject = isImplicitObject
_isImplicitClass = isImplicitClass
local = isLocal
this.visibleInJava = visibleInJava
}
def this(parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
name: StringRef,
qualName: StringRef,
javaQualName: StringRef,
sourceFileName: StringRef,
methodNames: Array[StringRef],
isPackageObject: Boolean,
isScriptFileClass: Boolean,
isDeprecated: Boolean,
isImplicitObject: Boolean,
isImplicitClass: Boolean,
javaName: StringRef,
additionalJavaNames: Array[StringRef],
isLocal: Boolean,
visibleInJava: Boolean) {
this(parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
mySourceFileName = StringRef.toString(sourceFileName)
myName = StringRef.toString(name)
myQualName = StringRef.toString(qualName)
myJavaQualName = StringRef.toString(javaQualName)
myMethodNames = methodNames.map(StringRef.toString(_))
myJavaName = StringRef.toString(javaName)
myAdditionalJavaNames = additionalJavaNames.map(StringRef.toString(_))
this._isPackageObject = isPackageObject
_isScriptFileClass = isScriptFileClass
_isDeprecated = isDeprecated
_isImplicitObject = isImplicitObject
_isImplicitClass = isImplicitClass
local = isLocal
this.visibleInJava = visibleInJava
}
  // --- Simple accessors over the stored stub fields ---
  def isVisibleInJava: Boolean = visibleInJava
  def isLocal: Boolean = local
  def isPackageObject: Boolean = _isPackageObject
  def sourceFileName = mySourceFileName
  def qualName = myQualName
  def javaQualName = myJavaQualName
  def getName = myName
  def methodNames: Array[String] = myMethodNames
  def isScriptFileClass: Boolean = _isScriptFileClass
  def isDeprecated: Boolean = _isDeprecated
  def isImplicitObject: Boolean = _isImplicitObject
  def isImplicitClass: Boolean = _isImplicitClass
  //todo PsiClassStub methods
  // Java-interop queries below are hard-coded for Scala template definitions.
  def getLanguageLevel: LanguageLevel = LanguageLevel.JDK_1_5
  def isEnum: Boolean = false
  def isInterface: Boolean = false
  def isAnonymous: Boolean = false
  def isAnonymousInQualifiedNew: Boolean = false
  def isAnnotationType: Boolean = false
  def hasDeprecatedAnnotation: Boolean = false
  def isEnumConstantInitializer: Boolean = false
  // NOTE(review): returns null (no base-class reference text is tracked here);
  // callers on the Java side must tolerate a null result.
  def getBaseClassReferenceText: String = null
  def additionalJavaNames: Array[String] = myAdditionalJavaNames
  def javaName: String = myJavaName
} | LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/impl/ScTemplateDefinitionStubImpl.scala | Scala | apache-2.0 | 4,892 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.spark
import com.azure.cosmos.implementation.{CosmosClientMetadataCachesSnapshot, SparkBridgeImplementationInternal}
import com.azure.cosmos.spark.diagnostics.{DiagnosticsContext, LoggerHelper}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.read.{Batch, InputPartition, PartitionReaderFactory}
import org.apache.spark.sql.types.StructType
import java.time.Duration
import java.util.UUID
/**
 * Spark DataSource V2 [[Batch]] implementation that reads a Cosmos DB change feed
 * in batch mode: it plans one input partition per feed range (with the end-LSN
 * cleared so the feed is fully drained) and supplies the matching reader factory.
 *
 * @param session the active SparkSession (used for default parallelism)
 * @param schema the schema rows are projected into
 * @param config raw connector configuration key/value pairs
 * @param cosmosClientStateHandle broadcast snapshot of client metadata caches
 * @param diagnosticsConfig diagnostics/logging configuration
 */
private class ChangeFeedBatch
(
  session: SparkSession,
  schema: StructType,
  config: Map[String, String],
  cosmosClientStateHandle: Broadcast[CosmosClientMetadataCachesSnapshot],
  diagnosticsConfig: DiagnosticsConfig
) extends Batch {
  @transient private lazy val log = LoggerHelper.getLogger(diagnosticsConfig, this.getClass)
  // Correlation id that ties together all diagnostics emitted for this batch.
  private val correlationActivityId = UUID.randomUUID()
  private val batchId = correlationActivityId.toString
  log.logTrace(s"Instantiated ${this.getClass.getSimpleName}")
  private val defaultParallelism = session.sparkContext.defaultParallelism
  /**
   * Maps the configured change-feed start position to concrete LSNs and derives
   * one [[InputPartition]] per feed range, each with its end-LSN cleared so the
   * batch drains the feed completely.
   */
  override def planInputPartitions(): Array[InputPartition] = {
    log.logInfo(s"--> planInputPartitions $batchId")
    val readConfig = CosmosReadConfig.parseCosmosReadConfig(config)
    val clientConfiguration = CosmosClientConfiguration.apply(config, readConfig.forceEventualConsistency)
    val containerConfig = CosmosContainerConfig.parseCosmosContainerConfig(config)
    val partitioningConfig = CosmosPartitioningConfig.parseCosmosPartitioningConfig(config)
    val changeFeedConfig = CosmosChangeFeedConfig.parseCosmosChangeFeedConfig(config)
    // Loan pattern: the cached client is released when the lambda completes.
    Loan(
      CosmosClientCache.apply(
        clientConfiguration,
        Some(cosmosClientStateHandle),
        s"ChangeFeedBatch.planInputPartitions(batchId $batchId)"
      )).to(cacheItem => {
      val container = ThroughputControlHelper.getContainer(config, containerConfig, cacheItem.client)
      // This maps the StartFrom settings to concrete LSNs
      val initialOffsetJson = CosmosPartitionPlanner.createInitialOffset(container, changeFeedConfig, None)
      // Calculates the Input partitions based on start Lsn and latest Lsn
      val latestOffset = CosmosPartitionPlanner.getLatestOffset(
        config,
        ChangeFeedOffset(initialOffsetJson, None),
        changeFeedConfig.toReadLimit,
        // ok to use from cache because endLsn is ignored in batch mode
        Duration.ofMillis(PartitionMetadataCache.refreshIntervalInMsDefault),
        clientConfiguration,
        this.cosmosClientStateHandle,
        containerConfig,
        partitioningConfig,
        this.defaultParallelism,
        container
      )
      // Latest offset above has the EndLsn specified based on the point-in-time latest offset
      // For batch mode instead we need to reset it so that the change feed will get fully drained
      // NOTE(review): `.get` assumes getLatestOffset always populates inputPartitions
      // in batch mode — confirm against CosmosPartitionPlanner before relying on it.
      val inputPartitions = latestOffset
        .inputPartitions
        .get
        .map(partition => partition
          .withContinuationState(
            SparkBridgeImplementationInternal
              .extractChangeFeedStateForRange(initialOffsetJson, partition.feedRange),
            clearEndLsn = true))
      log.logInfo(s"<-- planInputPartitions $batchId (creating ${inputPartitions.length} partitions)")
      inputPartitions
    })
  }
  /** Builds the reader factory that turns planned partitions into row readers. */
  override def createReaderFactory(): PartitionReaderFactory = {
    ChangeFeedScanPartitionReaderFactory(
      config,
      schema,
      DiagnosticsContext(correlationActivityId, "Batch"),
      cosmosClientStateHandle,
      diagnosticsConfig)
  }
}
| Azure/azure-sdk-for-java | sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/ChangeFeedBatch.scala | Scala | mit | 3,677 |
package io.iteratee.files
import cats.MonadError
import io.iteratee.{ Enumerator, Iteratee }
import java.io.{ Closeable, File, InputStream, OutputStream }
import java.util.zip.ZipEntry
import scala.util.control.NonFatal
/**
 * File- and stream-based enumerators and iteratees, abstracted over an effect
 * type `F` that provides `MonadError[F, Throwable]`.
 */
trait FileModule[F[_]] {
  /** Enumerates the lines of a text file. */
  def readLines(file: File): Enumerator[F, String]
  /** Enumerates the lines read from an input stream. */
  def readLinesFromStream(stream: InputStream): Enumerator[F, String]
  /** Enumerates chunks of bytes from a file. */
  def readBytes(file: File): Enumerator[F, Array[Byte]]
  /** Enumerates chunks of bytes from an input stream. */
  def readBytesFromStream(stream: InputStream): Enumerator[F, Array[Byte]]
  /** Enumerates the entries of a zip file together with a stream for each entry. */
  def readZipStreams(file: File): Enumerator[F, (ZipEntry, InputStream)]
  /** Enumerates the direct children of a directory. */
  def listFiles(dir: File): Enumerator[F, File]
  /** Enumerates the contents of a directory recursively. */
  def listFilesRec(dir: File): Enumerator[F, File]
  /** Iteratee writing lines to a file. */
  def writeLines(file: File): Iteratee[F, String, Unit]
  /** Iteratee writing lines to an output stream. */
  def writeLinesToStream(stream: OutputStream): Iteratee[F, String, Unit]
  /** Iteratee writing byte chunks to a file. */
  def writeBytes(file: File): Iteratee[F, Array[Byte], Unit]
  /** Iteratee writing byte chunks to an output stream. */
  def writeBytesToStream(stream: OutputStream): Iteratee[F, Array[Byte], Unit]
  /**
   * Runs `f` with the acquired resource; if `f` fails with a non-fatal error the
   * resource is closed (close failures are swallowed) and the original error is
   * re-raised.
   *
   * NOTE(review): the resource is NOT closed here on the success path —
   * presumably the enumerator/iteratee produced by `f` is responsible for
   * closing it when it completes; confirm against the concrete implementations.
   */
  protected final def bracket[R <: Closeable, A](fr: F[R])(f: R => F[A])(implicit F: MonadError[F, Throwable]): F[A] =
    F.flatMap(fr) { r =>
      F.handleErrorWith(f(r)) {
        case NonFatal(e) =>
          try r.close() catch {
            // We've already failed, so we ignore this exception.
            case NonFatal(_) => ()
          }
          F.raiseError(e)
      }
    }
}
| flyingwalrusllc/iteratee | files/src/main/scala/io/iteratee/files/FileModule.scala | Scala | apache-2.0 | 1,334 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import java.nio.file.{Files, Paths}
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.{DataSet, SampleToMiniBatch, _}
import scala.collection.mutable
import com.intel.analytics.bigdl.parameters.{ConstantClippingProcessor,
L2NormClippingProcessor, ParameterProcessor}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils._
import com.intel.analytics.bigdl.visualization.{TrainSummary, ValidationSummary}
import org.apache.log4j.Logger
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ArrayBuffer
import scala.reflect.{ClassTag, classTag}
/**
* [[Optimizer]] is an abstract class which is used to train a model automatically
* with some certain optimization algorithms.
*
* @param model the model to be trained
* @param dataset the data set used to train a model
* @param criterion the criterion used to evaluate the loss of the model given an input
* @tparam T numeric type, which can be [[Float]] or [[Double]]
* @tparam D the type of elements in DataSet, such as [[MiniBatch]]
*/
// TODO: remove D to be MiniBatch[T]
abstract class Optimizer[T: ClassTag, D](
  protected var model: Module[T],
  protected var dataset: DataSet[D],
  protected var criterion: Criterion[T])(implicit ev : TensorNumeric[T])
{
  import Optimizer.{logger, checkSubModules}
  // Shared optimizer state table (epoch counters, learning-rate state, ...).
  protected var state: Table = T()
  // Defaults to a single global SGD optim method keyed by the model's name.
  protected var optimMethods: Map[String, OptimMethod[T]] = Map(model.getName -> new SGD())
  // Training ends when this trigger fires (default: 100 iterations).
  protected var endWhen: Trigger = Trigger.maxIteration(100)
  // Checkpointing configuration; disabled until setCheckpoint is called.
  protected var checkpointTrigger: Option[Trigger] = None
  protected var checkpointPath: Option[String] = None
  protected var isOverWrite: Boolean = false
  // Validation configuration; disabled until one of the setValidation overloads is called.
  protected var validationTrigger: Option[Trigger] = None
  protected var validationMethods: Option[Array[ValidationMethod[T]]] = None
  protected var validationDataSet: Option[DataSet[MiniBatch[T]]] = None
  // To save the summaries.
  protected var trainSummary: Option[TrainSummary] = None
  protected var validationSummary: Option[ValidationSummary] = None
  // To achieve better performance, please set dropPercentage as 0.04
  protected var dropPercentage: Double = 0.0
  protected var maxDropPercentage: Double = 0.0
  protected var computeThresholdbatchSize: Int = 100
  protected var warmupIterationNum: Int = 200
  /**
   * a list of ParameterProcessor, orders matter
   */
  protected var parameterProcessors = ArrayBuffer[ParameterProcessor]()
  // Fail fast at construction time if the model contains duplicated modules.
  model.checkDuplicate()
  /**
   * Trigger the optimization process
   * @return the model to be trained
   */
  def optimize(): Module[T]
  /**
   * make optimizer not check the singleton model on a node
   * @return
   */
  @deprecated("Use bigdl.check.singleton instead", "0.1.0")
  def disableCheckSingleton(): this.type = {
    this.checkSingleton = false
    // NOTE(review): deprecation warning goes to stdout via println rather than
    // the class logger; kept as-is since tooling may scrape it.
    println("disableCheckSingleton is deprecated. Please use bigdl.check.singleton instead")
    this
  }
  // TODO: Remove below code to DistriOptimizer after disableCheckSingleton is not supported
  protected var checkSingleton = System.getProperty("bigdl.check.singleton",
    false.toString).toBoolean
  /**
   * Set a validate evaluation
   *
   * @param trigger how often to evaluation validation set
   * @param dataset validate data set in type of [[DataSet]] of [[MiniBatch]]
   * @param vMethods a set of validation method [[ValidationMethod]]
   * @return this optimizer
   */
  def setValidation(trigger: Trigger, dataset: DataSet[MiniBatch[T]],
    vMethods : Array[ValidationMethod[T]])
  : this.type = {
    this.validationTrigger = Some(trigger)
    this.validationDataSet = Some(dataset)
    this.validationMethods = Some(vMethods)
    this
  }
  /**
   * Set a validate evaluation
   *
   * @param trigger how often to evaluation validation set
   * @param sampleRDD validate data set in type of [[RDD]] of [[Sample]]
   * @param vMethods a set of validation method [[ValidationMethod]]
   * @param batchSize batch size
   * @param featurePaddingParam feature padding strategy, see
   *                            [[com.intel.analytics.bigdl.dataset.PaddingParam]] for details.
   * @param labelPaddingParam label padding strategy, see
   *                          [[com.intel.analytics.bigdl.dataset.PaddingParam]] for details.
   *
   * @return this optimizer
   */
  def setValidation(trigger: Trigger, sampleRDD: RDD[Sample[T]],
    vMethods : Array[ValidationMethod[T]], batchSize: Int,
    featurePaddingParam: PaddingParam[T],
    labelPaddingParam: PaddingParam[T]
  ): this.type = {
    this.validationTrigger = Some(trigger)
    val dataSet =
      (DataSet.rdd(sampleRDD) ->
        SampleToMiniBatch(batchSize, Some(featurePaddingParam), Some(labelPaddingParam)))
        .toDistributed()
    this.validationDataSet = Some(dataSet)
    this.validationMethods = Some(vMethods)
    this
  }
  /**
   * Set a validate evaluation
   *
   * @param trigger how often to evaluation validation set
   * @param sampleRDD validate data set in type of [[RDD]] of [[Sample]]
   * @param vMethods a set of validation method [[ValidationMethod]]
   * @param batchSize batch size
   * @return this optimizer
   */
  def setValidation(trigger: Trigger, sampleRDD: RDD[Sample[T]],
    vMethods : Array[ValidationMethod[T]], batchSize: Int)
  : this.type = {
    this.validationTrigger = Some(trigger)
    val dataSet =
      (DataSet.rdd(sampleRDD) -> SampleToMiniBatch(batchSize))
        .toDistributed()
    this.validationDataSet = Some(dataSet)
    this.validationMethods = Some(vMethods)
    this
  }
  /**
   * Set validate evaluation
   * @param trigger how often to evaluation validation set
   * @param sampleRDD validate data set in type of [[RDD]] of [[Sample]]
   * @param vMethods a set of validation method [[ValidationMethod]]
   * @param batchSize batch size
   * @param miniBatch construct MiniBatch with a specified miniBatch type
   * @return
   */
  def setValidation(trigger: Trigger, sampleRDD: RDD[Sample[T]],
    vMethods : Array[ValidationMethod[T]], batchSize: Int, miniBatch: MiniBatch[T])
  : this.type = {
    this.validationTrigger = Some(trigger)
    val dataSet =
      (DataSet.rdd(sampleRDD) -> SampleToMiniBatch(miniBatch, batchSize, None))
        .toDistributed()
    this.validationDataSet = Some(dataSet)
    this.validationMethods = Some(vMethods)
    this
  }
  /**
   * Set a check point saved at `path` triggered by `trigger`
   *
   * @param path the directory to save
   * @param trigger how often to save the check point
   * @return the optimizer
   */
  def setCheckpoint(path: String, trigger: Trigger): this.type = {
    // Only validate local paths eagerly; HDFS paths are checked at write time.
    if (!path.startsWith(File.hdfsPrefix)) {
      require(Files.isDirectory(Paths.get(path)), s"Optimizer.setCheckpoint: $path is not a folder")
    }
    this.checkpointPath = Some(path)
    this.checkpointTrigger = Some(trigger)
    this
  }
  /**
   * Get the directory of saving checkpoint
   */
  def getCheckpointPath(): Option[String] = {
    this.checkpointPath
  }
  /**
   * Enable train summary.
   */
  def setTrainSummary(trainSummary: TrainSummary): this.type = {
    this.trainSummary = Some(trainSummary)
    this
  }
  /**
   * Enable validation summary.
   */
  def setValidationSummary(validationSummary: ValidationSummary): this.type = {
    this.validationSummary = Some(validationSummary)
    this
  }
  /**
   * Enable overwrite saving checkpoint
   */
  def overWriteCheckpoint() : this.type = {
    isOverWrite = true
    this
  }
  // Resets per-optimMethod bookkeeping so a newly attached model starts
  // training from scratch ("epoch" 1, infinite loss, zero progress).
  private def resetEpoch(): Unit = {
    optimMethods.foreach{ case (moduleName, optimMethod) =>
      optimMethod.state.update("epoch", 1)
      optimMethod.state.update("neval", 1)
      optimMethod.state.update("Loss", Float.PositiveInfinity)
      optimMethod.state.update("score", 0f)
      optimMethod.state.update("recordsProcessedThisEpoch", 0)
    }
  }
  /**
   * Set a model to the optimizer.
   * Notice: if current optimMethod in this optimizer is not a global optimMethod,
   * this setModel will throw an exception. You should use setModelAndOptimMethods instead.
   *
   * @param newModel new model
   */
  def setModel(newModel: Module[T]): this.type = {
    // check if the old optimMethods is a global one.
    if (optimMethods.size == 1 && optimMethods.contains(model.getName())) {
      if (newModel.getName() != model.getName()) {
        // Re-key the single global optim method under the new model's name.
        optimMethods = Map(newModel.getName() -> optimMethods(model.getName()))
      }
      logger.info(s"Optimizer.setModel: Detect current optimMethod is a global optimMethod." +
        s" Automatically associate the current optimMethod with the new model.")
    } else {
      throw new IllegalArgumentException("Optimizer.setModel: Detect current optimMethod" +
        " is not a global optimMethod. Please use setModelAndOptimMethods")
    }
    model = newModel
    model.checkDuplicate()
    // if a new Model is set, then reset "epoch", "neval" .etc.
    resetEpoch()
    this
  }
  /**
   * Set new model and new optimMethods to the optimizer.
   *
   * @param newModel new model
   * @param newOptimMethods new optimMethods
   */
  def setModelAndOptimMethods(
    newModel: Module[T],
    newOptimMethods: Map[String, OptimMethod[T]]): this.type = {
    // check if the old optimMethods is a global one.
    model = newModel
    optimMethods = newOptimMethods
    model.checkDuplicate()
    // if a new Model is set, then reset "epoch", "neval" .etc.
    resetEpoch()
    this
  }
  /**
   * Set new train dataset.
   * User can supply a customized implementation of trait MiniBatch to define
   * how data is organized and retrieved in a mini batch.
   *
   * @param sampleRDD training Samples
   * @param batchSize mini batch size
   * @param miniBatchImpl An User-Defined MiniBatch implementation.
   * @return the Optimizer
   */
  def setTrainData(sampleRDD: RDD[Sample[T]],
    batchSize: Int,
    miniBatchImpl: MiniBatch[T]): this.type = {
    throw new UnsupportedOperationException(
      s"setTrainData(sampleRDD, batchSize,miniBatch) " +
        s"is only supported in distributed optimizer")
    // NOTE(review): unreachable — the throw above always aborts before this line.
    this
  }
  /**
   * Set new train dataset.
   *
   * @param sampleRDD training Samples
   * @param batchSize mini batch size
   * @param featurePaddingParam feature padding strategy, see
   *                            [[com.intel.analytics.bigdl.dataset.PaddingParam]] for details.
   * @param labelPaddingParam label padding strategy, see
   *                          [[com.intel.analytics.bigdl.dataset.PaddingParam]] for details.
   * @return the optimizer
   */
  def setTrainData(sampleRDD: RDD[Sample[T]],
    batchSize: Int,
    featurePaddingParam: PaddingParam[T] = null,
    labelPaddingParam: PaddingParam[T] = null): this.type = {
    throw new UnsupportedOperationException(
      s"setTrainData(sampleRDD,batchSize,featurePaddingParam=null,labelPaddingParam=null) " +
        s"is only supported in distributed optimizer")
    // NOTE(review): unreachable — the throw above always aborts before this line.
    this
  }
  /**
   * Set a new criterion to the optimizer
   *
   * @param newCriterion new criterion
   */
  def setCriterion(newCriterion: Criterion[T]): this.type = {
    this.criterion = newCriterion
    this
  }
  /**
   * Set a state(learning rate, epochs...) to the optimizer
   *
   * @param state the state to be saved
   */
  def setState(state: Table): this.type = {
    this.state = state
    this
  }
  /**
   * Set an optimization method
   *
   * @param method optimization method
   */
  def setOptimMethod(method : OptimMethod[T]): this.type = {
    checkSubModules(model, Array(model.getName()))
    this.optimMethods = Map(model.getName -> method)
    this
  }
  /**
   * Set optimization methods for each submodule.
   *
   * @param method A mapping of submodule -> OptimMethod
   */
  def setOptimMethods(method: Map[String, OptimMethod[T]]): this.type = {
    checkSubModules(model, method.keys.toSeq)
    this.optimMethods = method
    this
  }
  /**
   * When to stop, passed in a [[Trigger]]
   *
   * @param endWhen when to end
   * @return the optimizer
   */
  def setEndWhen(endWhen: Trigger): this.type = {
    this.endWhen = endWhen
    this
  }
  /**
   * Set dropping a certain percentage (`dropPercentage`) of models during distributed
   * training to accelerate, because some cached model may take too long.
   *
   * @param dropPercentage drop percentage
   * @param maxDropPercentage max drop percentage
   * @param batchsize batch size
   * @param warmupIteration how may iteration to warm up
   * @return this optimizer
   */
  def setDropModuleProperty(dropPercentage: Double, maxDropPercentage: Double,
    batchsize: Int = 100, warmupIteration: Int = 200): this.type = {
    this.dropPercentage = dropPercentage
    this.maxDropPercentage = maxDropPercentage
    require(dropPercentage >= 0 && dropPercentage <= maxDropPercentage)
    this.computeThresholdbatchSize = batchsize
    this.warmupIterationNum = warmupIteration
    this
  }
  // Hook for subclasses to prepare/cache input data before optimization; no-op here.
  def prepareInput(): Unit = {}
  /**
   * Disable gradient clipping
   * @return
   */
  def disableGradientClipping()
  : this.type = {
    parameterProcessors = parameterProcessors.filterNot(processor =>
      (processor.isInstanceOf[ConstantClippingProcessor] ||
        processor.isInstanceOf[L2NormClippingProcessor]))
    this
  }
  /**
   * Set constant gradient clipping
   * @param min the minimum value to clip by
   * @param max the maximum value to clip by
   * @return
   */
  def setConstantGradientClipping(min: Double, max: Double)
  : this.type = {
    require(min <= max, "min value can not be larger than max")
    // Replace an existing constant-clipping processor in place, otherwise append.
    val index = Optimizer.findIndex[ConstantClippingProcessor](parameterProcessors)
    if (index == -1) {
      parameterProcessors.append(new ConstantClippingProcessor(min, max))
    } else {
      parameterProcessors(index) = new ConstantClippingProcessor(min, max)
    }
    this
  }
  /**
   * Clip gradient to a maximum L2-norm
   * @param l2NormThreshold gradient L2-Norm threshold
   * @return
   */
  def setGradientClippingByl2Norm(l2NormThreshold: Double)
  : this.type = {
    require(optimMethods.size == 1, "Only support 1 optimMethod.")
    require(l2NormThreshold > 0, "l2NormThreshold should larger than zero")
    // Replace an existing L2-norm clipping processor in place, otherwise append.
    val index = Optimizer.findIndex[L2NormClippingProcessor](parameterProcessors)
    if (index == -1) {
      parameterProcessors.append(new L2NormClippingProcessor(l2NormThreshold))
    } else {
      parameterProcessors(index) = new L2NormClippingProcessor(l2NormThreshold)
    }
    this
  }
  /**
   * shutdown the optimizer, which will release the native resources if exists.
   */
  private[optim] def shutdown(): Unit = {}
  // Reserving optim methods per worker is a distributed-only capability.
  def reserveOptim(reserve: Boolean): this.type = {
    throw new UnsupportedOperationException(
      "Only support DistriOptimizer to reserve optim methods for each worker")
  }
}
object Optimizer {
  private val logger: Logger = Logger.getLogger(getClass)
  // Builds the standard progress-log prefix, e.g. "[Epoch 2 128/60000][Iteration 5][Wall Clock 1.2s]".
  private[bigdl] def header(epoch: Int, count: Int, total: Long, iter: Int, wallClockTime: Long)
  : String = {
    s"[Epoch $epoch $count/$total][Iteration $iter][Wall Clock ${wallClockTime / 1e9}s]"
  }
  /**
   * Check if the sub modules are in the model, if each sub modules' parameter
   * is contiguous, if sub modules' parameter is duplicated.
   * @param model
   * @param subModuleNames
   * @param ev
   * @tparam T
   */
  private[bigdl] def checkSubModules[T: ClassTag](
    model: Module[T],
    subModuleNames: Seq[String])(implicit ev: TensorNumeric[T]): Unit = {
    val modelParameters = model.getParameters()
    val p = subModuleNames.map{subModuleName =>
      val subModule = model(subModuleName)
      require(subModule.isDefined, s"Optimizer: couldn't find $subModuleName in $model")
      val subModuleWeights = subModule.get.getParameters()._1
      require(subModuleWeights.nElement() > 0, s"Optimizer: $subModuleName doesn't have" +
        s" any trainable parameters, please check your model and optimMethods.")
      // If the storage subModule's parameter is the same with the storage of the submodule,
      // then subModule's parameter is contiguous.
      require(modelParameters._1.storage() == subModuleWeights.storage(), s"Optimizer:" +
        s" $subModuleName's parameter is not contiguous.")
      (subModuleName, subModuleWeights)
    }.toArray
    // make sure if parameters in submodules aren't duplicated.
    if (p.length != 1) {
      // Sort by storage offset, then verify adjacent parameter ranges don't overlap.
      val sortedWeights = p.sortWith((a, b) => a._2.storageOffset() < b._2.storageOffset())
      var i = 0
      while (i < sortedWeights.length - 1) {
        val current = sortedWeights(i)
        val next = sortedWeights(i + 1)
        require(current._2.storageOffset() + current._2.nElement() <= next._2.storageOffset(),
          s"Optimizer: ${current._1} and ${next._1}'s parameters are duplicated." +
            s" Please check your model and optimMethods.")
        i += 1
      }
    }
  }
  /**
   * Combine the hyper parameters in optimMethods.
   *
   * NOTE(review): `.reduce` throws on an empty map — presumably `optimMethods`
   * is never empty at the call sites; confirm before relying on it elsewhere.
   */
  private[bigdl] def getHyperParameterLog(optimMethods: Map[String, OptimMethod[_]]): String = {
    optimMethods.map{ case (moduleName, optimMethod) =>
      val log = optimMethod.getHyperParameter()
      if (log.isEmpty) {
        log
      } else {
        s"${moduleName}'s hyper parameters: ${log} "
      }
    }.reduce(_ + _)
  }
  /**
   * Save a model to a directory as a checkpoint
   *
   * @param model the model to be saved
   * @param checkpointPath the directory to save at
   * @param overWrite if save name model exists in the directory,
   *                  is overwrite or not.
   * @param postfix the postfix of a model name
   * @tparam T model data type [[Double]] or [[Float]]
   */
  private[bigdl] def saveModel[T](model: Module[T], checkpointPath : Option[String],
    overWrite : Boolean, postfix: String = ""): Unit = {
    if (checkpointPath.isDefined) {
      model.save(s"${checkpointPath.get}/model$postfix", overWrite)
    }
  }
  /**
   * Save a state to a directory as a checkpoint
   *
   * @param state the state (learning rate, epochs...) to be saved
   * @param checkpointPath the directory to save at
   * @param overWrite if save name model exists in the directory,
   *                  is overwrite or not.
   * @param postfix the postfix of a state name
   */
  private[bigdl] def saveState(state: Table, checkpointPath : Option[String], overWrite : Boolean,
    postfix: String = ""): Unit = {
    if (checkpointPath.isDefined) {
      state.save(s"${checkpointPath.get}/state$postfix", overWrite)
    }
  }
  /**
   * Save OptimMethod to a directory as a checkpoint
   * @param optimMethod the method to be saved
   * @param checkpointPath the directory to save at
   * @param overWrite if save name method exists in the directory,
   *                  is overwrite or not.
   * @param postfix the postfix of a method name
   * @tparam T
   */
  private[bigdl] def saveOptimMethod[T: ClassTag]
  (optimMethod: OptimMethod[T], checkpointPath : Option[String], overWrite : Boolean,
    postfix: String = ""): Unit = {
    if (checkpointPath.isDefined) {
      optimMethod.save(s"${checkpointPath.get}/optimMethod$postfix", overWrite)
    }
  }
  /**
   * Apply an Optimizer.
   *
   * @param model model will be optimized
   * @param sampleRDD training Samples
   * @param criterion loss function
   * @param batchSize mini batch size
   * @param featurePaddingParam feature padding strategy, see
   *                            [[com.intel.analytics.bigdl.dataset.PaddingParam]] for details.
   * @param labelPaddingParam label padding strategy, see
   *                          [[com.intel.analytics.bigdl.dataset.PaddingParam]] for details.
   * @return An optimizer
   */
  def apply[T: ClassTag](
    model: Module[T],
    sampleRDD: RDD[Sample[T]],
    criterion: Criterion[T],
    batchSize: Int,
    featurePaddingParam: PaddingParam[T] = null,
    labelPaddingParam: PaddingParam[T] = null
  )(implicit ev: TensorNumeric[T]): Optimizer[T, MiniBatch[T]] = {
    // Translate nullable Java-friendly params into Options for the pipeline.
    val _featurePaddingParam = if (featurePaddingParam != null) Some(featurePaddingParam) else None
    val _labelPaddingParam = if (labelPaddingParam != null) Some(labelPaddingParam) else None
    new DistriOptimizer[T](
      _model = model,
      _dataset = (DataSet.rdd(sampleRDD) ->
        SampleToMiniBatch(batchSize, _featurePaddingParam, _labelPaddingParam))
        .toDistributed(),
      _criterion = criterion
    ).asInstanceOf[Optimizer[T, MiniBatch[T]]]
  }
  /**
   * Apply an optimizer.
   * User can supply a customized implementation of trait MiniBatch to define
   * how data is organize and retrieved in a mini batch.
   *
   * @param model model will be optimized
   * @param sampleRDD training Samples
   * @param criterion loss function
   * @param batchSize mini batch size
   * @param miniBatchImpl An User-Defined MiniBatch implementation
   * @return an new Optimizer
   */
  def apply[T: ClassTag](
    model: Module[T],
    sampleRDD: RDD[Sample[T]],
    criterion: Criterion[T],
    batchSize: Int,
    miniBatchImpl: MiniBatch[T]
  )(implicit ev: TensorNumeric[T]): Optimizer[T, MiniBatch[T]] = {
    new DistriOptimizer[T](
      _model = model,
      _dataset = (DataSet.rdd(sampleRDD) ->
        SampleToMiniBatch(miniBatchImpl, batchSize, None))
        .toDistributed(),
      _criterion = criterion
    ).asInstanceOf[Optimizer[T, MiniBatch[T]]]
  }
  /**
   * Apply an optimizer.
   *
   * @param model model will be optimizied
   * @param dataset the input dataset - determines the type of optimizer
   * @param criterion loss function
   * @return an new Optimizer
   */
  def apply[T: ClassTag, D](
    model: Module[T],
    dataset: DataSet[D],
    criterion: Criterion[T]
  )(implicit ev: TensorNumeric[T]): Optimizer[T, D] = {
    // Dispatch on the dataset flavor: distributed -> DistriOptimizer, local -> LocalOptimizer.
    dataset match {
      case d: DistributedDataSet[_] =>
        new DistriOptimizer[T](
          _model = model,
          _dataset = d.toDistributed().asInstanceOf[DistributedDataSet[MiniBatch[T]]],
          _criterion = criterion
        ).asInstanceOf[Optimizer[T, D]]
      case d: LocalDataSet[_] =>
        new LocalOptimizer[T](
          model = model,
          dataset = d.toLocal().asInstanceOf[LocalDataSet[MiniBatch[T]]],
          criterion = criterion
        ).asInstanceOf[Optimizer[T, D]]
      case _ =>
        throw new UnsupportedOperationException
    }
  }
  /**
   * find the index of type T
   * @param parameterProcessors
   * @return index
   */
  private[Optimizer] def findIndex[T <: ParameterProcessor: ClassTag](
    parameterProcessors: ArrayBuffer[ParameterProcessor]): Int = {
    // Linear scan; returns -1 when no processor of the requested type exists.
    // NOTE(review): uses early `return` — works, but `indexWhere` would be the
    // idiomatic equivalent if this is ever revisited.
    var i = 0
    while(i < parameterProcessors.size) {
      if (classTag[T].runtimeClass.isInstance(parameterProcessors(i))) {
        return i
      }
      i += 1
    }
    return -1
  }
}
| wzhongyuan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/optim/Optimizer.scala | Scala | apache-2.0 | 23,613 |
/*
* Copyright (C) 2013 Michael Thorsley
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*/
package com.eigenvektor.graph
import org.scalatest.FlatSpec
/** FlatSpec suite covering construction, traversal, reversal and mutation of DiGraph. */
class TestDiGraph extends FlatSpec {
  // Note: "initalize" below is a test-name typo; it is a runtime string, left unchanged.
  "DiGraph" should "initalize to empty" in {
    val g = DiGraph[String]()()
    assert (g.numNodes == 0)
    assert (g.numEdges == 0)
  }
  it should "contain only nodes" in {
    val g = DiGraph[String]("One", "Two", "Three")()
    assert (g.numNodes == 3)
    assert (g.numEdges == 0)
  }
  it should "contain two edges" in {
    val g = DiGraph[String]("One", "Two", "Three")("One"->"Two", "One"->"Three")
    assert (g.numNodes == 3)
    assert (g.numEdges == 2)
    // Edges are directed: only "One" has outgoing neighbours.
    assert (g.getNeighbours("One").map(_.to) == Set("Two", "Three"))
    assert (g.getNeighbours("Two").map(_.to) == Set())
    assert (g.getNeighbours("Three").map(_.to) == Set())
  }
  it should "reverse correctly" in {
    val g = DiGraph[String]("One", "Two", "Three")("One"->"Two", "One"->"Three")
    val gr = g.reverse
    assert (gr.numNodes == 3)
    assert (gr.numEdges == 2)
    // After reversal all edges point back at "One".
    assert (gr.getNeighbours("One").map(_.to) == Set())
    assert (gr.getNeighbours("Two").map(_.to) == Set("One"))
    assert (gr.getNeighbours("Three").map(_.to) == Set("One"))
  }
  it should "fail to create" in {
    // An edge referencing a node ("Five") not in the node set must be rejected.
    intercept[NoSuchElementException] {
      val g = DiGraph[String]("One", "Two", "Three")("One"->"Two", "One"->"Three", "Five"->"Three")
    }
  }
  it should "add nodes correctly" in {
    val g = DiGraph[String]("One", "Two", "Three")("One"->"Two", "One"->"Three")
    assert (g.numNodes == 3)
    assert (g.numEdges == 2)
    // Adding a node is persistent: `h` is a new graph, edges are unchanged.
    val h = g + "Four"
    assert (h.numNodes == 4)
    assert (h.numEdges == 2)
    assert (h.getNeighbours("Four").map(_.to) == Set())
    assert (h.getNeighbours("One").map(_.to) == Set("Two", "Three"))
    assert (h.getNeighbours("Two").map(_.to) == Set())
    assert (h.getNeighbours("Three").map(_.to) == Set())
  }
  it should "add edges correctly" in {
    val g = DiGraph[String]("One", "Two", "Three")("One"->"Two", "One"->"Three")
    assert (g.numNodes == 3)
    assert (g.numEdges == 2)
    val h = g + "Four" + new DiGraph.DiGraphEdge("Four", "One")
    assert (h.numNodes == 4)
    assert (h.numEdges == 3)
    assert (h.getNeighbours("Four").map(_.to) == Set("One"))
    assert (h.getNeighbours("One").map(_.to) == Set("Two", "Three"))
    assert (h.getNeighbours("Two").map(_.to) == Set())
    assert (h.getNeighbours("Three").map(_.to) == Set())
  }
  "WeightedDiGraph" should "construct correctly" in {
    val g = DiGraph.getWeighted[String]("One", "Two", "Three")(("One", "Two", 5), ("One", "Three", 10))
    assert (g.numNodes == 3)
    assert (g.numEdges == 2)
  }
  it should "add nodes correctly" in {
    val g = DiGraph.getWeighted[String]("One", "Two", "Three")(("One", "Two", 5), ("One", "Three", 10))
    assert (g.numNodes == 3)
    assert (g.numEdges == 2)
    val h = g + "Four"
    assert (h.numNodes == 4)
    assert (h.numEdges == 2)
    assert (h.getNeighbours("Four").map(_.to) == Set())
    assert (h.getNeighbours("One").map(_.to) == Set("Two", "Three"))
    assert (h.getNeighbours("Two").map(_.to) == Set())
    assert (h.getNeighbours("Three").map(_.to) == Set())
    // Weights survive the node addition.
    assert (h.getNeighbours("One").map(_.weight) == Set(5, 10))
  }
  it should "add edges correctly" in {
    val g = DiGraph.getWeighted[String]("One", "Two", "Three")(("One", "Two", 5), ("One", "Three", 10))
    assert (g.numNodes == 3)
    assert (g.numEdges == 2)
    val h = g + "Four" + new DiGraph.WeightedDiGraphEdge("Four", "One", 8)
    assert (h.numNodes == 4)
    assert (h.numEdges == 3)
    assert (h.getNeighbours("Four").map(_.to) == Set("One"))
    assert (h.getNeighbours("One").map(_.to) == Set("Two", "Three"))
    assert (h.getNeighbours("Two").map(_.to) == Set())
    assert (h.getNeighbours("Three").map(_.to) == Set())
    assert (h.getNeighbours("One").map(_.weight) == Set(5, 10))
    assert (h.getNeighbours("Four").map(_.weight) == Set(8))
  }
} | Vyzen/trout | src/test/scala/com/eigenvektor/graph/TestDiGraph.scala | Scala | gpl-3.0 | 4,683 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.cfclerk
import com.normation.eventlog.EventActor
/** Package object for the CFClerk domain model: shared domain-wide constants. */
package object domain {
  // Event actor used to attribute event-log entries generated by CFClerk itself.
  val CfclerkEventActor = EventActor("CFClerk")
}
import shapeless._
/**
* Nexus: Typesafe tensors / deep learning for Scala.
* @author Tongfei Chen
* @since 0.1.0
*/
/**
 * Package object for Nexus: shared type aliases and the package-wide
 * random source used for reproducible initialisation.
 */
package object nexus {
  // Alias for HList / HNil: think `$` as the end of a regex.
  // The explicit type annotation avoids some implicit search bugs.
  private[nexus] val $: HNil = HNil

  /** Identity type constructor: `Id[A]` is just `A`. */
  type Id[A] = A
  /** Identity function mirroring the `Id` type alias. */
  def Id[A](a: A): Id[A] = a

  // Wildcard slice marker, re-exported for concise indexing syntax.
  val ? = Slice.?

  // Current RNG seed; initialised from the clock, replaced by setSeed.
  private[nexus] var seed: Long = System.nanoTime()

  /**
   * Resets the package-wide random seed and rebuilds the random source.
   * Writes are synchronized on the package object, but reads of
   * `randomSource` elsewhere are not — NOTE(review): callers racing with
   * setSeed may observe either the old or new source; confirm intended.
   */
  def setSeed(newSeed: Long): Unit = synchronized {
    seed = newSeed
    randomSource = new java.util.Random(seed)
  }

  // Package-wide random source, seeded from `seed`.
  private[nexus] var randomSource = new java.util.Random(seed)
}
| ctongfei/nexus | tensor/src/main/scala/nexus/package.scala | Scala | mit | 620 |
package org.sisioh.aws4s.s3.model
import com.amazonaws.services.s3.model.{ EncryptionMaterials, PutInstructionFileRequest, S3ObjectId }
import org.sisioh.aws4s.PimpedType
import scala.collection.JavaConverters._
/**
 * Factory for `PutInstructionFileRequest` instances, converting Scala
 * collections to the Java types the AWS SDK constructors expect.
 */
object PutInstructionFileRequestFactory {

  /** Creates a request from a materials-description map (converted to a Java map). */
  def create(s3ObjectId: S3ObjectId, matDesc: Map[String, String], suffix: String): PutInstructionFileRequest =
    new PutInstructionFileRequest(s3ObjectId, matDesc.asJava, suffix)

  /** Creates a request carrying explicit encryption materials. */
  def create(s3ObjectId: S3ObjectId,
             encryptionMaterials: EncryptionMaterials,
             suffix: String): PutInstructionFileRequest =
    new PutInstructionFileRequest(s3ObjectId, encryptionMaterials, suffix)
}
/**
 * Zero-allocation (value class) enrichment of `PutInstructionFileRequest`
 * exposing Scala-friendly accessors: immutable `Map` instead of the Java
 * map, and `Option` wrappers for the SDK's possibly-null getters.
 */
class RichPutInstructionFileRequest(val underlying: PutInstructionFileRequest)
    extends AnyVal
    with PimpedType[PutInstructionFileRequest] {

  def s3ObjectId: S3ObjectId = underlying.getS3ObjectId

  // Option(...) maps a null SDK result to None.
  def encryptionMaterialsOpt: Option[EncryptionMaterials] =
    Option(underlying.getEncryptionMaterials)

  // Copies the underlying Java map into an immutable Scala Map.
  def materialsDescription: Map[String, String] =
    underlying.getMaterialsDescription.asScala.toMap

  def suffix: String = underlying.getSuffix

  // NOTE(review): the accessors below rely on inferred result types; explicit
  // annotations (matching the SDK getter types) would make the public API
  // clearer — confirm the SDK return types before adding them.
  def cannedAclOpt = Option(underlying.getCannedAcl)

  def accessControlListOpt = Option(underlying.getAccessControlList)

  def redirectLocationOpt = Option(underlying.getRedirectLocation)

  def storageClassOpt = Option(underlying.getStorageClass)
}
| sisioh/aws4s | aws4s-s3/src/main/scala/org/sisioh/aws4s/s3/model/RichPutInstructionFileRequest.scala | Scala | mit | 1,385 |
/*
* Copyright (c) 2012-15 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.language.dynamics
import labelled.{ FieldType, field }
import ops.coproduct.{ Inject, Selector => CSelector }
import ops.hlist.{ At, Init, Last, Prepend, Selector, ReplaceAt, Replacer, Tupler }
import ops.record.{ Selector => RSelector, Updater }
import tag.@@
/**
 * A lens: a composable getter/setter focused on a value of type `A`
 * contained within a value of type `S`.
 */
trait Lens[S, A] extends LPLens[S, A] { outer =>
  /** Extracts the focused value from `s`. */
  def get(s: S): A
  /** Returns a copy of `s` with the focused value replaced by `a`. */
  def set(s: S)(a: A): S
  /** Applies `f` to the focused value (get, transform, set). */
  def modify(s: S)(f: A => A): S = set(s)(f(get(s)))

  /** Composes with an outer lens: T ~> S and S ~> A give T ~> A. */
  def compose[T](g: Lens[T, S]) = new Lens[T, A] {
    def get(t: T): A = outer.get(g.get(t))
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }

  /** Composes with an outer prism; the result is a prism since the outer focus may be absent. */
  def compose[T](g: Prism[T, S]) = new Prism[T, A] {
    def get(t: T): Option[A] = g.get(t).map(outer.get)
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }

  /** Focuses on the n-th field of `A` (positional selection). */
  def >>(n: Nat)(implicit mkLens: MkNthFieldLens[A, n.N]): Lens[S, mkLens.Elem] = mkLens() compose this
  /** Focuses on the field of `A` named by the singleton witness `k`. */
  def >>(k: Witness)(implicit mkLens: MkFieldLens[A, k.T]): Lens[S, mkLens.Elem] = mkLens() compose this

  /** Dynamic member selection: `aLens.foo` focuses on the field `foo` of `A`. */
  def selectDynamic(k: String)
    (implicit mkLens: MkSelectDynamicOptic[Lens[S, A], A, Symbol @@ k.type, Nothing]): mkLens.Out = mkLens(this)

  /** Narrows the focus to constructor/subtype `B` of `A`, yielding a prism. */
  def apply[B](implicit mkPrism: MkCtorPrism[A, B]): Prism[S, B] = mkPrism() compose this

  /** Extractor support: always succeeds with the focused value. */
  def unapply(s: S): Option[A] = Some(get(s))

  /** Pairs this lens with another over the same source, building a tuple lens. */
  def ~[B](other: Lens[S, B]) = new ProductLensBuilder[S, (A, B)] {
    def get(s: S): (A, B) = (outer.get(s), other.get(s))
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }

  /** Pairs this lens with a prism over the same source, building a tuple prism. */
  def ~[B](other: Prism[S, B]) = new ProductPrismBuilder[S, (A, B)] {
    def get(s: S): Option[(A, B)] = other.get(s).map((outer.get(s), _))
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }
}

/**
 * Low-priority Dynamic support for [[Lens]]: the extra type parameter `B`
 * lets an explicitly-typed selection also target a constructor.
 */
trait LPLens[S, A] extends Dynamic with Serializable { self: Lens[S, A] =>
  def selectDynamic[B](k: String)
    (implicit mkLens: MkSelectDynamicOptic[Lens[S, A], A, Symbol @@ k.type, B], dummy: DummyImplicit): mkLens.Out = mkLens(this)
}
/**
 * A prism: a composable optic whose focus may be absent — e.g. one
 * constructor of a sealed hierarchy, or a value at a map key.
 */
trait Prism[S, A] extends LPPrism[S, A] { outer =>
  /** Extracts the focus if present. */
  def get(s: S): Option[A]
  /** Returns `s` rebuilt with the focus set to `a`. */
  def set(s: S)(a: A): S
  /** Applies `f` to the focus when present; returns `s` unchanged otherwise. */
  def modify(s: S)(f: A => A): S = get(s).map(f).map(a => set(s)(a)).getOrElse(s)

  /** Composes with an outer lens: the result remains a prism. */
  def compose[T](g: Lens[T, S]) = new Prism[T, A] {
    def get(t: T): Option[A] = outer.get(g.get(t))
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }

  /** Composes with an outer prism: both foci must be present for get to succeed. */
  def compose[T](g: Prism[T, S]) = new Prism[T, A] {
    def get(t: T): Option[A] = g.get(t).flatMap(outer.get)
    def set(t: T)(a: A): T = g.modify(t)(outer.set(_)(a))
  }

  /** Dynamic member selection on the focus. */
  def selectDynamic(k: String)
    (implicit mkPrism: MkSelectDynamicOptic[Prism[S, A], A, Symbol @@ k.type, Nothing]): mkPrism.Out = mkPrism(this)

  /** Narrows the focus further, to constructor `B` of `A`. */
  def apply[B](implicit mkPrism: MkCtorPrism[A, B]): Prism[S, B] = mkPrism() compose this

  /** Extractor support: succeeds only when the focus is present. */
  def unapply(s: S): Option[A] = get(s)

  /** Pairs this prism with a lens over the same source, building a tuple prism. */
  def ~[B](other: Lens[S, B]) = new ProductPrismBuilder[S, (A, B)] {
    def get(s: S): Option[(A, B)] = outer.get(s).map((_, other.get(s)))
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }

  /** Pairs this prism with another; get succeeds only when both match. */
  def ~[B](other: Prism[S, B]) = new ProductPrismBuilder[S, (A, B)] {
    def get(s: S): Option[(A, B)] =
      for {
        fst <- outer.get(s)
        snd <- other.get(s)
      } yield (fst, snd)
    def set(s: S)(ab: (A, B)) = other.set(outer.set(s)(ab._1))(ab._2)
  }
}

/**
 * Low-priority Dynamic support for [[Prism]], permitting explicitly-typed
 * selections that may target a constructor.
 */
trait LPPrism[S, A] extends Dynamic with Serializable { self: Prism[S, A] =>
  def selectDynamic[B](k: String)
    (implicit mkPrism: MkSelectDynamicOptic[Prism[S, A], A, Symbol @@ k.type, B], dummy: DummyImplicit): mkPrism.Out = mkPrism(this)
}
/**
 * A lens focused on a tuple of independently-focused values; `~` extends
 * the tuple one component at a time.
 */
trait ProductLensBuilder[C, P <: Product] extends Lens[C, P] with Serializable {
  outer =>
  /**
   * Appends another lens, growing the focused tuple `P` to `Q`.
   * The implicit evidence converts tuple <-> HList to append and re-split
   * the components.
   */
  def ~[T, L <: HList, LT <: HList, Q <: Product, QL <: HList](other: Lens[C, T])
    (implicit
      genp: Generic.Aux[P, L],            // view P as an HList...
      tpp: Tupler.Aux[L, P],              // ...and back
      pre: Prepend.Aux[L, T :: HNil, LT], // append the new component type
      tpq: Tupler.Aux[LT, Q],             // extended HList as tuple Q
      genq: Generic.Aux[Q, QL],
      init: Init.Aux[QL, L],              // split Q back into prefix...
      last: Last.Aux[QL, T]) =            // ...and final component
    new ProductLensBuilder[C, Q] {
      def get(c: C): Q = (genp.to(outer.get(c)) :+ other.get(c)).tupled
      def set(c: C)(q: Q) = {
        val l = genq.to(q)
        other.set(outer.set(c)(l.init.tupled))(l.last)
      }
    }
}
/**
 * A prism focused on a tuple of values; `get` succeeds only when every
 * component matches. `~` extends the tuple one component at a time.
 */
trait ProductPrismBuilder[C, P <: Product] extends Prism[C, P] with Serializable {
  outer =>
  /** Appends another prism, growing the focused tuple `P` to `Q`. */
  def ~[T, L <: HList, LT <: HList, Q <: Product, QL <: HList](other: Prism[C, T])
    (implicit
      genp: Generic.Aux[P, L],            // view P as an HList...
      tpp: Tupler.Aux[L, P],              // ...and back
      pre: Prepend.Aux[L, T :: HNil, LT], // append the new component type
      tpq: Tupler.Aux[LT, Q],             // extended HList as tuple Q
      genq: Generic.Aux[Q, QL],
      init: Init.Aux[QL, L],              // split Q back into prefix...
      last: Last.Aux[QL, T]) =            // ...and final component
    new ProductPrismBuilder[C, Q] {
      def get(c: C): Option[Q] =
        for {
          init <- outer.get(c)
          last <- other.get(c)
        } yield (genp.to(init) :+ last).tupled
      def set(c: C)(q: Q) = {
        val l = genq.to(q)
        other.set(outer.set(c)(l.init.tupled))(l.last)
      }
    }
}
/** Entry points for constructing basic lenses and prisms. */
object OpticDefns {
  /** Alias for `id[C]`: the identity lens on `C`. */
  def apply[C] = id[C]

  /** Polymorphic binary lens composition, usable in type-level folds. */
  object compose extends Poly2 {
    implicit def default[A, B, C] = at[Lens[B, C], Lens[A, B]](_ compose _)
  }

  /** The identity lens on `C`; also the root for path-based selection. */
  class RootLens[C] extends Lens[C, C] {
    /** Materialises the optic described by a [[Path]] rooted at `C`. */
    def apply[P <: HList](path: Path[P])(implicit mkPath: MkPathOptic[C, P]): mkPath.Out = mkPath()
    def get(c: C): C = c
    def set(c: C)(f: C): C = f
  }

  def id[C] = new RootLens[C]

  /** Lens from a Set to the membership flag of a fixed element. */
  def setLens[E](e: E) =
    new Lens[Set[E], Boolean] {
      def get(s: Set[E]): Boolean = s contains e
      def set(s: Set[E])(b: Boolean): Set[E] = if(b) s+e else s-e
    }

  /** Lens from a Map to the optional value at a fixed key; setting None removes the key. */
  def mapLens[K, V](k: K) =
    new Lens[Map[K, V], Option[V]] {
      def get(m: Map[K, V]): Option[V] = m get k
      def set(m: Map[K, V])(ov: Option[V]): Map[K, V] = ov match {
        case Some(v) => m+(k -> v)
        case None => m-k
      }
    }

  /** Prism from a Map to the value at a fixed key; set always inserts. */
  def mapPrism[K, V](k: K) =
    new Prism[Map[K, V], V] {
      def get(m: Map[K, V]): Option[V] = m get k
      def set(m: Map[K, V])(v: V): Map[K, V] = m+(k -> v)
    }

  // Convenience summoners for the Mk* type classes below.
  def hlistSelectLens[L <: HList, U](implicit mkLens: MkHListSelectLens[L, U]) = mkLens()
  def coproductSelectPrism[C <: Coproduct, T](implicit mkPrism: MkCoproductSelectPrism[C, T]) = mkPrism()
  def hlistNthLens[L <: HList, N <: Nat](implicit mkLens: MkHListNthLens[L, N]) = mkLens()
  def recordLens[R <: HList](k: Witness)(implicit mkLens: MkRecordSelectLens[R, k.T]) = mkLens()
}
/**
 * Type class composing an inner optic `L` with an outer optic `R`.
 * Lens-with-lens yields a lens; any composition involving a prism
 * yields a prism.
 */
trait OpticComposer[L, R] extends Serializable {
  type Out
  def apply(l: L, r: R): Out
}

object OpticComposer {
  type Aux[L, R, Out0] = OpticComposer[L, R] { type Out = Out0 }

  implicit def composeLL[S, A, T]: Aux[Lens[S, A], Lens[T, S], Lens[T, A]] =
    new OpticComposer[Lens[S, A], Lens[T, S]] {
      type Out = Lens[T, A]
      def apply(l: Lens[S, A], r: Lens[T, S]): Lens[T, A] = l compose r
    }

  implicit def composeLP[S, A, T]: Aux[Lens[S, A], Prism[T, S], Prism[T, A]] =
    new OpticComposer[Lens[S, A], Prism[T, S]] {
      type Out = Prism[T, A]
      def apply(l: Lens[S, A], r: Prism[T, S]): Prism[T, A] = l compose r
    }

  implicit def composePL[S, A, T]: Aux[Prism[S, A], Lens[T, S], Prism[T, A]] =
    new OpticComposer[Prism[S, A], Lens[T, S]] {
      type Out = Prism[T, A]
      def apply(l: Prism[S, A], r: Lens[T, S]): Prism[T, A] = l compose r
    }

  implicit def composePP[S, A, T]: Aux[Prism[S, A], Prism[T, S], Prism[T, A]] =
    new OpticComposer[Prism[S, A], Prism[T, S]] {
      type Out = Prism[T, A]
      def apply(l: Prism[S, A], r: Prism[T, S]): Prism[T, A] = l compose r
    }
}
/** Materialises a lens from a product `A` to its field keyed by `K`. */
trait MkFieldLens[A, K] extends Serializable {
  type Elem
  def apply(): Lens[A, Elem]
}

object MkFieldLens {
  type Aux[A, K, Elem0] = MkFieldLens[A, K] { type Elem = Elem0 }

  // Composes the labelled-generic iso with record selection on the repr.
  // NOTE(review): type parameter B appears unused; confirm before removing
  // (it is part of the public signature).
  implicit def mkFieldLens[A, K, R <: HList, B]
    (implicit
      mkGen: MkLabelledGenericLens.Aux[A, R],
      mkLens: MkRecordSelectLens[R, K]): Aux[A, K, mkLens.Elem] =
    new MkFieldLens[A, K] {
      type Elem = mkLens.Elem
      def apply(): Lens[A, mkLens.Elem] = mkLens() compose mkGen()
    }
}
/** Materialises a lens from a product `A` to its `N`-th field (positional). */
trait MkNthFieldLens[A, N <: Nat] extends Serializable {
  type Elem
  def apply(): Lens[A, Elem]
}

object MkNthFieldLens {
  type Aux[A, N <: Nat, Elem0] = MkNthFieldLens[A, N] { type Elem = Elem0 }

  // Composes the generic iso with positional HList selection.
  // NOTE(review): type parameter B appears unused; confirm before removing
  // (it is part of the public signature).
  implicit def mkGenPNth[A, N <: Nat, R <: HList, B]
    (implicit
      mkGen: MkGenericLens.Aux[A, R],
      mkLens: MkHListNthLens[R, N]): Aux[A, N, mkLens.Elem] =
    new MkNthFieldLens[A, N] {
      type Elem = mkLens.Elem
      def apply(): Lens[A, mkLens.Elem] = mkLens() compose mkGen()
    }
}
/** Materialises a prism from a sealed type `A` to one of its constructors `B`. */
trait MkCtorPrism[A, B] extends Serializable {
  def apply(): Prism[A, B]
}

object MkCtorPrism {
  // Composes the generic iso with coproduct selection of B.
  implicit def mkCtorPrism[A, R <: Coproduct, B]
    (implicit
      mkGen: MkGenericLens.Aux[A, R],
      mkPrism: MkCoproductSelectPrism[R, B]): MkCtorPrism[A, B] =
    new MkCtorPrism[A, B] {
      def apply(): Prism[A, B] = mkPrism() compose mkGen()
    }
}
/**
 * Given a coproduct `C` and a record key `K`, resolves the first product
 * member of `C` that has a field keyed by `K`.
 */
trait InferProduct[C <: Coproduct, K] extends Serializable {
  type Prod
}

object InferProduct {
  type Aux[C <: Coproduct, K, P] = InferProduct[C, K] { type Prod = P }

  // Head of the coproduct has a field K: select it.
  implicit def inferProduct1[P, R <: HList, T <: Coproduct, K]
    (implicit gen: LabelledGeneric.Aux[P, R], sel: RSelector[R, K]): Aux[P :+: T, K, P] =
    new InferProduct[P :+: T, K] {
      type Prod = P
    }

  // Otherwise recurse into the tail.
  implicit def inferProduct2[H, T <: Coproduct, K, P](implicit it: Aux[T, K, P]): Aux[H :+: T, K, P] =
    new InferProduct[H :+: T, K] {
      type Prod = P
    }
}
/**
 * Builds the optic returned by a `selectDynamic`/`apply` step: `R` is the
 * receiver optic, `A` its focus, `K` an optional field key and `B` an
 * optional constructor target.
 */
trait MkSelectDynamicOptic[R, A, K, B] extends Serializable {
  type Out
  def apply(r: R): Out
}

trait LowPriorityMkSelectDynamicOptic {
  type Aux[R, A, K, B, Out0] = MkSelectDynamicOptic[R, A, K, B] { type Out = Out0 }

  /**
   * Field `K` is not on `A` itself: infer the constructor of `A` carrying
   * it, narrow to that constructor, then select the field.
   */
  implicit def mkInferCtorSelField[R, A, C <: Coproduct, I, K, E]
    (implicit
      gen: Generic.Aux[A, C],
      infer: InferProduct.Aux[C, K, I],
      mkCSel: MkCtorPrism[A, I],
      mkPSel: MkFieldLens.Aux[I, K, E],
      compose: OpticComposer[Prism[A, E], R]
    ): Aux[R, A, K, Nothing, compose.Out] =
    new MkSelectDynamicOptic[R, A, K, Nothing] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkPSel() compose mkCSel(), r)
    }

  /** Select field `K` on `A`, then narrow the field's value to constructor `B`. */
  implicit def mkSelFieldCtor[R, A, K, B, C]
    (implicit
      mkPSel: MkFieldLens.Aux[A, K, C],
      mkCSel: MkCtorPrism[C, B],
      compose: OpticComposer[Prism[A, B], R]
    ): Aux[R, A, K, B, compose.Out] =
    new MkSelectDynamicOptic[R, A, K, B] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkCSel() compose mkPSel(), r)
    }
}

object MkSelectDynamicOptic extends LowPriorityMkSelectDynamicOptic {
  /** Highest priority: field `K` is directly on `A`. */
  implicit def mkSelField[R, A, K, E]
    (implicit
      mkLens: MkFieldLens.Aux[A, K, E],
      compose: OpticComposer[Lens[A, E], R]
    ): Aux[R, A, K, Nothing, compose.Out] =
    new MkSelectDynamicOptic[R, A, K, Nothing] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkLens(), r)
    }

  /** Pure constructor selection: narrow `A` to constructor `B` with no field key. */
  implicit def mkSelCtor[R, A, B]
    (implicit
      mkPrism: MkCtorPrism[A, B],
      compose: OpticComposer[Prism[A, B], R]
    ): Aux[R, A, Nothing, B, compose.Out] =
    new MkSelectDynamicOptic[R, A, Nothing, B] {
      type Out = compose.Out
      def apply(r: R): Out = compose(mkPrism(), r)
    }
}
/** Materialises the iso-lens between `T` and its generic representation. */
trait MkGenericLens[T] extends Serializable {
  type Repr
  def apply(): Lens[T, Repr]
}

object MkGenericLens {
  type Aux[T, Repr0] = MkGenericLens[T] { type Repr = Repr0 }

  // get = Generic.to, set = Generic.from (the previous value is discarded).
  implicit def mkGenericLens[T](implicit gen: Generic[T]): Aux[T, gen.Repr] =
    new MkGenericLens[T] {
      type Repr = gen.Repr
      def apply(): Lens[T, Repr] =
        new Lens[T, Repr] {
          def get(t: T): Repr = gen.to(t)
          def set(t: T)(r: Repr): T = gen.from(r)
        }
    }
}
/** Materialises the iso-lens between `T` and its labelled (record) representation. */
trait MkLabelledGenericLens[T] extends Serializable {
  type Repr
  def apply(): Lens[T, Repr]
}

object MkLabelledGenericLens {
  type Aux[T, Repr0] = MkLabelledGenericLens[T] { type Repr = Repr0 }

  // get = LabelledGeneric.to, set = LabelledGeneric.from.
  implicit def mkLabelledGenericLens[T](implicit gen: LabelledGeneric[T]): Aux[T, gen.Repr] =
    new MkLabelledGenericLens[T] {
      type Repr = gen.Repr
      def apply(): Lens[T, Repr] =
        new Lens[T, Repr] {
          def get(t: T): Repr = gen.to(t)
          def set(t: T)(r: Repr): T = gen.from(r)
        }
    }
}
/** Lens focused on the `N`-th element of an `HList`. */
trait MkHListNthLens[L <: HList, N <: Nat] extends Serializable {
  type Elem
  def apply(): Lens[L, Elem]
}

object MkHListNthLens {
  type Aux[L <: HList, N <: Nat, Elem0] = MkHListNthLens[L, N] { type Elem = Elem0 }

  // Needs both indexed access (At) and indexed replacement (ReplaceAt) at N.
  implicit def mkHListNthLens[L <: HList, N <: Nat, E]
    (implicit atx: At.Aux[L, N, E], replace: ReplaceAt.Aux[L, N, E, (E, L)]): Aux[L, N, E] =
    new MkHListNthLens[L, N] {
      type Elem = E
      def apply(): Lens[L, E] =
        new Lens[L, E] {
          def get(l: L): E = l[N]
          def set(l: L)(e: E): L = l.updatedAt[N](e)
        }
    }
}
/** Lens focused on the (first) element of type `U` in an `HList`. */
trait MkHListSelectLens[L <: HList, U] extends Serializable {
  def apply(): Lens[L, U]
}

object MkHListSelectLens {
  // Selector reads by type; Replacer writes back at the same position.
  implicit def mKHlistSelectLens[L <: HList, U]
    (implicit selector: Selector[L, U], replacer: Replacer.Aux[L, U, U, (U, L)]): MkHListSelectLens[L, U] =
    new MkHListSelectLens[L, U] {
      def apply(): Lens[L, U] =
        new Lens[L, U] {
          def get(l: L) = selector(l)
          def set(l: L)(u: U) = replacer(l, u)._2
        }
    }
}
/** Prism focused on the branch of type `T` in a coproduct. */
trait MkCoproductSelectPrism[C <: Coproduct, T] extends Serializable {
  def apply(): Prism[C, T]
}

object MkCoproductSelectPrism {
  // Selector tests/extracts the branch; Inject rebuilds the coproduct.
  implicit def mKCoproductSelectPrism[C <: Coproduct, T]
    (implicit selector: CSelector[C, T], injector: Inject[C, T]): MkCoproductSelectPrism[C, T] =
    new MkCoproductSelectPrism[C, T] {
      def apply(): Prism[C, T] =
        new Prism[C, T] {
          def get(c: C): Option[T] = selector(c)
          def set(c: C)(t: T): C = injector(t)
        }
    }
}
/** Lens focused on the field keyed by `K` in a record (labelled HList). */
trait MkRecordSelectLens[R <: HList, K] extends Serializable {
  type Elem
  def apply(): Lens[R, Elem]
}

object MkRecordSelectLens {
  type Aux[R <: HList, K, Elem0] = MkRecordSelectLens[R, K] { type Elem = Elem0 }

  // Selector reads by key; Updater writes the tagged field back.
  implicit def mkRecordSelectLens[R <: HList, K, E]
    (implicit selector: RSelector.Aux[R, K, E], updater: Updater.Aux[R, FieldType[K, E], R]): Aux[R, K, E] =
    new MkRecordSelectLens[R, K] {
      type Elem = E
      def apply(): Lens[R, E] =
        new Lens[R, E] {
          def get(r: R) = selector(r)
          def set(r: R)(e: E) = updater(r, field[K](e))
        }
    }
}
/**
 * Materialises the optic described by the path `P` (a reversed HList of
 * Select/Coselect steps) rooted at `S`.
 */
trait MkPathOptic[S, P <: HList] extends Serializable {
  type Out
  type Elem
  def apply(): Out
}

trait LowPriorityMkPathOptic {
  type Aux[S, P <: HList, Out0, E0] = MkPathOptic[S, P] { type Out = Out0 ; type Elem = E0 }
  type Aux1[S, P <: HList, Out0] = MkPathOptic[S, P] { type Out = Out0 }

  /**
   * Low-priority select step: the field `K` lives on an inferred
   * constructor of the current focus, so the step is prism-like.
   */
  implicit def mkCoselSelPathOptic[S, P <: HList, K, A, C <: Coproduct, I, E, R]
    (implicit
      mkPrefix: Aux[S, P, R, A],
      gen: Generic.Aux[A, C],
      infer: InferProduct.Aux[C, K, I],
      mkPrism: MkCtorPrism[A, I],
      mkLens: MkFieldLens.Aux[I, K, E],
      compose: OpticComposer[Prism[A, E], R]
    ): Aux[S, Select[K] :: P, compose.Out, E] =
    new MkPathOptic[S, Select[K] :: P] {
      type Out = compose.Out
      type Elem = E
      def apply(): compose.Out = compose(mkLens() compose mkPrism(), mkPrefix())
    }
}

object MkPathOptic extends LowPriorityMkPathOptic {
  /** Empty path: the identity lens on `S`. */
  implicit def mkHNilPathLens[S]: Aux[S, HNil, Lens[S, S], S] =
    new MkPathOptic[S, HNil] {
      type Out = Lens[S, S]
      type Elem = S
      def apply(): Lens[S, S] = lens[S]
    }

  /** Select step: focus the field `K` directly on the current focus `A`. */
  implicit def mkSelPathOptic[S, P <: HList, K, A, E, R]
    (implicit
      mkPrefix: Aux[S, P, R, A],
      mkLens: MkFieldLens.Aux[A, K, E],
      compose: OpticComposer[Lens[A, E], R]
    ): Aux[S, Select[K] :: P, compose.Out, E] =
    new MkPathOptic[S, Select[K] :: P] {
      type Out = compose.Out
      type Elem = E
      def apply(): compose.Out = compose(mkLens(), mkPrefix())
    }

  /** Coselect step: narrow the current focus to constructor `B`. */
  implicit def mkCoselPathOptic[S, P <: HList, B, A, R]
    (implicit
      mkPrefix: Aux[S, P, R, A],
      mkPrism: MkCtorPrism[A, B],
      compose: OpticComposer[Prism[A, B], R]
    ): Aux[S, Coselect[B] :: P, compose.Out, B] =
    new MkPathOptic[S, Coselect[B] :: P] {
      type Out = compose.Out
      type Elem = B
      def apply(): compose.Out = compose(mkPrism(), mkPrefix())
    }
}
/** Path step marker: select the field keyed by `T`. */
trait Select[T]

/** Path step marker: narrow to (coselect) the constructor `T`. */
trait Coselect[T]

/** Computes the path step(s) contributed by one `selectDynamic` call. */
trait Segment[P, S, T <: HList] {
  type Out <: HList
}

trait LowPrioritySegment {
  type Aux[P, S, T <: HList, Out0 <: HList] = Segment[P, S, T] { type Out = Out0 }

  // Typed step `path.foo[S]`: a constructor narrowing followed by a field selection.
  implicit def two[P, S, T <: HList]: Aux[P, S, T, Coselect[S] :: Select[Symbol @@ P] :: T] = new Segment[P, S, T] {
    type Out = Coselect[S] :: Select[Symbol @@ P] :: T
  }
}

object Segment extends LowPrioritySegment {
  // Plain step `path.foo`: a single field selection.
  implicit def one[P, T <: HList]: Aux[P, Nothing, T, Select[Symbol @@ P] :: T] = new Segment[P, Nothing, T] {
    type Out = Select[Symbol @@ P] :: T
  }
}
/**
 * A type-level description of a selection path, built incrementally with
 * dynamic member syntax and later materialised by [[MkPathOptic]].
 */
trait Path[T <: HList] extends LPPath[T] {
  type P = Path[T]
  type L = T
  // Aliases asserting that materialising this path over `T` yields a Lens/Prism.
  type Lens[T, E] = MkPathOptic.Aux1[T, L, shapeless.Lens[T, E]]
  type Prism[T, E] = MkPathOptic.Aux1[T, L, shapeless.Prism[T, E]]

  /** Prepends a constructor-narrowing step for `H`. */
  def apply[H]: Path[Coselect[H] :: T] = new Path[Coselect[H] :: T] {}

  /** Prepends a field-selection step for the member name `h`. */
  def selectDynamic(h: String)(implicit segment: Segment[h.type, Nothing, T]): Path[segment.Out] =
    new Path[segment.Out] {}
}

/** Low-priority Dynamic support allowing a typed step `path.foo[H]`. */
trait LPPath[T <: HList] extends Dynamic { self: Path[T] =>
  def selectDynamic[H](h: String)(implicit segment: Segment[h.type, H, T], dummy: DummyImplicit): Path[segment.Out] =
    new Path[segment.Out] {}
}

/** The empty path: root of all path expressions. */
object Path extends Path[HNil]
| rorygraves/perf_tester | corpus/shapeless/src/main/scala/shapeless/lenses.scala | Scala | apache-2.0 | 18,085 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.exceptions.StackDepthExceptionHelper.getStackDepthFun
import org.scalatest.exceptions.TestFailedException
/**
* Trait that provides an implicit conversion that adds <code>left.value</code> and <code>right.value</code> methods
* to <code>Either</code>, which will return the selected value of the <code>Either</code> if defined,
* or throw <code>TestFailedException</code> if not.
*
* <p>
* This construct allows you to express in one statement that an <code>Either</code> should be <em>left</em> or <em>right</em>
* and that its value should meet some expectation. Here's are some examples:
* </p>
*
* <pre class="stHighlight">
* either1.right.value should be > 9
* either2.left.value should be ("Muchas problemas")
* </pre>
*
* <p>
* Or, using assertions instead of matcher expressions:
* </p>
*
* <pre class="stHighlight">
* assert(either1.right.value > 9)
* assert(either2.left.value === "Muchas problemas")
* </pre>
*
* <p>
* Were you to simply invoke <code>right.get</code> or <code>left.get</code> on the <code>Either</code>,
* if the <code>Either</code> wasn't defined as expected (<em>e.g.</em>, it was a <code>Left</code> when you expected a <code>Right</code>), it
* would throw a <code>NoSuchElementException</code>:
* </p>
*
* <pre class="stHighlight">
* val either: Either[String, Int] = Left("Muchas problemas")
*
* either.right.get should be > 9 // either.right.get throws NoSuchElementException
* </pre>
*
* <p>
* The <code>NoSuchElementException</code> would cause the test to fail, but without providing a <a href="exceptions/StackDepth.html">stack depth</a> pointing
* to the failing line of test code. This stack depth, provided by <a href="exceptions/TestFailedException.html"><code>TestFailedException</code></a> (and a
* few other ScalaTest exceptions), makes it quicker for
* users to navigate to the cause of the failure. Without <code>EitherValues</code>, to get
* a stack depth exception you would need to make two statements, like this:
* </p>
*
* <pre class="stHighlight">
* val either: Either[String, Int] = Left("Muchas problemas")
*
* either should be ('right) // throws TestFailedException
* either.right.get should be > 9
* </pre>
*
* <p>
* The <code>EitherValues</code> trait allows you to state that more concisely:
* </p>
*
* <pre class="stHighlight">
* val either: Either[String, Int] = Left("Muchas problemas")
*
* either.right.value should be > 9 // either.right.value throws TestFailedException
* </pre>
*/
trait EitherValues {

  import scala.language.implicitConversions

  /**
   * Implicit conversion that adds a <code>value</code> method to <code>LeftProjection</code>.
   *
   * @param leftProj the <code>LeftProjection</code> on which to add the <code>value</code> method
   */
  implicit def convertLeftProjectionToValuable[L, R](leftProj: Either.LeftProjection[L, R]): LeftValuable[L, R] =
    new LeftValuable(leftProj)

  /**
   * Implicit conversion that adds a <code>value</code> method to <code>RightProjection</code>.
   *
   * @param rightProj the <code>RightProjection</code> on which to add the <code>value</code> method
   */
  implicit def convertRightProjectionToValuable[L, R](rightProj: Either.RightProjection[L, R]): RightValuable[L, R] =
    new RightValuable(rightProj)

  /**
   * Wrapper class that adds a <code>value</code> method to <code>LeftProjection</code>, allowing
   * you to make statements like:
   *
   * <pre class="stHighlight">
   * either.left.value should be > 9
   * </pre>
   *
   * @param leftProj A <code>LeftProjection</code> to convert to <code>LeftValuable</code>, which provides the
   *   <code>value</code> method.
   */
  class LeftValuable[L, R](leftProj: Either.LeftProjection[L, R]) {

    /**
     * Returns the <code>Left</code> value contained in the wrapped <code>LeftProjection</code>, if defined as a <code>Left</code>, else throws <code>TestFailedException</code> with
     * a detail message indicating the <code>Either</code> was defined as a <code>Right</code>, not a <code>Left</code>.
     */
    def value: L = {
      try {
        leftProj.get
      }
      catch {
        // Translate the projection's NoSuchElementException into a
        // TestFailedException carrying a useful stack depth.
        case cause: NoSuchElementException =>
          throw new TestFailedException(sde => Some(Resources.eitherLeftValueNotDefined), Some(cause), getStackDepthFun("EitherValues.scala", "value"))
      }
    }
  }

  /**
   * Wrapper class that adds a <code>value</code> method to <code>RightProjection</code>, allowing
   * you to make statements like:
   *
   * <pre class="stHighlight">
   * either.right.value should be > 9
   * </pre>
   *
   * @param rightProj A <code>RightProjection</code> to convert to <code>RightValuable</code>, which provides the
   *   <code>value</code> method.
   */
  class RightValuable[L, R](rightProj: Either.RightProjection[L, R]) {

    /**
     * Returns the <code>Right</code> value contained in the wrapped <code>RightProjection</code>, if defined as a <code>Right</code>, else throws <code>TestFailedException</code> with
     * a detail message indicating the <code>Either</code> was defined as a <code>Left</code>, not a <code>Right</code>.
     */
    def value: R = {
      try {
        rightProj.get
      }
      catch {
        // Translate the projection's NoSuchElementException into a
        // TestFailedException carrying a useful stack depth.
        case cause: NoSuchElementException =>
          throw new TestFailedException(sde => Some(Resources.eitherRightValueNotDefined), Some(cause), getStackDepthFun("EitherValues.scala", "value"))
      }
    }
  }
}
/**
 * Companion object that facilitates the importing of <code>EitherValues</code> members as
 * an alternative to mixing it in. One use case is to import <code>EitherValues</code>'s members so you can use
 * <code>left.value</code> and <code>right.value</code> on <code>Either</code> in the Scala interpreter:
 *
 * <pre class="stREPL">
 * $ scala -cp scalatest-1.7.jar
 * Welcome to Scala version 2.9.1.final (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_29).
 * Type in expressions to have them evaluated.
 * Type :help for more information.
 *
 * scala&gt; import org.scalatest._
 * import org.scalatest._
 *
 * scala&gt; import matchers.Matchers._
 * import matchers.Matchers._
 *
 * scala&gt; import EitherValues._
 * import EitherValues._
 *
 * scala&gt; val e: Either[String, Int] = Left("Muchas problemas")
 * e: Either[String,Int] = Left(Muchas problemas)
 *
 * scala&gt; e.left.value should be ("Muchas problemas")
 *
 * scala&gt; e.right.value should be &lt; 9
 * org.scalatest.TestFailedException: The Either on which rightValue was invoked was not defined.
 *   at org.scalatest.EitherValues$RightValuable.value(EitherValues.scala:148)
 *   at .&lt;init&gt;(&lt;console&gt;:18)
 *   ...
 * </pre>
 */
object EitherValues extends EitherValues
| SRGOM/scalatest | scalatest/src/main/scala/org/scalatest/EitherValues.scala | Scala | apache-2.0 | 7,313 |
/**
* Copyright 2010-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package examples
// Pre-rendered example snippets for the PropSpec style trait, displayed on the
// scalatest-website user-guide pages. Every `val` below is an HTML fragment:
// the <span class="st...">...</span> markup drives the site's syntax colouring,
// so the literal contents must not be "fixed" as if they were Scala code.
object PropSpecExamples extends StyleTraitExamples {

  // Display name of the style trait these examples document.
  val name: String = "PropSpec"

  // One-line blurb shown next to the style name.
  val description: String = """PropSpec is perfect for teams that want to write tests exclusively in terms of property checks; also a good choice for writing the occasional test matrix when a different style trait is chosen as the main unit testing style."""

  // Earlier, table-driven version of the usage example, kept for reference.
  /*
  val exampleUsage: String =
    """<span class="stImport">import org.scalatest._</span>
      |<span class="stImport">import prop._</span>
      |<span class="stImport">import scala.collection.immutable._</span>
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span> <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  <span class="stReserved">val</span> examples =
      |    <span class="stType">Table</span>("set",
      |      <span class="stType">BitSet</span>.empty,
      |      <span class="stType">HashSet</span>.empty[<span class="stType">Int</span>],
      |      <span class="stType">TreeSet</span>.empty[<span class="stType">Int</span>])
      |  property(<span class="stLiteral">"an empty Set should have size 0"</span>) {
      |    forAll(examples) { set => set.size should be (<span class="stLiteral">0</span>) }
      |  }
      |  property(<span class="stLiteral">"invoke head on an empty set should produce NoSuchElementException"</span>) {
      |    forAll(examples) {
      |      set => evaluating { set.head } should produce [<span class="stType">NoSuchElementException</span>]
      |    }
      |  }
      |} """.stripMargin
  */

  // Main "at a glance" example: shared fixtures via withFixture, ignored /
  // pending / tagged tests, fixture.PropSpec, and class-level annotations.
  val exampleUsage: String =
    """<span class="stImport">import org.scalatest._</span>
      |
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> {
      |  <span class="stReserved">override</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">NoArgTest</span>) = { <span class="stExplain">// Define a shared fixture</span>
      |    <span class="stExplain">// Shared setup (run at beginning of each test)</span>
      |    <span class="stReserved">try</span> test()
      |    <span class="stReserved">finally</span> {
      |      <span class="stExplain">// Shared cleanup (run at end of each test)</span>
      |    }
      |  }
      |  <span class="stExplain">// Define tests with 'property', a test name string in parentheses,
      |  // and test body in curly braces</span>
      |  property(<span class="stLiteral">"An empty Set should have size 0"</span>) {
      |    assert(<span class="stType">Set</span>.empty.size == <span class="stLiteral">0</span>)
      |  }
      |  <span class="stExplain">// To ignore a test, change 'property' to 'ignore'</span>
      |  ignore(<span class="stLiteral">"Invoking head on an empty Set should produce NoSuchElementException"</span>) {
      |    intercept[<span class="stType">NoSuchElementException</span>] {
      |      <span class="stType">Set</span>.empty.head
      |    }
      |  }
      |  <span class="stExplain">// Define a pending test by using (pending) for the body</span>
      |  property(<span class="stLiteral">"An empty Set's isEmpty method should return false"</span>) (pending)
      |  <span class="stExplain">// Tag a test by placing a tag object after the test name</span>
      |  <span class="stImport">import tagobjects.Slow</span>
      |  property(<span class="stLiteral">"An empty Set's nonEmpty method should return true"</span>, Slow) {
      |    assert(!<span class="stType">Set</span>.empty.nonEmpty)
      |  }
      |}
      |
      |<span class="stExplain">// Can also pass fixtures into tests with fixture.PropSpec</span>
      |<span class="stReserved">class</span> <span class="stType">StringSpec</span> <span class="stReserved">extends</span> <span class="stType">fixture.PropSpec</span> {
      |  <span class="stReserved">type</span> FixtureParam = <span class="stType">String</span> <span class="stExplain">// Define the type of the passed fixture object</span>
      |  <span class="stReserved">override</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">OneArgTest</span>) = {
      |    <span class="stExplain">// Shared setup (run before each test), including...</span>
      |    <span class="stReserved">val</span> fixture = <span class="stLiteral">"a fixture object"</span> <span class="stExplain">// ...creating a fixture object</span>
      |    <span class="stReserved">try</span> test(fixture) <span class="stExplain">// Pass the fixture into the test</span>
      |    <span class="stReserved">finally</span> {
      |      <span class="stExplain">// Shared cleanup (run at end of each test)</span>
      |    }
      |  }
      |  property(<span class="stLiteral">"The passed fixture can be used in the test"</span>) { s => <span class="stExplain">// Fixture passed in as s</span>
      |    assert(s == <span class="stLiteral">"a fixture object"</span>)
      |  }
      |}
      |
      |@DoNotDiscover <span class="stExplain">// Disable discovery of a test class</span>
      |<span class="stReserved">class</span> <span class="stType">InvisibleSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
      |
      |@Ignore <span class="stExplain">// Ignore all tests in a test class</span>
      |<span class="stReserved">class</span> <span class="stType">IgnoredSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
      |
      |<span class="stImport">import tags.Slow</span>
      |@Slow <span class="stExplain">// Mark all tests in a test class with a tag</span>
      |<span class="stReserved">class</span> <span class="stType">SlowSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
      |""".stripMargin

  // Example of PropSpec used for Play Framework functional tests.
  val play2Example: String =
    """<span class="stImport">import org.scalatest._</span>
      |<span class="stImport">import play.api.test._</span>
      |<span class="stImport">import play.api.test.Helpers._</span>
      |
      |<span class="stReserved">class</span> <span class="stType">ExampleSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  property(<span class="stLiteral">"Application should send 404 on a bad request"</span>) {
      |    running(<span class="stType">FakeApplication</span>()) {
      |      route(<span class="stType">FakeRequest</span>(GET, <span class="stLiteral">"/boum"</span>)) shouldBe <span class="stType">None</span>
      |    }
      |  }
      |  property(<span class="stLiteral">"Application should render the index page"</span>) {
      |    running(<span class="stType">FakeApplication</span>()) {
      |      <span class="stReserved">val</span> home = route(<span class="stType">FakeRequest</span>(GET, <span class="stLiteral">"/"</span>)).get
      |      status(home) shouldBe OK
      |      contentType(home) shouldBe <span class="stType">Some</span>(<span class="stLiteral">"text/html"</span>)
      |      contentAsString(home) should include (<span class="stLiteral">"ScalaTest"</span>)
      |    }
      |  }
      |}""".stripMargin

  // Snippet for the @DoNotDiscover annotation.
  val doNotDiscover: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |@DoNotDiscover
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> { <span class="stBlockComment">/*code omitted*/</span> }
    """.stripMargin

  // Snippet for ignoring a single test.
  val ignoreTest: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  ignore(<span class="stLiteral">"an empty Set should have size 0"</span>) { <span class="stBlockComment">/*code omitted*/</span> }
      |}""".stripMargin

  // Snippet for marking a test pending.
  val pendingTest: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  property(<span class="stLiteral">"an empty Set should have size 0"</span>) (pending)
      |}""".stripMargin

  // Snippet for tagging a single test with a custom Tag object.
  val taggingTest: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |<span class="stReserved">object</span> <span class="stType">SlowTest</span> <span class="stReserved">extends</span> <span class="stType">Tag</span>(<span class="stLiteral">"com.mycompany.tags.SlowTest"</span>)
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  property(<span class="stLiteral">"an empty Set should have size 0"</span>, <span class="stType">SlowTest</span>) {
      |    <span class="stBlockComment">/*code omitted*/</span>
      |  }
      |}""".stripMargin

  // Snippet for emitting informational output from a test.
  val infoTest: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  property(<span class="stLiteral">"an empty Set should have size 0"</span>) {
      |    info(<span class="stLiteral">"Some information."</span>)
      |    <span class="stBlockComment">/*code omitted*/</span>
      |  }
      |}""".stripMargin

  // Snippet for a no-arg withFixture override (setup/cleanup around each test).
  val fixtureNoArgTest: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  <span class="stReserved">def</span> setup() { <span class="stBlockComment">/*code omitted*/</span> }
      |  <span class="stReserved">def</span> cleanup() { <span class="stBlockComment">/*code omitted*/</span> }
      |  <span class="stReserved">override</span> <span class="stReserved">protected</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">NoArgTest</span>) = {
      |    setup()
      |    <span class="stReserved">try</span> test() <span class="stReserved">finally</span> cleanup()
      |  }
      |}""".stripMargin

  // Snippet for a one-arg withFixture override (fixture object passed to tests).
  val fixtureOneArgTest: String =
    """<span class="stImport">import org.scalatest._
      |import prop._</span>
      |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">fixture.PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
      |  <span class="stReserved">def</span> setup() { <span class="stBlockComment">/*code omitted*/</span> }
      |  <span class="stReserved">def</span> cleanup() { <span class="stBlockComment">/*code omitted*/</span> }
      |  <span class="stReserved">type</span> FixtureParam = <span class="stType">String</span>
      |  <span class="stReserved">override</span> <span class="stReserved">protected</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">OneArgTest</span>) = {
      |    setup()
      |    <span class="stReserved">try</span> test(<span class="stLiteral">"this is a fixture param"</span>) <span class="stReserved">finally</span> cleanup()
      |  }
      |}""".stripMargin

  // Snippet combining PropSpec with Selenium browser testing.
  val seleniumExample: String =
    """<span class="stImport">import org.scalatest._
      |import prop._
      |import selenium._</span>
      |<span class="stReserved">class</span> <span class="stType">BlogSpec</span> <span class="stReserved">extends</span> <span class="stType">PropSpec</span> <span class="stReserved">with</span>
      |    <span class="stType">TableDrivenPropertyChecks</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> <span class="stReserved">with</span>
      |    <span class="stType">WebBrowser</span> <span class="stReserved">with</span> <span class="stType">HtmlUnit</span> {
      |  <span class="stReserved">val</span> host = <span class="stLiteral">"http://localhost:9000/"</span>
      |  property(<span class="stLiteral">"The blog app home page should have the correct title"</span>) {
      |    go to (host + <span class="stLiteral">"index.html"</span>)
      |    pageTitle should be (<span class="stLiteral">"Awesome Blog"</span>)
      |  }
      |}""".stripMargin
}
| jedesah/scalatest-website | app/examples/PropSpecExamples.scala | Scala | apache-2.0 | 14,775 |
package lmxml
/** A node in a parsed LMXML tree: a label plus ordered child nodes. */
trait ParsedNode {
  // Node label; synthetic node kinds use bracketed pseudo-names such as
  // "[empty]" or "[textNode]" (see the case classes below).
  val name: String
  // Ordered child nodes (empty for leaves).
  val children: Seq[ParsedNode]
}
/** A placeholder node that carries no content of its own, only children. */
case class EmptyNode(children: Seq[ParsedNode] = Nil) extends ParsedNode {
  val name = "[empty]"
}
/**
 * An element node: a tag name with its attribute map and children.
 *
 * @param name     the element/tag name
 * @param attrs    attribute name -> value pairs (empty by default)
 * @param children ordered child nodes
 */
case class LmxmlNode(
  name: String,
  attrs: Map[String, String] = Map(),
  children: Seq[ParsedNode] = Nil
) extends ParsedNode
/**
 * A text node.
 *
 * @param contents  the raw text
 * @param unescaped when true, presumably the text is emitted verbatim without
 *                  escaping — TODO confirm against the renderer/transformer
 * @param children  ordered child nodes
 */
case class TextNode(
  contents: String,
  unescaped: Boolean = false,
  children: Seq[ParsedNode] = Nil
) extends ParsedNode {
  val name = "[textNode]"
}
/** A comment node; its children hold the commented-out subtree. */
case class CommentNode(children: Seq[ParsedNode]) extends ParsedNode {
  val name = "[commentNode]"
}
/**
 * A reference to a template by name.
 * NOTE(review): presumably resolved against a matching [[LinkDefinition]] at
 * transform time — verify against the parser/transformer.
 */
case class TemplateLink(
  name: String,
  children: Seq[ParsedNode] = Nil
) extends ParsedNode
/**
 * The definition side of a template link: a named subtree that
 * [[TemplateLink]] nodes can refer to by `name`.
 */
case class LinkDefinition(
  name: String,
  children: Seq[ParsedNode] = Nil
) extends ParsedNode
| philcali/lmxml | core/src/main/scala/model.scala | Scala | mit | 776 |
package org.joda.time.format
import java.util.Arrays
import java.util.Locale
import org.joda.time.Chronology
import org.joda.time.DateTimeField
import org.joda.time.DateTimeFieldType
import org.joda.time.DateTimeUtils
import org.joda.time.DateTimeZone
import org.joda.time.DurationField
import org.joda.time.DurationFieldType
import org.joda.time.IllegalFieldValueException
import org.joda.time.IllegalInstantException
import DateTimeParserBucket._
object DateTimeParserBucket {

  /**
   * Sorts the first `high` elements of `array` in place, using
   * [[SavedField]]'s natural ordering (most-significant duration field first).
   *
   * Runs of more than 10 elements delegate to `java.util.Arrays.sort`
   * (SavedField is `Comparable`, so natural-order sort applies); shorter runs
   * use an insertion sort, which is cheap for the small field lists typical
   * of date-time parsing.
   */
  private def sort(array: Array[SavedField], high: Int) {
    if (high > 10) {
      // Sort the caller's array itself. The previous implementation sorted a
      // mapped *copy* (`array.map(_.asInstanceOf[AnyRef])` allocates a new
      // array) and cast the array to Ordering[Object] as the comparator, so
      // the original array was never actually sorted.
      Arrays.sort(array.asInstanceOf[Array[AnyRef]], 0, high)
    } else {
      // Insertion sort for short runs.
      for (i <- 0 until high) {
        var j = i
        while (j > 0 && array(j - 1).compareTo(array(j)) > 0) {
          val t = array(j)
          array(j) = array(j - 1)
          array(j - 1) = t
          j -= 1
        }
      }
    }
  }

  /**
   * A field/value (or field/text) pair captured during parsing and applied to
   * the base instant once parsing completes. Instances are mutable and reused
   * (pooled) by the enclosing bucket, hence the `init` methods instead of
   * constructor parameters.
   */
  class SavedField() extends Comparable[SavedField] {

    var iField: DateTimeField = _
    var iValue: Int = _
    var iText: String = null
    var iLocale: Locale = null

    /** Re-initialises this slot with a numeric field value. */
    def init(field: DateTimeField, value: Int) {
      iField = field
      iValue = value
      iText = null
      iLocale = null
    }

    /** Re-initialises this slot with a textual field value (e.g. month name). */
    def init(field: DateTimeField, text: String, locale: Locale) {
      iField = field
      iValue = 0
      iText = text
      iLocale = locale
    }

    /**
     * Applies this saved field to `millis`, optionally rounding the result
     * down to the field's floor (used when resetting lower-order fields).
     */
    def set(millis: Long, reset: Boolean): Long = {
      var _millis: Long = millis
      _millis =
        if (iText == null) iField.set(_millis, iValue)
        else iField.set(_millis, iText, iLocale)
      if (reset) {
        _millis = iField.roundFloor(_millis)
      }
      _millis
    }

    /**
     * Orders fields so that larger units are set first: compares range
     * duration fields in reverse, falling back to the duration fields.
     */
    def compareTo(obj: SavedField): Int = {
      val other = obj.iField
      val result = compareReverse(iField.getRangeDurationField,
        other.getRangeDurationField)
      if (result != 0) {
        return result
      }
      compareReverse(iField.getDurationField, other.getDurationField)
    }
  }

  /**
   * Compares two duration fields in *reverse* order, treating null or
   * unsupported fields as greater than any supported field so they sort last.
   */
  def compareReverse(a: DurationField, b: DurationField): Int = {
    if (a == null || !a.isSupported) {
      if (b == null || !b.isSupported) {
        return 0
      }
      return -1
    }
    if (b == null || !b.isSupported) {
      return 1
    }
    -a.compareTo(b)
  }
}
/**
 * A bucket used during parsing: fields parsed out of text are saved in any
 * order and combined into a single millisecond instant only when
 * [[computeMillis]] is called. A parse position can be snapshotted with
 * [[saveState]] and rolled back with [[restoreState]].
 *
 * @param instantLocal initial millis from 1970-01-01T00:00:00, local time
 * @param chrono       the chronology to use (fields are accessed via its UTC variant)
 * @param locale       the locale to use; null means the default locale
 * @param pivotYear    pivot year for two-digit years; may be null
 * @param defaultYear  default year used when no year field was parsed
 */
class DateTimeParserBucket(private val instantLocal: Long,
                           var chrono: Chronology,
                           locale: Locale,
                           private val pivotYear: Integer,
                           private val defaultYear: Int) {

  // NOTE(review): these initializers dereference `chrono` before the
  // DateTimeUtils.getChronology normalisation below runs, so a null chronology
  // NPEs here — confirm callers never pass null.
  private var iChrono = chrono.withUTC()
  private var iLocale = if (locale == null) Locale.getDefault else locale
  private var iDefaultZone = chrono.getZone
  // Per-parse state, cleared by reset().
  private var iZone: DateTimeZone = null
  private var iOffset: Integer = null
  private var iSavedFields: Array[SavedField] = new Array[SavedField](8)
  private var iSavedFieldsCount: Int = _
  // True when iSavedFields is shared with a SavedState snapshot and must be
  // copied before mutation (copy-on-write).
  private var iSavedFieldsShared: Boolean = _
  private var iSavedState: AnyRef = null
  private var iPivotYear: Integer = null
  private var iMillis: Long = _
  private var iDefaultYear: Int = _
  // Stored as Integer, not Int: the deprecated auxiliary constructors pass
  // null for pivotYear, and unboxing null into an Int field threw a
  // NullPointerException at construction time.
  private var iDefaultPivotYear: Integer = null

  // Constructor body: normalise inputs and initialise per-parse state.
  chrono = DateTimeUtils.getChronology(chrono)
  iMillis = instantLocal
  iDefaultZone = chrono.getZone()
  iChrono = chrono.withUTC()
  iLocale = if (locale == null) Locale.getDefault() else locale
  iDefaultYear = defaultYear
  iDefaultPivotYear = pivotYear
  // reset
  iZone = iDefaultZone
  iPivotYear = iDefaultPivotYear
  iSavedFields = new Array[SavedField](8)

  /** @deprecated use the five-argument constructor. */
  @Deprecated
  def this(instantLocal: Long, chrono: Chronology, locale: Locale) {
    this(instantLocal, chrono, locale, null, 2000)
  }

  /** @deprecated use the five-argument constructor. */
  @Deprecated
  def this(instantLocal: Long,
           chrono: Chronology,
           locale: Locale,
           pivotYear: Integer) {
    this(instantLocal, chrono, locale, pivotYear, 2000)
  }

  /** Resets all per-parse state so the bucket can be reused for a new parse. */
  def reset() {
    iZone = iDefaultZone
    iOffset = null
    iPivotYear = iDefaultPivotYear
    iSavedFieldsCount = 0
    iSavedFieldsShared = false
    iSavedState = null
  }

  /** Resets the bucket, parses `text` fully, and returns the resulting millis. */
  def parseMillis(parser: DateTimeParser, text: CharSequence): Long = {
    reset()
    doParseMillis(DateTimeParserInternalParser.of(parser), text)
  }

  /**
   * Parses `text` with `parser`; the whole input must be consumed, otherwise
   * an IllegalArgumentException describing the failure position is thrown.
   */
  def doParseMillis(parser: InternalParser, text: CharSequence): Long = {
    var newPos = parser.parseInto(this, text, 0)
    if (newPos >= 0) {
      if (newPos >= text.length) {
        return computeMillis(resetFields = true, text)
      }
    } else {
      // A negative result encodes the failure position as its complement.
      newPos = ~newPos
    }
    throw new IllegalArgumentException(
      FormatUtils.createErrorMessage(text.toString, newPos))
  }

  /** The UTC chronology fields are resolved against. */
  def getChronology(): Chronology = iChrono

  /** The locale used for text-based field parsing. */
  def getLocale(): Locale = iLocale

  /** The time zone parsed from the text, if any. */
  def getZone(): DateTimeZone = iZone

  /** Sets the parsed time zone, invalidating any saved state. */
  def setZone(zone: DateTimeZone) {
    iSavedState = null
    iZone = zone
  }

  /** @deprecated use getOffsetInteger; returns 0 when no offset was parsed. */
  @Deprecated
  def getOffset(): Int = if (iOffset != null) iOffset else 0

  /** The parsed UTC offset in millis, or null when none was parsed. */
  def getOffsetInteger(): Integer = iOffset

  /** @deprecated use setOffset(Integer). */
  @Deprecated
  def setOffset(offset: Int) {
    iSavedState = null
    iOffset = offset
  }

  /** Sets the parsed UTC offset, invalidating any saved state. */
  def setOffset(offset: Integer) {
    iSavedState = null
    iOffset = offset
  }

  /** The pivot year for two-digit year parsing, or null. */
  def getPivotYear(): Integer = iPivotYear

  /** @deprecated pivot year should be supplied via the constructor. */
  @Deprecated
  def setPivotYear(pivotYear: Integer) {
    iPivotYear = pivotYear
  }

  /** Saves a numeric value for the given field. */
  def saveField(field: DateTimeField, value: Int) {
    obtainSaveField().init(field, value)
  }

  /** Saves a numeric value for the given field type. */
  def saveField(fieldType: DateTimeFieldType, value: Int) {
    obtainSaveField().init(fieldType.getField(iChrono), value)
  }

  /** Saves a textual value (e.g. a month name) for the given field type. */
  def saveField(fieldType: DateTimeFieldType, text: String, locale: Locale) {
    obtainSaveField().init(fieldType.getField(iChrono), text, locale)
  }

  /**
   * Returns the next free SavedField slot, growing (or un-sharing) the backing
   * array as needed. Slots are pooled and reused across parses.
   */
  private def obtainSaveField(): SavedField = {
    var savedFields: Array[SavedField] = iSavedFields
    val savedFieldsCount = iSavedFieldsCount
    if (savedFieldsCount == savedFields.length || iSavedFieldsShared) {
      // Grow when full; when shared with a snapshot, copy at current size.
      val newArray = Array.ofDim[SavedField](
        if (savedFieldsCount == savedFields.length) savedFieldsCount * 2
        else savedFields.length)
      System.arraycopy(savedFields, 0, newArray, 0, savedFieldsCount)
      iSavedFields = newArray
      savedFields = newArray
      iSavedFieldsShared = false
    }
    iSavedState = null
    var saved = savedFields(savedFieldsCount)
    if (saved == null) {
      val s = new SavedField()
      saved = s
      savedFields(savedFieldsCount) = s
    }
    iSavedFieldsCount = savedFieldsCount + 1
    saved
  }

  /** Snapshots the current parse position; pass the result to restoreState. */
  def saveState(): AnyRef = {
    if (iSavedState == null) {
      iSavedState = new SavedState()
    }
    iSavedState
  }

  /**
   * Rolls the bucket back to a state obtained from saveState. Returns false
   * when the state does not belong to this bucket.
   */
  def restoreState(savedState: AnyRef): Boolean = {
    if (savedState.isInstanceOf[SavedState]) {
      if (savedState.asInstanceOf[SavedState].restoreState(this)) {
        iSavedState = savedState
        return true
      }
    }
    false
  }

  /** Computes the millis without resetting lower-order fields. */
  def computeMillis(): Long = {
    computeMillis(resetFields = false, null.asInstanceOf[CharSequence])
  }

  /** Computes the millis, optionally resetting lower-order fields. */
  def computeMillis(resetFields: Boolean): Long = {
    computeMillis(resetFields, null.asInstanceOf[CharSequence])
  }

  /** Computes the millis; `text` is only used to enrich error messages. */
  def computeMillis(resetFields: Boolean, text: String): Long = {
    computeMillis(resetFields, text.asInstanceOf[CharSequence])
  }

  /**
   * Combines all saved fields into a millisecond instant, applying them from
   * the most significant down, then adjusts for the parsed offset or zone.
   */
  def computeMillis(resetFields: Boolean, text: CharSequence): Long = {
    var savedFields = iSavedFields
    val count = iSavedFieldsCount
    if (iSavedFieldsShared) {
      // Un-share before the in-place sort below mutates the array.
      val s = iSavedFields.clone().asInstanceOf[Array[SavedField]]
      iSavedFields = s
      savedFields = s
      iSavedFieldsShared = false
    }
    sort(savedFields, count)
    if (count > 0) {
      // If only month-to-day scale fields were parsed (no year), inject the
      // default year and recompute.
      val months = DurationFieldType.months().getField(iChrono)
      val days = DurationFieldType.days().getField(iChrono)
      val first = savedFields(0).iField.getDurationField
      if (compareReverse(first, months) >= 0 && compareReverse(first, days) <= 0) {
        saveField(DateTimeFieldType.year(), iDefaultYear)
        return computeMillis(resetFields, text)
      }
    }
    var millis = iMillis
    try {
      for (i <- 0 until count) {
        millis = savedFields(i).set(millis, resetFields)
      }
      if (resetFields) {
        // Second pass: round down at the least significant parsed field.
        for (i <- 0 until count) {
          millis = savedFields(i).set(millis, i == (count - 1))
        }
      }
    } catch {
      case e: IllegalFieldValueException =>
        if (text != null) {
          e.prependMessage("Cannot parse \"" + text + '"')
        }
        throw e
    }
    if (iOffset != null) {
      millis -= iOffset
    } else if (iZone != null) {
      val offset = iZone.getOffsetFromLocal(millis)
      millis -= offset
      if (offset != iZone.getOffset(millis)) {
        // Local time falls in a DST gap: no valid instant exists.
        var message = "Illegal instant due to time zone offset transition (" +
          iZone +
          ')'
        if (text != null) {
          message = "Cannot parse \"" + text + "\": " + message
        }
        throw new IllegalInstantException(message)
      }
    }
    millis
  }

  /**
   * Immutable snapshot of the bucket's parse position (zone, offset and the
   * saved-fields array/count). Restoring marks the fields array shared so a
   * later write copies it first.
   */
  class SavedState() {

    val iZone = DateTimeParserBucket.this.iZone
    val iOffset = DateTimeParserBucket.this.iOffset
    val iSavedFields = DateTimeParserBucket.this.iSavedFields
    val iSavedFieldsCount = DateTimeParserBucket.this.iSavedFieldsCount

    /** Copies this snapshot back into its owning bucket; false if foreign. */
    def restoreState(enclosing: DateTimeParserBucket): Boolean = {
      if (enclosing != DateTimeParserBucket.this) {
        return false
      }
      enclosing.iZone = this.iZone
      enclosing.iOffset = this.iOffset
      enclosing.iSavedFields = this.iSavedFields
      if (this.iSavedFieldsCount < enclosing.iSavedFieldsCount) {
        // The bucket has written past the snapshot: protect the shared array.
        enclosing.iSavedFieldsShared = true
      }
      enclosing.iSavedFieldsCount = this.iSavedFieldsCount
      true
    }
  }
}
| mdedetrich/soda-time | shared/src/main/scala/org/joda/time/format/DateTimeParserBucket.scala | Scala | bsd-2-clause | 9,632 |
package com.faacets.qalg.indup
package algebra
/**
 * Size typeclass specialised to two-dimensional containers: exposes the
 * extent of `T2` along each of its two dimensions.
 */
trait Size2[T2] extends Any with Size[T2, IntInt] {
  // Always 2 for this specialisation.
  def dims = 2
  /** Extent along dimension 0 (presumably rows — confirm against callers). */
  def size0(t: T2): Int
  /** Extent along dimension 1 (presumably columns — confirm against callers). */
  def size1(t: T2): Int
}
| denisrosset/qalg | indup/src/main/scala/qalg/indup/algebra/Size2.scala | Scala | mit | 165 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.concurrent.{LinkedBlockingQueue, TimeUnit, ThreadPoolExecutor}
import akka.pattern._
import akka.util.Timeout
import kafka.manager.utils.zero81.{ReassignPartitionCommand, PreferredReplicaLeaderElectionCommand}
import org.apache.curator.framework.CuratorFramework
import kafka.manager.utils.{AdminUtils, ZkUtils}
import scala.concurrent.{Future, ExecutionContext}
import scala.concurrent.duration._
import scala.util.{Failure, Try}
/**
* @author hiral
*/
import ActorModel._
/**
 * Configuration for [[KafkaCommandActor]].
 *
 * @param curator Curator/ZooKeeper client all admin commands run against.
 * @param longRunningPoolConfig sizing for the pool used to run blocking commands.
 * @param askTimeoutMillis ask timeout in millis (unused here; see the
 *                         commented-out askTimeout in KafkaCommandActor).
 * @param version target Kafka version, used to gate unsupported operations.
 */
case class KafkaCommandActorConfig(curator: CuratorFramework,
                                   longRunningPoolConfig: LongRunningPoolConfig,
                                   askTimeoutMillis: Long = 400,
                                   version: KafkaVersion)
/**
 * Actor executing Kafka admin commands (topic create/delete/alter, preferred
 * replica election, partition reassignment) against ZooKeeper via Curator.
 * Blocking work is dispatched to the long-running pool provided by
 * LongRunningPoolActor, and every outcome is wrapped in KCCommandResult(Try).
 */
class KafkaCommandActor(kafkaCommandActorConfig: KafkaCommandActorConfig) extends BaseCommandActor with LongRunningPoolActor {
  //private[this] val askTimeout: Timeout = kafkaCommandActorConfig.askTimeoutMillis.milliseconds

  // Version-aware helpers for topic/partition admin operations.
  private[this] val adminUtils = new AdminUtils(kafkaCommandActorConfig.version)
  private[this] val reassignPartitionCommand = new ReassignPartitionCommand(adminUtils)

  @scala.throws[Exception](classOf[Exception])
  override def preStart() = {
    log.info("Started actor %s".format(self.path))
  }

  @scala.throws[Exception](classOf[Exception])
  override def preRestart(reason: Throwable, message: Option[Any]) {
    // Log the failing message before the default restart behaviour runs.
    log.error(reason, "Restarting due to [{}] when processing [{}]",
      reason.getMessage, message.getOrElse(""))
    super.preRestart(reason, message)
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    super.postStop()
  }

  // Pool sizing comes straight from this actor's config.
  override protected def longRunningPoolConfig: LongRunningPoolConfig = kafkaCommandActorConfig.longRunningPoolConfig

  // When the long-running pool's queue is full, fail the command immediately
  // instead of blocking; the failure travels back as a KCCommandResult.
  override protected def longRunningQueueFull(): Unit = {
    sender ! KCCommandResult(Try(throw new UnsupportedOperationException("Long running executor blocking queue is full!")))
  }

  // This actor does not expect any ActorResponse messages; log strays.
  override def processActorResponse(response: ActorResponse): Unit = {
    response match {
      case any: Any => log.warning("kca : processActorResponse : Received unknown message: {}", any)
    }
  }

  // Dispatches each admin command onto the long-running pool. Each Future
  // yields a KCCommandResult(Try(...)), so exceptions are captured rather
  // than escaping the dispatcher.
  override def processCommandRequest(request: CommandRequest): Unit = {
    implicit val ec = longRunningExecutionContext
    request match {
      case KCDeleteTopic(topic) =>
        kafkaCommandActorConfig.version match {
          case Kafka_0_8_1_1 =>
            // Topic deletion is unsupported before 0.8.2: fail fast, no queueing.
            val result : KCCommandResult = KCCommandResult(Failure(new UnsupportedOperationException(
              s"Delete topic not supported for kafka version ${kafkaCommandActorConfig.version}")))
            sender ! result
          case Kafka_0_8_2_0 | Kafka_0_8_2_1 =>
            longRunning {
              Future {
                KCCommandResult(Try {
                  adminUtils.deleteTopic(kafkaCommandActorConfig.curator, topic) //this should work in 0.8.2
                  // Also remove the topic znode subtree from ZooKeeper.
                  kafkaCommandActorConfig.curator.delete().deletingChildrenIfNeeded().forPath(ZkUtils.getTopicPath(topic))
                })
              }
            }
        }
      case KCCreateTopic(topic, brokers, partitions, replicationFactor, config) =>
        longRunning {
          Future {
            KCCommandResult(Try {
              adminUtils.createTopic(kafkaCommandActorConfig.curator, brokers, topic, partitions, replicationFactor, config)
            })
          }
        }
      case KCAddTopicPartitions(topic, brokers, partitions, partitionReplicaList, readVersion) =>
        longRunning {
          Future {
            KCCommandResult(Try {
              adminUtils.addPartitions(kafkaCommandActorConfig.curator, topic, partitions, partitionReplicaList, brokers, readVersion)
            })
          }
        }
      case KCUpdateTopicConfig(topic, config, readVersion) =>
        longRunning {
          Future {
            KCCommandResult(Try {
              adminUtils.changeTopicConfig(kafkaCommandActorConfig.curator, topic, config, readVersion)
            })
          }
        }
      case KCPreferredReplicaLeaderElection(topicAndPartition) =>
        longRunning {
          log.info("Running replica leader election : {}", topicAndPartition)
          Future {
            KCCommandResult(
              Try {
                PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(kafkaCommandActorConfig.curator, topicAndPartition)
              }
            )
          }
        }
      case KCReassignPartition(current, generated) =>
        longRunning {
          log.info("Running reassign partition from {} to {}", current, generated)
          Future {
            KCCommandResult(
              reassignPartitionCommand.executeAssignment(kafkaCommandActorConfig.curator, current, generated)
            )
          }
        }
      case any: Any => log.warning("kca : processCommandRequest : Received unknown message: {}", any)
    }
  }
}
| LastManStanding/kafka-manager | app/kafka/manager/KafkaCommandActor.scala | Scala | apache-2.0 | 5,086 |
package io.protoless.messages.streams
import com.google.protobuf.CodedOutputStream
import io.protoless.fields.FieldEncoder
/**
 * Wrapper around protobuf `CodedOutputStream` introducing runtime methods to write fields
 * in a protobuf stream.
 */
final class ProtolessOutputStream(private val output: CodedOutputStream) {

  // Highest field index written so far. Fields must be written with strictly
  // increasing field numbers through this wrapper; the index-less `write`
  // derives the next index from it.
  // NOTE(review): the assert-then-set below is not atomic, so the ordering
  // guard is only reliable for single-threaded writers — confirm intended use.
  private val lastIndex = new java.util.concurrent.atomic.AtomicInteger(0)

  /**
   * Write the field `A` into field number `index`.
   *
   * Fails (via `assert`) when `index` is not strictly greater than the last
   * index written, since protobuf fields are emitted in ascending order here.
   */
  final def write[A](a: A, index: Int)(implicit encoder: FieldEncoder[A]): Unit = {
    assert(index > lastIndex.get(),
      s"You cannot write fields in reverse order. Last index written was ${lastIndex.get()}, "+
      // Fixed: the closing ')' was missing from the message, producing an
      // unbalanced "(<encoder>" in assertion failures.
      s"and you tried to write at position $index (${encoder.toString})")
    lastIndex.set(index)
    encoder.write(index, a, output)
  }

  /** Write the field `A` at the next sequential field number. */
  final def write[A](a: A)(implicit encoder: FieldEncoder[A]): Unit = {
    val index = lastIndex.incrementAndGet()
    encoder.write(index, a, output)
  }

  // TODO: generate `write` methods with 2..22 parameters
}
| julien-lafont/protoless | modules/core/src/main/scala/io/protoless/messages/streams/ProtolessOutputStream.scala | Scala | apache-2.0 | 1,045 |
package spinoco.protocol.rtp.codec
import scodec.Codec
import scodec.codecs._
import shapeless.{::, HNil}
import spinoco.protocol.rtp.{RTPHeaderExtension, RTPPacket}
/**
* Codec for RTP Packet
*/
/**
 * Codec for RTP Packet
 */
object RTPPacketCodec {

  /*
   * RTP fixed header layout (RFC 3550 §5.1):
   *
   *  0                   1                   2                   3
   *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   * |V=2|P|X|  CC   |M|     PT      |       sequence number         |
   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   * |                           timestamp                           |
   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   * |           synchronization source (SSRC) identifier            |
   * +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
   * |            contributing source (CSRC) identifiers             |
   * |                             ....                              |
   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   *
   */
  lazy val codec : Codec[RTPPacket] = {
    (impl.headerCodec
    .flatAppend { h =>
      // CC (h(3)) is the count of 32-bit CSRC identifiers following the header.
      vectorOfN(provide(h(3) /* CC */), int(32))
    }.flatAppend { h =>
      // X flag (h(2)): a header extension is present only when set.
      val x = h(2) // X
      if (x) impl.headerExtension.xmap[Some[RTPHeaderExtension]](Some(_), _.get).upcast[Option[RTPHeaderExtension]]
      else provide(None).upcast[Option[RTPHeaderExtension]]
    } :+ bytes)
    .xmap (
      { case v :: padding :: ext :: csrcCount :: marker :: pt :: seqNo :: ts :: ssrc :: csrc :: extensionHeader :: payload :: HNil =>
        // When the P flag is set, the last payload byte holds the number of
        // padding bytes (itself included) to strip from the end.
        val unpaddedPayload =
          if (! padding) payload
          else {
            val toDrop = payload.takeRight(1).toInt(signed = false)
            payload.dropRight(toDrop)
          }
        RTPPacket(
          version = v
          , marker = marker
          , payloadType = pt
          , sequenceNumber = seqNo
          , timestamp = ts
          , ssrc = ssrc
          , csrc = csrc
          , payload = unpaddedPayload
          , extensionHeader = extensionHeader
        )
      }
      , pkt => {
        // NOTE(review): `padded` is payload.size % 4, i.e. the remainder, not
        // the 4 - (size % 4) bytes needed for 32-bit alignment. Whether the
        // result is correctly aligned depends on what paddingMapBytes (defined
        // elsewhere in this package) maps each remainder to — confirm.
        val padded = (pkt.payload.size % 4).toInt
        val paddedPayload = pkt.payload ++ paddingMapBytes(padded) // padded may be only 0, 1, 2 or 3
        pkt.version :: (padded != 0) :: pkt.extensionHeader.nonEmpty :: pkt.csrc.size :: pkt.marker ::
        pkt.payloadType :: pkt.sequenceNumber :: pkt.timestamp :: pkt.ssrc :: pkt.csrc.toVector ::
        pkt.extensionHeader :: paddedPayload :: HNil
      })
  }

  object impl {

    // Fixed 12-byte header fields, in wire order (see diagram above).
    lazy val headerCodec = {
      ("Version" | version) ::
      ("Padding" | bool) ::
      ("Extension" | bool) ::
      ("CSRCCount" | uint(4)) ::
      ("Marker" | bool) ::
      ("Payload Type" | uint(7)) ::
      ("Sequence number" | uint(16)) ::
      ("Timestamp" | ulong(32)) ::
      ("SSRC" | int(32))
    }

    /*
     * Header extension layout (RFC 3550 §5.3.1):
     *
     *  0                   1                   2                   3
     *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
     * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     * |      defined by profile       |           length              |
     * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     * |                        header extension                       |
     * |                             ....                              |
     *
     */
    // `wordSizeCodec` (defined elsewhere) presumably encodes the length in
    // 32-bit words rather than bytes, per the RFC — confirm.
    val headerExtension: Codec[RTPHeaderExtension] = {
      (
        ("Flag" | uint16) ::
        ("Extension content" | variableSizeBytes(wordSizeCodec, bytes))
      ).xmap (
        { case flag :: content :: HNil => RTPHeaderExtension(flag, content) }
        , { h => h.flag :: h.content :: HNil }
      )
    }
  }
}
| Spinoco/protocol | rtp/src/main/scala/spinoco/protocol/rtp/codec/RTPPacketCodec.scala | Scala | mit | 3,836 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.calcite
import org.apache.flink.table.api.TableException
import org.apache.flink.table.plan.util._
import org.apache.flink.table.runtime.rank.{ConstantRankRange, RankRange, RankType, VariableRankRange}
import org.apache.calcite.plan.{RelOptCluster, RelOptCost, RelOptPlanner, RelTraitSet}
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeField}
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollation, RelNode, RelWriter, SingleRel}
import org.apache.calcite.util.{ImmutableBitSet, NumberUtil}
import scala.collection.JavaConversions._
/**
* Relational expression that returns the rows in which the rank number of each row
* is in the given range.
*
* The node is an optimization of `OVER` for some special cases,
* e.g.
* {{{
* SELECT * FROM (
* SELECT a, b, RANK() OVER (PARTITION BY b ORDER BY c) rk FROM MyTable) t
* WHERE rk < 10
* }}}
* can be converted to this node.
*
* @param cluster cluster that this relational expression belongs to
* @param traitSet the traits of this rel
* @param input input relational expression
* @param partitionKey partition keys (may be empty)
* @param orderKey order keys (should not be empty)
* @param rankType rank type to define how exactly generate rank number
* @param rankRange the expected range of rank number value
* @param rankNumberType the field type of rank number
* @param outputRankNumber whether output rank number
*/
abstract class Rank(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    input: RelNode,
    val partitionKey: ImmutableBitSet,
    val orderKey: RelCollation,
    val rankType: RankType,
    val rankRange: RankRange,
    val rankNumberType: RelDataTypeField,
    val outputRankNumber: Boolean)
  extends SingleRel(cluster, traitSet, input) {

  // An empty order key would make every row's rank identical, so reject it eagerly.
  if (orderKey.getFieldCollations.isEmpty) {
    throw new TableException("orderKey should not be empty.")
  }

  // Validate the requested rank range at construction time.
  rankRange match {
    case r: ConstantRankRange =>
      if (r.getRankEnd <= 0) {
        throw new TableException(
          s"Rank end can't smaller than zero. The rank end is ${r.getRankEnd}")
      }
      if (r.getRankStart > r.getRankEnd) {
        throw new TableException(
          s"Rank start '${r.getRankStart}' can't greater than rank end '${r.getRankEnd}'.")
      }
    case v: VariableRankRange =>
      // The rank-end index must reference an existing input field.
      if (v.getRankEndIndex < 0) {
        throw new TableException(s"Rank end index can't smaller than zero.")
      }
      if (v.getRankEndIndex >= input.getRowType.getFieldCount) {
        throw new TableException(s"Rank end index can't greater than input field count.")
      }
  }

  /** Output row type: the input row type, optionally extended with the rank number field. */
  override def deriveRowType(): RelDataType = {
    if (!outputRankNumber) {
      return input.getRowType
    }
    // output row type = input row type + rank number type
    val typeFactory = cluster.getRexBuilder.getTypeFactory
    val typeBuilder = typeFactory.builder()
    input.getRowType.getFieldList.foreach(typeBuilder.add)
    typeBuilder.add(rankNumberType)
    typeBuilder.build()
  }

  /** Adds rank-specific attributes (type, range, keys, projection) to the plan explanation. */
  override def explainTerms(pw: RelWriter): RelWriter = {
    val select = getRowType.getFieldNames.zipWithIndex.map {
      case (name, idx) => s"$name=$$$idx"
    }.mkString(", ")
    super.explainTerms(pw)
      .item("rankType", rankType)
      .item("rankRange", rankRange)
      .item("partitionBy", partitionKey.map(i => s"$$$i").mkString(","))
      .item("orderBy", RelExplainUtil.collationToString(orderKey))
      .item("select", select)
  }

  /** Estimated output rows: rank-range NDV per group times the group count,
    * capped at the input row count. */
  override def estimateRowCount(mq: RelMetadataQuery): Double = {
    val countPerGroup = FlinkRelMdUtil.getRankRangeNdv(rankRange)
    if (partitionKey.isEmpty) {
      // only one group
      countPerGroup
    } else {
      val inputRowCount = mq.getRowCount(input)
      val numOfGroup = mq.getDistinctRowCount(input, partitionKey, null)
      if (numOfGroup != null) {
        NumberUtil.min(numOfGroup * countPerGroup, inputRowCount)
      } else {
        // Group count unknown: fall back to 10% of the input rows.
        // Reuse inputRowCount instead of re-querying the metadata provider.
        NumberUtil.min(inputRowCount * 0.1, inputRowCount)
      }
    }
  }

  /** Cost model: one pass over the input (CPU proportional to input rows, no I/O). */
  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    val rowCount = mq.getRowCount(input)
    val cpuCost = rowCount
    planner.getCostFactory.makeCost(rowCount, cpuCost, 0)
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/calcite/Rank.scala | Scala | apache-2.0 | 5,170 |
package org.talkingpuffin.ui
import javax.swing.table.AbstractTableModel
import swing.Reactor
import org.talkingpuffin.filter._
/**
 * Swing table model exposing a list of compound filters. For each of the
 * From/To/Text/Source criteria there is a text column followed by an "R"
 * (regular-expression flag) column, then the retweet ("RT") and
 * commented-retweet ("CRT") flag columns.
 */
class CompoundFilterModel(cpdFilters: CompoundFilters) extends AbstractTableModel with Reactor {
  private val colNames = List("From", "R", "To", "R", "Text", "R", "Source", "R", "RT", "CRT")

  // Mark rendered in boolean columns when the corresponding flag is set.
  private val CheckMark = "✓"

  listenTo(cpdFilters)
  reactions += {
    // Refresh the whole table whenever the underlying filter list changes.
    case _: CompoundFiltersChanged => fireTableDataChanged()
  }

  override def getColumnName(column: Int) = colNames(column)

  // Derive the count from the header list so the two can never drift apart.
  def getColumnCount = colNames.length

  def getRowCount = cpdFilters.list.length

  override def getColumnClass(columnIndex: Int) = classOf[String]

  override def getValueAt(rowIndex: Int, columnIndex: Int): Object = {
    val cpdFilter = cpdFilters.list(rowIndex)
    val filters = cpdFilter.textFilters

    // Text of the first filter of the requested type, or "" when absent.
    def textFor(cls: Class[_]): String =
      filters.find(cls.isInstance(_)).map(_.text).getOrElse("")

    // Check mark when the first filter of the requested type is a regular expression.
    def regExFor(cls: Class[_]): String =
      filters.find(cls.isInstance(_)).filter(_.isRegEx).map(_ => CheckMark).getOrElse("")

    def mark(set: Boolean): String = if (set) CheckMark else ""

    columnIndex match {
      case 0 => textFor(classOf[FromTextFilter])
      case 1 => regExFor(classOf[FromTextFilter])
      case 2 => textFor(classOf[ToTextFilter])
      case 3 => regExFor(classOf[ToTextFilter])
      case 4 => textFor(classOf[TextTextFilter])
      case 5 => regExFor(classOf[TextTextFilter])
      case 6 => textFor(classOf[SourceTextFilter])
      case 7 => regExFor(classOf[SourceTextFilter])
      case 8 => mark(cpdFilter.retweet.getOrElse(false))
      case 9 => mark(cpdFilter.commentedRetweet.getOrElse(false))
      case _ => "" // defensive: JTable should never request an unknown column
    }
  }
}
/*
* Copyright (C) 2015 Language Technology Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package model.queryable.impl
import model.{Entity, EntityType}
import scalikejdbc.NamedDB
import testFactories.{DatabaseRollback, FlatSpecWithDatabaseTrait}
// scalastyle:off
import scalikejdbc._
// scalastyle:on
/**
 * Integration tests for EntityQueryableImpl against an in-memory test database.
 * DatabaseRollback presumably restores the fixture between tests — TODO confirm,
 * since "delete" and "merge" mutate rows other tests read.
 */
class EntityQueryableImplTest extends FlatSpecWithDatabaseTrait with DatabaseRollback {
  override def testDatabase: NamedDB = NamedDB('newsleakTestDB)
  // Mocking setup
  // Test doubles that redirect all queries to the in-memory test database.
  final class RelationshipQueryableTestable extends RelationshipQueryableImpl {
    override def connector: NamedDB = testDatabase
  }
  final class EntityQueryableTestable extends EntityQueryableImpl {
    override def connector: NamedDB = testDatabase
    override val relationship = new RelationshipQueryableTestable
  }
  // uut = unit under test
  val uut = new EntityQueryableTestable
  override def beforeAll(): Unit = {
    testDatabase.localTx { implicit session =>
      // Fixture: entity 3 is pre-blacklisted; entities 1 and 4 share a name but differ in type.
      sql"INSERT INTO entity VALUES (1, ${"Angela Merkel"}, ${"PER"}, 7, false)".update.apply()
      sql"INSERT INTO entity VALUES (2, ${"Angela Brecht"}, ${"PER"}, 3, false)".update.apply()
      sql"INSERT INTO entity VALUES (3, ${"The Backlist"}, ${"PER"}, 3, true)".update.apply()
      sql"INSERT INTO entity VALUES (4, ${"Angela Merkel"}, ${"ORG"}, 4, false)".update.apply()
      // Angela Brecht occurs in the first document 10 times
      sql"INSERT INTO documententity VALUES (1, 2, 10)".update.apply()
      // Relation: Angela Merkel - Angela Brecht with frequency 3
      sql"INSERT INTO relationship VALUES (1, 1, 2, 3, false)".update.apply()
    }
  }
  "getByName" should "return the entity with the given name" in {
    val expected = List(
      Entity(1, "Angela Merkel", EntityType.Person, 7),
      Entity(4, "Angela Merkel", EntityType.Organization, 4)
    )
    val actual = uut.getByName("Angela Merkel")
    assert(actual === expected)
  }
  // H2 doesn't support ILIKE queries, so this test is ignored.
  ignore should "return entities that share common names" in {
    val expected = List(
      Entity(1, "Angela Merkel", EntityType.Person, 7),
      Entity(2, "Angela Brecht", EntityType.Person, 3),
      Entity(4, "Angela Merkel", EntityType.Organization, 4)
    )
    val actual = uut.getByNamePattern("Angela")
    assert(actual === expected)
  }
  "getById" should "not return blacklisted entities" in {
    // Entity 3 is inserted with isblacklisted = true in beforeAll.
    assert(None == uut.getById(3))
  }
  "getById" should "return the correct entity" in {
    val expected = Some(Entity(1, "Angela Merkel", EntityType.Person, 7))
    val actual = uut.getById(1)
    assert(actual == expected)
  }
  "getByType" should "return entities corresponding to this type" in {
    val expected = List(
      Entity(1, "Angela Merkel", EntityType.Person, 7),
      Entity(2, "Angela Brecht", EntityType.Person, 3)
    )
    val actual = uut.getByType(EntityType.Person)
    assert(actual == expected)
  }
  // "delete" is a soft delete: it only flags the row, leaving it in the table.
  "delete" should "set the backlist flag to true" in {
    uut.delete(1)
    val actual = testDatabase.readOnly { implicit session =>
      sql"SELECT isblacklisted FROM entity WHERE id = 1".map(_.boolean("isblacklisted")).single().apply()
    }.getOrElse(fail)
    assert(actual)
  }
  "changeType" should "return false if not successful" in {
    // Entity 7 does not exist in the fixture.
    val actual = uut.changeType(7, EntityType.Organization)
    assert(!actual)
  }
  "changeType" should "change entities type to the given one" in {
    uut.changeType(1, EntityType.Organization)
    val actual = testDatabase.readOnly { implicit session =>
      sql"SELECT type FROM entity WHERE id = 1".map(rs => EntityType.withName(rs.string("type"))).single().apply()
    }.getOrElse(fail)
    assert(actual == EntityType.Organization)
  }
  "add" should "return the updated entity if already present" in {
    // Expected frequency 4: presumably incremented from the fixture's 3 — TODO confirm add() semantics.
    val expected = Some(Entity(2, "Angela Brecht", EntityType.Person, 4))
    val actual = uut.add(1, "Angela Brecht", EntityType.Person)
    assert(actual == expected)
  }
  "merge" should "blacklist duplicates" in {
    uut.merge(1, List(2))
    assert(uut.getById(2) == None)
  }
  "merge" should "not produce self-referring relationships" in {
    // Relationship 1 links entities 1 and 2; after merging 2 into 1 it must disappear.
    uut.merge(1, List(2))
    assert(uut.relationship.getById(1) == None)
  }
}
| tudarmstadt-lt/newsleak | common/src/test/scala/model/queryable/impl/EntityQueryableImplTest.scala | Scala | agpl-3.0 | 4,810 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.{IOException, NotSerializableException, ObjectInputStream}
import org.apache.spark.internal.config.UNSAFE_EXCEPTION_ON_MEMORY_LEAK
import org.apache.spark.memory.TestMemoryConsumer
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.NonSerializable
// Common state shared by FailureSuite-launched tasks. We use a global object
// for this because any local variables used in the task closures will rightfully
// be copied for each task, so there's no other way for them to share state.
object FailureSuiteState {
  // Total number of task attempts run, including retried attempts.
  var tasksRun = 0
  // Number of attempts that were deliberately failed by the test closures.
  var tasksFailed = 0

  /** Resets both counters; call between tests. Synchronizes on this object,
    * matching the locking discipline used by the task closures. */
  def clear(): Unit = {
    synchronized {
      tasksRun = 0
      tasksFailed = 0
    }
  }
}
/**
 * End-to-end tests of Spark's task-failure handling: retry of failed tasks,
 * serialization failures of results/closures, managed-memory-leak detection,
 * and propagation of failure causes back to the driver.
 *
 * Master strings of the form "local[1,2]" mean one worker thread with
 * maxFailures = 2, i.e. a task may fail once and be retried once.
 */
class FailureSuite extends SparkFunSuite with LocalSparkContext {

  // Run a 3-task map job in which task 1 deterministically fails once, and check
  // whether the job completes successfully and we ran 4 tasks in total.
  test("failure in a single-stage job") {
    sc = new SparkContext("local[1,2]", "test")
    val results = sc.makeRDD(1 to 3, 3).map { x =>
      FailureSuiteState.synchronized {
        FailureSuiteState.tasksRun += 1
        if (x == 1 && FailureSuiteState.tasksFailed == 0) {
          FailureSuiteState.tasksFailed += 1
          throw new Exception("Intentional task failure")
        }
      }
      x * x
    }.collect()
    FailureSuiteState.synchronized {
      assert(FailureSuiteState.tasksRun === 4)
    }
    assert(results.toList === List(1, 4, 9))
    FailureSuiteState.clear()
  }

  // Run a map-reduce job in which a reduce task deterministically fails once.
  test("failure in a two-stage job") {
    sc = new SparkContext("local[1,2]", "test")
    val results = sc.makeRDD(1 to 3).map(x => (x, x)).groupByKey(3).map {
      case (k, v) =>
        FailureSuiteState.synchronized {
          FailureSuiteState.tasksRun += 1
          if (k == 1 && FailureSuiteState.tasksFailed == 0) {
            FailureSuiteState.tasksFailed += 1
            throw new Exception("Intentional task failure")
          }
        }
        (k, v.head * v.head)
    }.collect()
    FailureSuiteState.synchronized {
      assert(FailureSuiteState.tasksRun === 4)
    }
    assert(results.toSet === Set((1, 1), (2, 4), (3, 9)))
    FailureSuiteState.clear()
  }

  // Run a map-reduce job in which the map stage always fails.
  test("failure in a map stage") {
    sc = new SparkContext("local", "test")
    val data = sc.makeRDD(1 to 3).map(x => { throw new Exception; (x, x) }).groupByKey(3)
    intercept[SparkException] {
      data.collect()
    }
    // Make sure that running new jobs with the same map stage also fails
    intercept[SparkException] {
      data.collect()
    }
  }

  test("failure because task results are not serializable") {
    sc = new SparkContext("local[1,1]", "test")
    val results = sc.makeRDD(1 to 3).map(x => new NonSerializable)
    val thrown = intercept[SparkException] {
      results.collect()
    }
    assert(thrown.getClass === classOf[SparkException])
    assert(thrown.getMessage.contains("serializable") ||
      thrown.getCause.getClass === classOf[NotSerializableException],
      "Exception does not contain \\"serializable\\": " + thrown.getMessage)

    FailureSuiteState.clear()
  }

  test("failure because task closure is not serializable") {
    sc = new SparkContext("local[1,1]", "test")
    val a = new NonSerializable

    // Non-serializable closure in the final result stage
    val thrown = intercept[SparkException] {
      sc.parallelize(1 to 10, 2).map(x => a).count()
    }
    assert(thrown.getClass === classOf[SparkException])
    assert(thrown.getMessage.contains("NotSerializableException") ||
      thrown.getCause.getClass === classOf[NotSerializableException])

    // Non-serializable closure in an earlier stage
    val thrown1 = intercept[SparkException] {
      sc.parallelize(1 to 10, 2).map(x => (x, a)).partitionBy(new HashPartitioner(3)).count()
    }
    assert(thrown1.getClass === classOf[SparkException])
    assert(thrown1.getMessage.contains("NotSerializableException") ||
      thrown1.getCause.getClass === classOf[NotSerializableException])

    // Non-serializable closure in foreach function
    val thrown2 = intercept[SparkException] {
      // scalastyle:off println
      sc.parallelize(1 to 10, 2).foreach(x => println(a))
      // scalastyle:on println
    }
    assert(thrown2.getClass === classOf[SparkException])
    assert(thrown2.getMessage.contains("NotSerializableException") ||
      thrown2.getCause.getClass === classOf[NotSerializableException])

    FailureSuiteState.clear()
  }

  // NOTE: the closing parenthesis of "(SPARK-9266)" was missing from the test title.
  test("managed memory leak error should not mask other failures (SPARK-9266)") {
    val conf = new SparkConf().set(UNSAFE_EXCEPTION_ON_MEMORY_LEAK, true)
    sc = new SparkContext("local[1,1]", "test", conf)

    // If a task leaks memory but fails due to some other cause, then make sure that the original
    // cause is preserved
    val thrownDueToTaskFailure = intercept[SparkException] {
      sc.parallelize(Seq(0)).mapPartitions { iter =>
        val c = new TestMemoryConsumer(TaskContext.get().taskMemoryManager())
        TaskContext.get().taskMemoryManager().allocatePage(128, c)
        throw new Exception("intentional task failure")
        // Unreachable, but fixes the closure's result type to Iterator.
        iter
      }.count()
    }
    assert(thrownDueToTaskFailure.getMessage.contains("intentional task failure"))

    // If the task succeeded but memory was leaked, then the task should fail due to that leak
    val thrownDueToMemoryLeak = intercept[SparkException] {
      sc.parallelize(Seq(0)).mapPartitions { iter =>
        val c = new TestMemoryConsumer(TaskContext.get().taskMemoryManager())
        TaskContext.get().taskMemoryManager().allocatePage(128, c)
        iter
      }.count()
    }
    assert(thrownDueToMemoryLeak.getMessage.contains("memory leak"))
  }

  // Run a 3-task map job in which task 1 always fails with a exception message that
  // depends on the failure number, and check that we get the last failure.
  test("last failure cause is sent back to driver") {
    sc = new SparkContext("local[1,2]", "test")
    val data = sc.makeRDD(1 to 3, 3).map { x =>
      FailureSuiteState.synchronized {
        FailureSuiteState.tasksRun += 1
        if (x == 3) {
          FailureSuiteState.tasksFailed += 1
          throw new UserException("oops",
            new IllegalArgumentException("failed=" + FailureSuiteState.tasksFailed))
        }
      }
      x * x
    }
    val thrown = intercept[SparkException] {
      data.collect()
    }
    FailureSuiteState.synchronized {
      assert(FailureSuiteState.tasksRun === 4)
    }
    assert(thrown.getClass === classOf[SparkException])
    assert(thrown.getCause.getClass === classOf[UserException])
    assert(thrown.getCause.getMessage === "oops")
    assert(thrown.getCause.getCause.getClass === classOf[IllegalArgumentException])
    // "failed=2": the task failed once and was retried once before the job aborted.
    assert(thrown.getCause.getCause.getMessage === "failed=2")
    FailureSuiteState.clear()
  }

  test("failure cause stacktrace is sent back to driver if exception is not serializable") {
    sc = new SparkContext("local", "test")
    val thrown = intercept[SparkException] {
      sc.makeRDD(1 to 3).foreach { _ => throw new NonSerializableUserException }
    }
    assert(thrown.getClass === classOf[SparkException])
    // The original cause cannot travel across the wire, so it is flattened into the message.
    assert(thrown.getCause === null)
    assert(thrown.getMessage.contains("NonSerializableUserException"))
    FailureSuiteState.clear()
  }

  test("failure cause stacktrace is sent back to driver if exception is not deserializable") {
    sc = new SparkContext("local", "test")
    val thrown = intercept[SparkException] {
      sc.makeRDD(1 to 3).foreach { _ => throw new NonDeserializableUserException }
    }
    assert(thrown.getClass === classOf[SparkException])
    assert(thrown.getCause === null)
    assert(thrown.getMessage.contains("NonDeserializableUserException"))
    FailureSuiteState.clear()
  }

  // Run a 3-task map stage where one task fails once.
  test("failure in tasks in a submitMapStage") {
    sc = new SparkContext("local[1,2]", "test")
    val rdd = sc.makeRDD(1 to 3, 3).map { x =>
      FailureSuiteState.synchronized {
        FailureSuiteState.tasksRun += 1
        if (x == 1 && FailureSuiteState.tasksFailed == 0) {
          FailureSuiteState.tasksFailed += 1
          throw new Exception("Intentional task failure")
        }
      }
      (x, x)
    }
    val dep = new ShuffleDependency[Int, Int, Int](rdd, new HashPartitioner(2))
    sc.submitMapStage(dep).get()
    FailureSuiteState.synchronized {
      assert(FailureSuiteState.tasksRun === 4)
    }
    FailureSuiteState.clear()
  }

  test("failure because cached RDD partitions are missing from DiskStore (SPARK-15736)") {
    sc = new SparkContext("local[1,2]", "test")
    val rdd = sc.parallelize(1 to 2, 2).persist(StorageLevel.DISK_ONLY)
    rdd.count()
    // Directly delete all files from the disk store, triggering failures when reading cached data:
    SparkEnv.get.blockManager.diskBlockManager.getAllFiles().foreach(_.delete())
    // Each task should fail once due to missing cached data, but then should succeed on its second
    // attempt because the missing cache locations will be purged and the blocks will be recomputed.
    rdd.count()
  }

  test("SPARK-16304: Link error should not crash executor") {
    sc = new SparkContext("local[1,2]", "test")
    intercept[SparkException] {
      sc.parallelize(1 to 2).foreach { i =>
        // scalastyle:off throwerror
        throw new LinkageError()
        // scalastyle:on throwerror
      }
    }
  }

  // TODO: Need to add tests with shuffle fetch failures.
}
// Exception carrying a cause, used to verify that the full cause chain is
// propagated back to the driver.
class UserException(message: String, cause: Throwable)
  extends RuntimeException(message, cause)

// Not serializable because it captures a NonSerializable instance field.
class NonSerializableUserException extends RuntimeException {
  val nonSerializableInstanceVariable = new NonSerializable
}

// Serializable, but deliberately fails while being deserialized on the driver.
class NonDeserializableUserException extends RuntimeException {
  private def readObject(in: ObjectInputStream): Unit = {
    throw new IOException("Intentional exception during deserialization.")
  }
}
| pgandhi999/spark | core/src/test/scala/org/apache/spark/FailureSuite.scala | Scala | apache-2.0 | 10,877 |
package com.blinkbox.books.reading
import com.blinkbox.books.spray.v2.{Image, Link}
import org.joda.time.DateTime
/**
 * Details of a single book in a user's library, as returned by the reading
 * service: bibliographic data plus the user's ownership, reading status and
 * position, with cover images and related links.
 */
case class BookDetails(
  isbn: String,
  title: String,
  author: String,
  sortableAuthor: String,
  addedDate: DateTime,
  ownership: Ownership,
  readingStatus: ReadingStatus,
  readingPosition: ReadingPosition,
  images: List[Image],
  links: List[Link])
// Outcome of adding a sample book to a library.
// NOTE(review): consider making this trait sealed so matches are checked for
// exhaustiveness — left unsealed here to avoid breaking external extenders.
trait SampleResult
// The sample was newly added.
case object SampleAdded extends SampleResult
// The library already contained this sample; nothing was changed.
case object SampleAlreadyExists extends SampleResult
| blinkboxbooks/reading-service | public/src/main/scala/com/blinkbox/books/reading/Models.scala | Scala | mit | 498 |
//======================================================================================================================
// Facsimile: A Discrete-Event Simulation Library
// Copyright © 2004-2020, Michael J Allen.
//
// This file is part of Facsimile.
//
// Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
// details.
//
// You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see:
//
// http://www.gnu.org/licenses/lgpl.
//
// The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
// project home page at:
//
// http://facsim.org/
//
// Thank you for your interest in the Facsimile project!
//
// IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for
// inclusion as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If
// your code fails to comply with the standard, then your patches will be rejected. For further information, please
// visit the coding standards at:
//
// http://facsim.org/Documentation/CodingStandards/
//======================================================================================================================
//======================================================================================================================
// Scala source file belonging to the org.facsim.types.phys.test types.
//======================================================================================================================
package org.facsim.types.phys.test
import org.facsim.types.phys.LinearScaleConverter
import org.facsim.util.test.CommonTestMethods
import org.scalatest.FunSpec
// Disable test-problematic Scalastyle checkers.
//scalastyle:off scaladoc
//scalastyle:off public.methods.have.type
//scalastyle:off multiple.string.literals
//scalastyle:off magic.numbers
/** Test suite for the [[LinearScaleConverter]] class. */
/** Test suite for the [[LinearScaleConverter]] class: constructor validation
  * plus round-trip behavior of `importValue`/`exportValue`. */
class LinearScaleConverterTest
extends FunSpec
with CommonTestMethods {

  /** Construction test data.
   *
   *  Note: The values 0.0 and 1.0 are illegal factor values, and so are not included here.
   */
  trait ConstructorTestData {
    new LinearScaleConverter(Double.MinValue)
    new LinearScaleConverter(-1.0)
    new LinearScaleConverter(-Double.MinPositiveValue)
    new LinearScaleConverter(Double.MinPositiveValue)
    new LinearScaleConverter(Double.MaxValue)
  }

  /** Import/export test data: converters scaling by ±10 and the expected
   *  results for representative inputs. */
  trait ImportExportTestData {
    val tolerance = 1.0e-12
    val factorTen = new LinearScaleConverter(10.0)
    val factorMinusTen = new LinearScaleConverter(-10.0)
    val i1 = 10.0
    val e1_1 = 100.0
    val e1_2 = -100.0
    val i2 = -10.0
    val e2_1 = -100.0
    val e2_2 = 100.0
  }

  // Test fixture description.
  describe(classOf[LinearScaleConverter].getCanonicalName) {

    // Constructor tests.
    describe(".this(Double)") {

      // Verify that it should reject illegal (non-finite) factor values.
      def doFiniteFailure(badFactor: Double) = {
        val e = intercept[IllegalArgumentException] {
          new LinearScaleConverter(badFactor)
        }
        assertRequireFiniteMsg(e, "factor", badFactor)
      }
      it("must throw an IllegalArgumentException if passed NaN") {
        doFiniteFailure(Double.NaN)
      }
      it("must throw an IllegalArgumentException if passed -Infinity") {
        doFiniteFailure(Double.NegativeInfinity)
      }
      it("must throw an IllegalArgumentException if passed Infinity") {
        doFiniteFailure(Double.PositiveInfinity)
      }

      // Verify rejection of finite-but-invalid factors (0.0 and 1.0).
      def doValidFailure(badFactor: Double) = {
        val e = intercept[IllegalArgumentException] {
          new LinearScaleConverter(badFactor)
        }
        assertRequireValidMsg(e, "factor", badFactor)
      }
      it("must throw an IllegalArgumentException if passed a zero factor") {
        doValidFailure(0.0)
      }
      // NOTE: title previously read "…Exception is passed a factor of one" (typo).
      it("must throw an IllegalArgumentException if passed a factor of one") {
        doValidFailure(1.0)
      }

      // Verify that it accepts just about any other value (not all of which make sense).
      it("must accept valid factor values") {
        new ConstructorTestData {}
      }
    }

    // Importing tests.
    describe(".importValue(Double)") {

      // Check that we get the right imported value.
      it("must import values correctly") {
        new ImportExportTestData {

          // Helper function to perform comparisons.
          def checkReturn(factor: LinearScaleConverter, importVal: Double, exportVal: Double) = {
            assert(factor.importValue(importVal) === exportVal)
          }

          // The return value must exactly match the value passed. There can be no rounding errors.
          checkReturn(factorTen, 0.0, 0.0)
          checkReturn(factorTen, i1, e1_1)
          checkReturn(factorTen, i2, e2_1)
          checkReturn(factorMinusTen, 0.0, 0.0)
          checkReturn(factorMinusTen, i1, e1_2)
          checkReturn(factorMinusTen, i2, e2_2)
        }
      }
    }

    // Exporting tests: exportValue must be the exact inverse of importValue.
    describe(".exportValue(Double)") {

      // Check that we get the right imported value.
      it("must export values correctly") {
        new ImportExportTestData {

          // Helper function to perform comparisons.
          def checkReturn(factor: LinearScaleConverter, importVal: Double, exportVal: Double) = {
            assert(factor.exportValue(exportVal) === importVal)
          }

          // The return value must exactly match the value passed. There can be no rounding errors.
          checkReturn(factorTen, 0.0, 0.0)
          checkReturn(factorTen, i1, e1_1)
          checkReturn(factorTen, i2, e2_1)
          checkReturn(factorMinusTen, 0.0, 0.0)
          checkReturn(factorMinusTen, i1, e1_2)
          checkReturn(factorMinusTen, i2, e2_2)
        }
      }
    }
  }
}
// Re-enable test-problematic Scalastyle checkers.
//scalastyle:on scaladoc
//scalastyle:on public.methods.have.type
//scalastyle:on multiple.string.literals
//scalastyle:on magic.numbers | MichaelJAllen/facsimile | facsimile-types/src/test/scala/org/facsim/types/phys/test/LinearScaleConverterTest.scala | Scala | lgpl-3.0 | 6,518 |
package com.judopay.connect.statsd
import com.timgroup.statsd.NonBlockingStatsDClient
/**
* Created by marktranter on 07/03/17.
*/
/** Minimal abstraction over a StatsD metrics client. */
trait StatsdClient {
  /** Records an absolute gauge value under the given key. */
  def recordGauge(key: String, stat: Long): Unit
  /** Increments the counter registered under the given key. */
  def incrementCounter(key: String): Unit
}
object TimGroupStatsdClient {
  /** Creates a client from a "host:port" connection string.
    *
    * Extra ":"-separated segments after the port are ignored, preserving the
    * original lenient behaviour.
    *
    * @throws Exception if no port segment is present
    * @throws NumberFormatException if the port segment is not an integer
    */
  def apply(connectionString: String) = {
    connectionString.split(':').toList match {
      // Host and port present (any further segments are ignored).
      case host :: port :: _ => new TimGroupStatsdClient(host, port.toInt)
      // No ":" separator. (split never yields an empty list, but this arm
      // also makes the match exhaustive.)
      case _ => throw new Exception("Invalid connection string format. Must be in the format host:port")
    }
  }
}
// Adapter around the TimGroup NonBlockingStatsDClient; host and port are
// exposed package-private for tests.
class TimGroupStatsdClient(private[statsd] val statsdHostName: String, private[statsd] val statsdPort: Int) extends StatsdClient {
  // Underlying UDP client; "non-blocking" means sends are fire-and-forget.
  // The empty first argument is presumably the metric-name prefix — TODO
  // confirm against the TimGroup client API.
  private val wrapped = new NonBlockingStatsDClient("", statsdHostName, statsdPort)
  override def recordGauge(key: String, stat: Long): Unit = wrapped.recordGaugeValue(key, stat)
  override def incrementCounter(key: String): Unit = wrapped.incrementCounter(key)
}
package giter8.launcher
import java.io.{File, FileInputStream}
import java.util.Properties
import giter8.G8.RichFile
import giter8.{Git, JGitInteractor, SystemPaths, TempDir, VersionNumber}
import scala.util.{Failure, Success, Try}
object Launcher extends TempDir {

  private lazy val git = new Git(new JGitInteractor())

  /**
   * Looks up the `giter8.version` property from the template.
   *
   * Clones/opens the template repository and reads
   * `project/build.properties`; yields `None` when the property is absent.
   *
   * @param template template repository locator
   * @return the declared giter8 version, if any
   */
  private def templateVersion(template: String): Try[Option[VersionNumber]] =
    git.withRepo(template) {
      base =>
        Try {
          val properties: Properties = {
            val p = new Properties()
            p.load(new FileInputStream(base / "project" / "build.properties"))
            p
          }
          val result: Option[String] = Option(properties.getProperty("giter8.version"))
          result
        }.flatMap {
          string =>
            Try(string.map(VersionNumber.apply))
        }
    }.flatten

  /**
   * Resolves the version from the `giter8.version` JVM system property.
   *
   * FIX: this previously read `sys.env` (environment variables). An
   * environment variable name containing a dot cannot normally be set, and
   * the documented source is the *system property*, so `sys.props` is used.
   */
  private lazy val propertyVersion: Try[Option[VersionNumber]] =
    Success(sys.props.get("giter8.version").map(VersionNumber.apply))

  /**
   * Resolves the version of giter8 to be launched.
   *
   * Precedence:
   *   1. `--g8Version` command line argument
   *   2. `giter8.version` property in the template project
   *   3. `giter8.version` system property
   *
   * Returns `None` if we can't find a version from any of these.
   *
   * @param g8Version version from the command line, if given
   * @param template template repository locator
   * @return the version to launch, or `None` for "latest"
   */
  private def version(g8Version: Option[String], template: String): Try[Option[VersionNumber]] = {
    Try(g8Version.map(VersionNumber.apply)) or
      templateVersion(template) or
      propertyVersion
  }

  /**
   * Creates another JVM process running the `sbt-launcher` jar with the
   * resolved `launchconfig`.
   *
   * This downloads the version of giter8 for that launchconfig and passes on
   * the arguments that this process was created with (minus some filtered
   * args). Standard input is forwarded so interactive template prompts work.
   *
   * @param launchJar sbt-launch jar to execute
   * @param lc resolved launchconfig file
   * @param args arguments forwarded to the launched giter8
   * @param v version being launched, `None` meaning latest
   * @return success marker, or the failure with the child's exit code
   */
  private def fetchAndRun(
    launchJar: File,
    lc: File,
    args: Array[String],
    v: Option[VersionNumber]
  ): Try[String] =
    withTempdir {
      base =>
        import scala.sys.process._

        val java: File =
          SystemPaths.javaHome / "bin" / "java"

        println(s"Fetching Giter8 ${v.getOrElse("LATEST")}")

        // -DGITER8_FORKED=true lets the child know it was launched by us.
        val command = Seq(
          java.getPath, "-DGITER8_FORKED=true", "-jar",
          launchJar.getPath,
          "@" + lc.toURI
        ) ++ args

        val exit = command.run(BasicIO.standard(connectInput = true)).exitValue()

        if (exit == 0) {
          Success("Success!")
        } else {
          Failure(new RuntimeException(s"Failure, exit code: $exit"))
        }
    }.flatten

  /** Entry point: resolve the launcher jar, version and launchconfig, then fork. */
  def launch(template: String, g8Version: Option[String], args: Array[String]): Try[String] =
    for {
      lJar <- SbtLaunchJar.get
      v <- version(g8Version, template)
      lc <- SbtLaunchConfig.get(v)
      result <- fetchAndRun(lJar, lc, args, v)
    } yield result
}
| wolfendale/giter8 | library/src/main/scala/giter8/launcher/Launcher.scala | Scala | apache-2.0 | 3,317 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.table
import java.util
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.runtime.utils.{TableProgramsCollectionTestBase, TableProgramsTestBase}
import org.apache.flink.table.utils.MemoryTableSourceSinkUtil
import org.apache.flink.test.util.TestBaseUtils
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConverters._
/**
 * Integration tests for batch [[TableEnvironment]] registration and conversion:
 * registering DataSets (with and without field expressions), registering Tables,
 * converting case classes to/from tables, and inserting into a registered sink.
 */
@RunWith(classOf[Parameterized])
class TableEnvironmentITCase(
    configMode: TableConfigMode)
  extends TableProgramsCollectionTestBase(configMode) {
  // Register a DataSet under a name and scan it back with the default tuple field names.
  @Test
  def testSimpleRegister(): Unit = {
    val tableName = "MyTable"
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val ds = CollectionDataSets.get3TupleDataSet(env)
    tEnv.registerDataSet(tableName, ds)
    val t = tEnv.scan(tableName).select('_1, '_2, '_3)
    val expected = "1,1,Hi\\n" + "2,2,Hello\\n" + "3,2,Hello world\\n" +
      "4,3,Hello world, how are you?\\n" + "5,3,I am fine.\\n" + "6,3,Luke Skywalker\\n" +
      "7,4,Comment#1\\n" + "8,4,Comment#2\\n" + "9,4,Comment#3\\n" + "10,4,Comment#4\\n" +
      "11,5,Comment#5\\n" + "12,5,Comment#6\\n" + "13,5,Comment#7\\n" + "14,5,Comment#8\\n" +
      "15,5,Comment#9\\n" + "16,6,Comment#10\\n" + "17,6,Comment#11\\n" + "18,6,Comment#12\\n" +
      "19,6,Comment#13\\n" + "20,6,Comment#14\\n" + "21,6,Comment#15\\n"
    val results = t.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Register a DataSet with positional field aliases ('a, 'b, 'c map to _1, _2, _3).
  @Test
  def testRegisterWithFieldsByPosition(): Unit = {
    val tableName = "MyTable"
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val ds = CollectionDataSets.get3TupleDataSet(env)
    tEnv.registerDataSet(tableName, ds, 'a, 'b, 'c) // new alias
    val t = tEnv.scan(tableName).select('a, 'b)
    val expected = "1,1\\n" + "2,2\\n" + "3,2\\n" + "4,3\\n" + "5,3\\n" + "6,3\\n" +
      "7,4\\n" + "8,4\\n" + "9,4\\n" + "10,4\\n" + "11,5\\n" + "12,5\\n" + "13,5\\n" + "14,5\\n" +
      "15,5\\n" + "16,6\\n" + "17,6\\n" + "18,6\\n" + "19,6\\n" + "20,6\\n" + "21,6\\n"
    val results = t.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Register a DataSet with fields referenced by original name in a new order.
  @Test
  def testRegisterWithFieldsByName(): Unit = {
    val tableName = "MyTable"
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val ds = CollectionDataSets.get3TupleDataSet(env)
    tEnv.registerDataSet(tableName, ds, '_3, '_1, '_2) // new order
    val t = tEnv.scan(tableName).select('_1, '_2)
    val expected = "1,1\\n" + "2,2\\n" + "3,2\\n" + "4,3\\n" + "5,3\\n" + "6,3\\n" +
      "7,4\\n" + "8,4\\n" + "9,4\\n" + "10,4\\n" + "11,5\\n" + "12,5\\n" + "13,5\\n" + "14,5\\n" +
      "15,5\\n" + "16,6\\n" + "17,6\\n" + "18,6\\n" + "19,6\\n" + "20,6\\n" + "21,6\\n"
    val results = t.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Register a Table (not a DataSet) and query the registered copy.
  @Test
  def testTableRegister(): Unit = {
    val tableName = "MyTable"
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
    tEnv.registerTable(tableName, t)
    val regT = tEnv.scan(tableName).select('a, 'b).filter('a > 8)
    val expected = "9,4\\n" + "10,4\\n" +
      "11,5\\n" + "12,5\\n" + "13,5\\n" + "14,5\\n" +
      "15,5\\n" + "16,6\\n" + "17,6\\n" + "18,6\\n" +
      "19,6\\n" + "20,6\\n" + "21,6\\n"
    val results = regT.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Direct DataSet -> Table conversion without registration.
  @Test
  def testToTable(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val t = CollectionDataSets.get3TupleDataSet(env)
      .toTable(tEnv, 'a, 'b, 'c)
      .select('a, 'b, 'c)
    val expected = "1,1,Hi\\n" + "2,2,Hello\\n" + "3,2,Hello world\\n" +
      "4,3,Hello world, how are you?\\n" + "5,3,I am fine.\\n" + "6,3,Luke Skywalker\\n" +
      "7,4,Comment#1\\n" + "8,4,Comment#2\\n" + "9,4,Comment#3\\n" + "10,4,Comment#4\\n" +
      "11,5,Comment#5\\n" + "12,5,Comment#6\\n" + "13,5,Comment#7\\n" + "14,5,Comment#8\\n" +
      "15,5,Comment#9\\n" + "16,6,Comment#10\\n" + "17,6,Comment#11\\n" + "18,6,Comment#12\\n" +
      "19,6,Comment#13\\n" + "20,6,Comment#14\\n" + "21,6,Comment#15\\n"
    val results = t.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Case-class collection -> Table, read back as generic Rows.
  @Test
  def testToTableFromCaseClass(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val data = List(
      SomeCaseClass("Peter", 28, 4000.00, "Sales"),
      SomeCaseClass("Anna", 56, 10000.00, "Engineering"),
      SomeCaseClass("Lucy", 42, 6000.00, "HR"))
    val t = env.fromCollection(data)
      .toTable(tEnv, 'a, 'b, 'c, 'd)
      .select('a, 'b, 'c, 'd)
    val expected: String =
      "Peter,28,4000.0,Sales\\n" +
      "Anna,56,10000.0,Engineering\\n" +
      "Lucy,42,6000.0,HR\\n"
    val results = t.toDataSet[Row].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Round trip: case classes -> Table -> DataSet of the same case class.
  @Test
  def testToTableFromAndToCaseClass(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env, config)
    val data = List(
      SomeCaseClass("Peter", 28, 4000.00, "Sales"),
      SomeCaseClass("Anna", 56, 10000.00, "Engineering"),
      SomeCaseClass("Lucy", 42, 6000.00, "HR"))
    val t = env.fromCollection(data)
      .toTable(tEnv, 'a, 'b, 'c, 'd)
      .select('a, 'b, 'c, 'd)
    val expected: String =
      "SomeCaseClass(Peter,28,4000.0,Sales)\\n" +
      "SomeCaseClass(Anna,56,10000.0,Engineering)\\n" +
      "SomeCaseClass(Lucy,42,6000.0,HR)\\n"
    val results = t.toDataSet[SomeCaseClass].collect()
    TestBaseUtils.compareResultAsText(results.asJava, expected)
  }
  // Insert a scanned table into a registered in-memory table sink and verify its contents.
  @Test
  def testInsertIntoMemoryTable(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val tEnv = TableEnvironment.getTableEnvironment(env)
    MemoryTableSourceSinkUtil.clear()
    val t = CollectionDataSets.getSmall3TupleDataSet(env).toTable(tEnv).as('a, 'b, 'c)
    tEnv.registerTable("sourceTable", t)
    val fieldNames = Array("d", "e", "f")
    val fieldTypes = tEnv.scan("sourceTable").getSchema.getFieldTypes
    val sink = new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink
    tEnv.registerTableSink("targetTable", fieldNames, fieldTypes, sink)
    tEnv.scan("sourceTable")
      .select('a, 'b, 'c)
      .insertInto("targetTable")
    env.execute()
    val expected = List("1,1,Hi", "2,2,Hello", "3,2,Hello world")
    assertEquals(expected.sorted, MemoryTableSourceSinkUtil.tableDataStrings.sorted)
  }
}
/** JUnit parameterization for [[TableEnvironmentITCase]]: runs with the default table config only. */
object TableEnvironmentITCase {
  @Parameterized.Parameters(name = "Table config = {0}")
  def parameters(): util.Collection[Array[java.lang.Object]] = {
    val configurations = Seq[Array[AnyRef]](Array(TableProgramsTestBase.DEFAULT))
    configurations.asJava
  }
}
/** Simple POJO-style record used by the case-class conversion tests. */
case class SomeCaseClass(name: String, age: Int, salary: Double, department: String) {
  // Zero-argument auxiliary constructor delegating to the primary with empty defaults.
  def this() = this("", 0, 0.0, "")
}
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/batch/table/TableEnvironmentITCase.scala | Scala | apache-2.0 | 8,439 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.analytic
import java.util.Collections
import org.geotools.data.Query
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.iterators.StatsScan
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.stats._
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/**
 * Tests for [[StatsProcess]]: runs each supported stat query against 150 generated
 * features and decodes the single result feature back into the corresponding stat type.
 * Fixes: corrected three mislabeled example descriptions ("Iteratot" typo; the
 * Enumeration and Histogram examples previously carried each other's legacy names).
 */
@RunWith(classOf[JUnitRunner])
class StatsProcessTest extends Specification with TestWithFeatureType {
  sequential
  override val spec = "an_id:java.lang.Integer,attr:java.lang.Long,dtg:Date,*geom:Point:srid=4326"
  // 150 features with an_id in [0, 150) and attr = 2 * an_id, all at the same point/time.
  addFeatures((0 until 150).toArray.map { i =>
    val attrs = Array(i.asInstanceOf[AnyRef], (i * 2).asInstanceOf[AnyRef], "2012-01-01T19:00:00Z", "POINT(-77 38)")
    val sf = new ScalaSimpleFeature(sft, i.toString)
    sf.setAttributes(attrs)
    sf
  })
  // Spatio-temporal query matching all generated features.
  def query = new Query(sftName, ECQL.toFilter("dtg DURING 2012-01-01T18:30:00.000Z/2012-01-01T19:30:00.000Z " +
      "AND bbox(geom,-80,35,-75,40)"))
  "StatsIteratorProcess" should {
    val statsIteratorProcess = new StatsProcess()
    "work with the MinMax stat" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "MinMax(attr)", encode = true)
      val sf = results.features().next
      val minMaxStat = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[MinMax[java.lang.Long]]
      minMaxStat.bounds mustEqual (0, 298)
    }
    "work with the IteratorStackCount stat" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "IteratorStackCount()", encode = true)
      val sf = results.features().next
      val isc = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[IteratorStackCount]
      isc.count must beGreaterThanOrEqualTo(1L)
    }
    "work with the Enumeration stat" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "Enumeration(an_id)", encode = true)
      val sf = results.features().next
      val eh = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[EnumerationStat[java.lang.Integer]]
      eh.size mustEqual 150
      eh.frequency(0) mustEqual 1
      eh.frequency(149) mustEqual 1
      eh.frequency(150) mustEqual 0
    }
    "work with the Histogram stat" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "Histogram(an_id,5,0,149)", encode = true)
      val sf = results.features().next
      val rh = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[Histogram[java.lang.Integer]]
      rh.length mustEqual 5
      forall(0 until 5)(rh.count(_) mustEqual 30)
    }
    "work with the GroupBy stat" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "GroupBy(an_id,MinMax(attr))", encode = true)
      val sf = results.features().next
      val gb = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[GroupBy[_]]
      gb.size mustEqual 150
    }
    "work with the DescriptiveStats stat" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "DescriptiveStats(attr)", encode = true)
      val sf = results.features().next
      val rh = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[DescriptiveStats]
      rh.count mustEqual 150
      rh.bounds(0) mustEqual (0, 298)
      rh.mean(0) must beCloseTo(149.0, 1e-9)
      rh.populationVariance(0) must beCloseTo(7499.666666666667, 1e-9)
      rh.populationStandardDeviation(0) must beCloseTo(86.60061585616275, 1e-9)
      rh.populationSkewness(0) must beCloseTo(0.0, 1e-9)
      rh.populationKurtosis(0) must beCloseTo(1.7998933285923824, 1e-9)
      rh.populationExcessKurtosis(0) must beCloseTo(-1.2001066714076176, 1e-9)
      rh.sampleVariance(0) must beCloseTo(7550.0, 1e-9)
      rh.sampleStandardDeviation(0) must beCloseTo(86.89073598491383, 1e-9)
      rh.sampleSkewness(0) must beCloseTo(0.0, 1e-9)
      rh.sampleKurtosis(0) must beCloseTo(1.859889772878795, 1e-9)
      rh.sampleExcessKurtosis(0) must beCloseTo(-1.140110227121205, 1e-9)
      rh.populationCovariance(0) must beCloseTo(7499.666666666667, 1e-9)
      rh.populationCorrelation(0) must beCloseTo(1.0, 1e-9)
      rh.sampleCovariance(0) must beCloseTo(7550.0, 1e-9)
      rh.sampleCorrelation(0) must beCloseTo(1.0, 1e-9)
    }
    "work with multiple stats at once" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query),
        "MinMax(attr);IteratorStackCount();Enumeration(an_id);Histogram(an_id,5,10,14)", encode = true)
      val sf = results.features().next
      val seqStat = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[SeqStat]
      val stats = seqStat.stats
      stats.size mustEqual 4
      val minMax = stats(0).asInstanceOf[MinMax[java.lang.Long]]
      val isc = stats(1).asInstanceOf[IteratorStackCount]
      val eh = stats(2).asInstanceOf[EnumerationStat[java.lang.Integer]]
      val rh = stats(3).asInstanceOf[Histogram[java.lang.Integer]]
      minMax.bounds mustEqual (0, 298)
      isc.count must beGreaterThanOrEqualTo(1L)
      eh.size mustEqual 150
      eh.frequency(0) mustEqual 1
      eh.frequency(149) mustEqual 1
      eh.frequency(150) mustEqual 0
      rh.length mustEqual 5
      rh.bounds mustEqual (0, 149)
      (0 until 5).map(rh.count).sum mustEqual 150
    }
    "work with non AccumuloFeatureCollections" in {
      val features: DefaultFeatureCollection = new DefaultFeatureCollection(null, sft)
      SelfClosingIterator(fs.getFeatures(new Query(sftName, Filter.INCLUDE)).features).foreach(features.add)
      val results = statsIteratorProcess.execute(features,
        "MinMax(attr);IteratorStackCount();Enumeration(an_id);Histogram(an_id,5,10,14)", encode = true)
      val sf = results.features().next
      val seqStat = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[SeqStat]
      val stats = seqStat.stats
      stats.size mustEqual 4
      val minMax = stats(0).asInstanceOf[MinMax[java.lang.Long]]
      val isc = stats(1).asInstanceOf[IteratorStackCount]
      val eh = stats(2).asInstanceOf[EnumerationStat[java.lang.Integer]]
      val rh = stats(3).asInstanceOf[Histogram[java.lang.Integer]]
      minMax.bounds mustEqual (0, 298)
      isc.count mustEqual 1L
      eh.size mustEqual 150
      eh.frequency(0) mustEqual 1
      eh.frequency(149) mustEqual 1
      eh.frequency(150) mustEqual 0
      rh.length mustEqual 5
      rh.bounds mustEqual (0, 149)
      (0 until 5).map(rh.count).sum mustEqual 150
    }
    "return stats encoded as json" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "MinMax(attr)", false)
      val sf = results.features().next
      val expectedOutput = """{ "min": 0, "max": 298, "cardinality": 152 }"""
      sf.getAttribute(0) must beEqualTo(expectedOutput).ignoreSpace
    }
    "return stats encoded as json with non-Accumulo Feature collections" in {
      val features: DefaultFeatureCollection = new DefaultFeatureCollection(null, sft)
      SelfClosingIterator(fs.getFeatures(new Query(sftName, Filter.INCLUDE)).features).foreach(features.add)
      val results = statsIteratorProcess.execute(features, "MinMax(attr)", false)
      val sf = results.features().next
      val expectedOutput = """{ "min": 0, "max": 298, "cardinality": 152 }"""
      sf.getAttribute(0) must beEqualTo(expectedOutput).ignoreSpace
    }
    "return stats binary encoded as with Accumulo Feature collections" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "MinMax(attr)", true)
      val sf = results.features().next
      val stat = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[MinMax[Long]]
      stat.min mustEqual(0)
      stat.max mustEqual(298)
    }
    "return stats binary encoded as with non-Accumulo Feature collections" in {
      val features: DefaultFeatureCollection = new DefaultFeatureCollection(null, sft)
      SelfClosingIterator(fs.getFeatures(new Query(sftName, Filter.INCLUDE)).features).foreach(features.add)
      val results = statsIteratorProcess.execute(features, "MinMax(attr)", true)
      val sf = results.features().next
      val stat = StatsScan.decodeStat(sft)(sf.getAttribute(0).asInstanceOf[String]).asInstanceOf[MinMax[Long]]
      stat.min mustEqual(0)
      stat.max mustEqual(298)
    }
    "return transforms stats encoded as json" in {
      val results = statsIteratorProcess.execute(fs.getFeatures(query), "MinMax(attr1)", false, Collections.singletonList("attr1=attr+5"))
      val sf = results.features().next
      // NB: Doubles <=> Ints:(
      val expectedOutput = """{ "min": 5.0, "max": 303.0, "cardinality": 149 }"""
      sf.getAttribute(0) must beEqualTo(expectedOutput).ignoreSpace
    }
    "return transforms stats encoded as json with non AccumuloFeatureCollections" in {
      val features: DefaultFeatureCollection = new DefaultFeatureCollection(null, sft)
      SelfClosingIterator(fs.getFeatures(new Query(sftName, Filter.INCLUDE)).features).foreach(features.add)
      val results = statsIteratorProcess.execute(features, "MinMax(attr1)", false, Collections.singletonList("attr1=attr+5"))
      val sf = results.features().next
      // NB: Doubles <=> Ints:(
      val expectedOutput = """{ "min": 5.0, "max": 303.0, "cardinality": 149 }"""
      sf.getAttribute(0) must beEqualTo(expectedOutput).ignoreSpace
    }
  }
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/process/analytic/StatsProcessTest.scala | Scala | apache-2.0 | 10,276 |
//package sp.merger
//
//import org.scalatest._
//import org.scalatest.concurrent.ScalaFutures
//
//
///**
// * Created by kristofer on 19/11/14.
// */
//class ProductAbilityMergerTest extends FreeSpec with Matchers with ScalaFutures{
//
// import sp.domain._
//
// val modelID = ID.makeID("596f6a75-9419-458d-a703-410043d3c54b").get
// val prodID = ID.makeID("596f6a75-9419-458d-a703-410043d3c54c").get
// val abilityID = ID.makeID("596f6a75-9419-458d-a703-410043d3c54d").get
//
// val pam = new ProductAbilityMerger(null)
//
// "when getting extracting attributes" - {
// "we will get a correct future" in {
// val a = Attr(
// "model" -> IDPrimitive(modelID),
// "product" -> IDPrimitive(prodID),
// "abilities" -> IDPrimitive(abilityID)
// )
//
// val res = pam.extractAttr(a)
// res.futureValue shouldEqual (modelID, prodID, abilityID)
// }
// }
//
//}
| kristoferB/SP | sp1/src/test/scala/sp/merger/ProductAbilityMergerTest.scala | Scala | mit | 912 |
import scala.collection.immutable.Map
object Solution {
  /** Counts occurrences of each element in `xs`, keyed by element value. */
  def comb(xs: List[Int]): Map[Int, Int] =
    xs.groupBy(identity).map { case (value, occurrences) => value -> occurrences.size }

  /**
   * Returns, in order of first occurrence, the distinct elements of `xs`
   * that appear at least `k` times.
   */
  def f0(xs: List[Int], k: Int): List[Int] = {
    val counts = comb(xs)
    // `distinct` preserves first-occurrence order, matching the original fold.
    xs.distinct.filter(counts(_) >= k)
  }

  /** Formats the qualifying elements as a space-separated line, or "-1" if none qualify. */
  def f(xs: List[Int], k: Int): String =
    f0(xs, k) match {
      case Nil => "-1"
      case ys => ys.mkString(" ")
    }

  def main(args: Array[String]): Unit = {
    // scala.io.StdIn.readLine replaces the deprecated Predef.readLine.
    import scala.io.StdIn
    val t = StdIn.readLine().trim.toInt
    for (_ <- 0 until t) {
      // First line holds "n k"; only k (index 1) is needed.
      val p = StdIn.readLine().split(" ").map(_.toInt)
      val xs = StdIn.readLine().split(" ").map(_.toInt).toList
      println(f(xs, p(1)))
    }
  }
}
| pbl64k/HackerRank-Contests | 2014-05-16-FP/FilterElements/fe.accepted.scala | Scala | bsd-2-clause | 1,199 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala
import org.junit.Test
import org.junit.Assert.assertNotNull
import org.junit.Assert.assertTrue
import org.junit.Assert.assertFalse
import org.junit.Assert.assertEquals
import org.scalatest.junit.JUnitSuite
/**
 * Unit tests for [[Subscriber]]: wrapper construction, unsubscription propagation
 * through chained Observables, idempotent unsubscribe callbacks, and onStart/request
 * (backpressure) semantics.
 */
class SubscriberTests extends JUnitSuite {
  @Test def testIssue1173() {
    // https://github.com/Netflix/RxJava/issues/1173
    val subscriber = Subscriber((n: Int) => println(n))
    assertNotNull(subscriber.asJavaObserver)
    assertNotNull(subscriber.asJavaSubscription)
    assertNotNull(subscriber.asJavaSubscriber)
  }
  @Test def testUnsubscribeForSubscriber() {
    var innerSubscriber: Subscriber[Int] = null
    val o = Observable[Int](subscriber => {
      Observable[Int](subscriber => {
        innerSubscriber = subscriber
      }).subscribe(subscriber)
    })
    o.subscribe().unsubscribe()
    // If we unsubscribe outside, the inner Subscriber should also be unsubscribed
    assertTrue(innerSubscriber.isUnsubscribed)
  }
  @Test def testBlockCallbackOnlyOnce() {
    // Toggling flag: a second invocation would flip `called` back to false.
    var called = false
    val o = Observable[Int](subscriber => {
      subscriber.add({ called = !called })
    })
    val subscription = o.subscribe()
    subscription.unsubscribe()
    subscription.unsubscribe()
    // Even if called multiple times, callback is only called once
    assertTrue(called)
    assertTrue(subscription.isUnsubscribed)
  }
  @Test def testNewSubscriber(): Unit = {
    var didComplete = false
    var didError = false
    var onNextValue = 0
    Observable.just(1).subscribe(new Subscriber[Int] {
      override def onCompleted(): Unit = {
        didComplete = true
      }
      override def onError(e: Throwable): Unit = {
        didError = true
      }
      override def onNext(v: Int): Unit = {
        onNextValue = v
      }
    })
    assertTrue("Subscriber called onCompleted", didComplete)
    assertFalse("Subscriber did not call onError", didError)
    assertEquals(1, onNextValue)
  }
  @Test def testOnStart(): Unit = {
    var called = false
    Observable.just(1).subscribe(new Subscriber[Int] {
      override def onStart(): Unit = {
        called = true
      }
      override def onCompleted(): Unit = {
      }
      override def onError(e: Throwable): Unit = {
      }
      override def onNext(v: Int): Unit = {
      }
    })
    assertTrue("Subscriber.onStart should be called", called)
  }
  @Test def testOnStart2(): Unit = {
    val items = scala.collection.mutable.ListBuffer[Int]()
    var calledOnCompleted = false
    // request(1) in onStart/onNext exercises one-at-a-time backpressure.
    Observable.just(1, 2, 3).subscribe(new Subscriber[Int] {
      override def onStart(): Unit = {
        request(1)
      }
      override def onCompleted(): Unit = {
        calledOnCompleted = true
      }
      override def onError(e: Throwable): Unit = {
      }
      override def onNext(v: Int): Unit = {
        items += v
        request(1)
      }
    })
    assertEquals(List(1, 2, 3), items)
    assertTrue("Subscriber.onCompleted should be called", calledOnCompleted)
  }
}
| samuelgruetter/RxScala | src/test/scala/rx/lang/scala/SubscriberTests.scala | Scala | apache-2.0 | 3,599 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.extractor.jsonpath
import scala.annotation.implicitNotFound
import scala.collection.JavaConverters._
import scala.collection.breakOut
import io.gatling.core.json.Json
/**
 * Low-priority [[JsonFilter]] instances for common target types.
 * Each filter maps a raw parsed JSON value (boxed primitives, String, java.util
 * collections, or null) to the requested Scala type.
 */
trait LowPriorityJsonFilterImplicits {
  // A typed pattern (`e: Any`) does not match null, so null falls through
  // to the second case and is returned unchanged.
  implicit val stringJsonFilter = new JsonFilter[String] {
    val filter: PartialFunction[Any, String] = {
      case e: Any => Json.stringify(e)
      case null => null
    }
  }
  // null.asInstanceOf[Boolean] unboxes to the default value false.
  implicit val jBooleanJsonFilter = new JsonFilter[Boolean] {
    val filter: PartialFunction[Any, Boolean] = {
      case e: java.lang.Boolean => e
      case null => null.asInstanceOf[Boolean]
    }
  }
  // Numeric filters accept any java.lang.Number and narrow/widen as needed;
  // null unboxes to the numeric default (0).
  implicit val integerJsonFilter = new JsonFilter[Int] {
    val filter: PartialFunction[Any, Int] = {
      case e: Number => e.intValue
      case null => null.asInstanceOf[Int]
    }
  }
  implicit val jLongJsonFilter = new JsonFilter[Long] {
    val filter: PartialFunction[Any, Long] = {
      case e: Number => e.longValue
      case null => null.asInstanceOf[Long]
    }
  }
  implicit val jDoubleJsonFilter = new JsonFilter[Double] {
    val filter: PartialFunction[Any, Double] = {
      case e: Number => e.doubleValue
      case null => null.asInstanceOf[Double]
    }
  }
  implicit val jFloatJsonFilter = new JsonFilter[Float] {
    val filter: PartialFunction[Any, Float] = {
      case e: Number => e.floatValue
      case null => null.asInstanceOf[Float]
    }
  }
  // Wraps java.util.List as a Scala Seq view.
  implicit val jListJsonFilter = new JsonFilter[Seq[Any]] {
    val filter: PartialFunction[Any, Seq[Any]] = {
      case e: java.util.List[_] => e.asScala
      case null => null.asInstanceOf[Seq[Any]]
    }
  }
  // Converts java.util.Map to an immutable Scala Map with String keys (breakOut
  // avoids an intermediate collection).
  implicit val jMapJsonFilter = new JsonFilter[Map[String, Any]] {
    val filter: PartialFunction[Any, Map[String, Any]] = {
      case e: java.util.Map[_, _] => e.asScala.map { case (key, value) => key.toString -> value }(breakOut)
      case null => null.asInstanceOf[Map[String, Any]]
    }
  }
  // Catch-all: passes the raw parsed value through untouched.
  implicit val anyJsonFilter = new JsonFilter[Any] {
    val filter: PartialFunction[Any, Any] = { case e => e }
  }
}
object JsonFilter extends LowPriorityJsonFilterImplicits {
  /** Summons the implicit [[JsonFilter]] instance for type X. */
  def apply[X](implicit filter: JsonFilter[X]): JsonFilter[X] = filter
}
/** Type class for extracting a value of type X from a raw parsed JSON node. */
@implicitNotFound("No member of type class JsonFilter found for type ${X}")
trait JsonFilter[X] {
  // Partial function from a raw parsed JSON value to an X; undefined for incompatible inputs.
  def filter: PartialFunction[Any, X]
}
| pwielgolaski/gatling | gatling-core/src/main/scala/io/gatling/core/check/extractor/jsonpath/JsonFilter.scala | Scala | apache-2.0 | 2,992 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index.legacy
import org.locationtech.geomesa.accumulo.data.AccumuloWritableFeature
import org.locationtech.geomesa.accumulo.index.AccumuloJoinIndex
import org.locationtech.geomesa.index.api.ShardStrategy.AttributeShardStrategy
import org.locationtech.geomesa.index.api.{RowKeyValue, WritableFeature}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV5
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV6.AttributeIndexKeySpaceV6
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV7.AttributeIndexKeySpaceV7
import org.locationtech.geomesa.index.index.attribute.{AttributeIndexKey, AttributeIndexKeySpace}
import org.locationtech.geomesa.utils.index.IndexMode.IndexMode
import org.opengis.feature.simple.SimpleFeatureType
/**
 * Legacy (version 5) Accumulo attribute join index.
 * Extends the V5 attribute index with join semantics; overrides key generation so
 * that the written values are the Accumulo-specific reduced "index values" instead
 * of the full serialized feature.
 */
class JoinIndexV5(ds: GeoMesaDataStore[_],
                  sft: SimpleFeatureType,
                  attribute: String,
                  secondaries: Seq[String],
                  mode: IndexMode)
    extends AttributeIndexV5(ds, sft, attribute, secondaries, mode) with AccumuloJoinIndex {
  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
  override val keySpace: AttributeIndexKeySpace = {
    val sharding = AttributeShardStrategy(sft)
    if (sharding.shards.nonEmpty) {
      // if sharding, we need to swap the shard bytes with the idx bytes
      new AttributeIndexKeySpaceV6(sft, sft.getTableSharingBytes, sharding, attribute) {
        // Delegate key construction, but substitute the join index values.
        override def toIndexKey(writable: WritableFeature,
                                tier: Array[Byte],
                                id: Array[Byte],
                                lenient: Boolean): RowKeyValue[AttributeIndexKey] = {
          val kv = super.toIndexKey(writable, tier, id, lenient)
          kv.copy(values = writable.asInstanceOf[AccumuloWritableFeature].indexValues)
        }
      }
    } else {
      // otherwise we can skip the swap and use the parent class
      new AttributeIndexKeySpaceV7(sft, sft.getTableSharingBytes, sharding, attribute) {
        // Delegate key construction, but substitute the join index values.
        override def toIndexKey(writable: WritableFeature,
                                tier: Array[Byte],
                                id: Array[Byte],
                                lenient: Boolean): RowKeyValue[AttributeIndexKey] = {
          val kv = super.toIndexKey(writable, tier, id, lenient)
          kv.copy(values = writable.asInstanceOf[AccumuloWritableFeature].indexValues)
        }
      }
    }
  }
}
| aheyne/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/legacy/JoinIndexV5.scala | Scala | apache-2.0 | 3,066 |
package au.com.dius.pact.matchers.util
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/** Tests for JsonUtils.parseJsonString across every JSON value kind, including empty input. */
@RunWith(classOf[JUnitRunner])
class JsonUtilsSpec extends Specification {
  "Parsing JSON bodies" in {
    "handles a normal JSON body" in {
      JsonUtils.parseJsonString(
        "{\\"password\\":\\"123456\\",\\"firstname\\":\\"Brent\\",\\"booleam\\":\\"true\\",\\"username\\":\\"bbarke\\",\\"lastname\\":\\"Barker\\"}"
      ) must beEqualTo(Map("username" -> "bbarke", "firstname" -> "Brent", "lastname" -> "Barker",
        "booleam" -> "true", "password" -> "123456"))
    }
    "handles a String" in {
      JsonUtils.parseJsonString("\\"I am a string\\"") must beEqualTo("I am a string")
    }
    "handles a Number" in {
      JsonUtils.parseJsonString("1234") must beEqualTo(1234)
    }
    "handles a Boolean" in {
      JsonUtils.parseJsonString("true") must beEqualTo(true)
    }
    "handles a Null" in {
      JsonUtils.parseJsonString("null") must beNull
    }
    "handles an array" in {
      JsonUtils.parseJsonString("[1, 2, 3, 4]") must beEqualTo(Seq(1, 2, 3, 4))
    }
    // Empty input is treated the same as JSON null.
    "handles an empty body" in {
      JsonUtils.parseJsonString("") must beNull
    }
  }
}
| flaregames/pact-jvm | pact-jvm-matchers/src/test/scala/au/com/dius/pact/matchers/util/JsonUtilsSpec.scala | Scala | apache-2.0 | 1,224 |
/** Trait exposing a single integer element with a default value. */
trait X {
  // Default element value; implementors may override.
  val elem: Int = 1
}
object test {
  /** Returns the element carried by the given X. */
  def g(x: X): Int = x.elem

  /** Returns the string representation of any reference value. */
  def f(x: AnyRef): String = x.toString
}
| yusuke2255/dotty | tests/untried/pos/Z.scala | Scala | bsd-3-clause | 110 |
package recipestore
import com.google.inject.{Guice, Injector}
import recipestore.input.{InputModule, RecipeApi}
/** Entry point that loads recipe data into the triple store via the input module. */
object TripleStoreEntryPoint {
  // Guice injector wired with the input-module bindings; a var, so it can be replaced.
  var inputModule: Injector = Guice.createInjector(new InputModule)

  def main(args: Array[String]): Unit = loadRecipeData()

  /** Resolves the RecipeApi from the injector and triggers the recipe load. */
  def loadRecipeData(): Unit = {
    inputModule.getInstance(classOf[RecipeApi]).loadRecipe(true)
  }
} | prad-a-RuntimeException/semantic-store | src/main/scala/recipestore/TripleStoreEntryPoint.scala | Scala | mit | 370 |
package org.apache.spark.repl
import scala.reflect._
import scala.reflect.api.{Mirror, Universe, TypeCreator}
import scala.tools.nsc.{io, Properties, Settings, interpreter}
import scala.tools.nsc.interpreter._
import scala.tools.nsc.util.ScalaClassLoader._
import scala.reflect.api.{Mirror, TypeCreator, Universe => ApiUniverse}
import scala.concurrent.{ ExecutionContext, Await, Future, future }
import ExecutionContext.Implicits._
import scala.tools.nsc.interpreter._
/**
 * SparkILoop subclass that suppresses the standard welcome banner and Spark
 * initialization (both handled externally via metadata/init.sc) and drives
 * interpreter setup programmatically without entering the interactive loop.
 */
class HackSparkILoop(out:JPrintWriter) extends org.apache.spark.repl.SparkILoop(None, out) {
  // HTTP class server used to serve REPL-generated classes; started eagerly.
  // NOTE(review): presumably owned/shared via org.apache.spark.Boot — confirm lifecycle.
  val classServer = {
    val s = org.apache.spark.Boot.classServer
    s.start
    s
  }
  override def initializeSpark() {
    // done using the metadata and init.sc
  }
  override def printWelcome() {
    //
  }
  // Creates the interpreter, initializes it synchronously (waiting up to one
  // minute), and loads files from settings; deliberately skips the REPL loop.
  override def process(settings: Settings): Boolean = savingContextLoader {
    this.settings = settings
    createInterpreter()
    // sets in to some kind of reader depending on environmental cues
    in = chooseReader(settings)// in0.fold(chooseReader(settings))(r => SimpleReader(r, out, interactive = true))
    val globalFuture = Future {
      intp.initializeSynchronous()
      //loopPostInit()
      !intp.reporter.hasErrors
    }
    import scala.concurrent.duration._
    Await.ready(globalFuture, 1 minute)
    //printWelcome()
    //initializeSpark()
    loadFiles(settings)
    /**
    try loop()
    catch AbstractOrMissingHandler()
    finally closeInterpreter()
    */
    // Always reports success; initialization errors surface via the interpreter itself.
    true
  }
} | cheleb/spark-notebook | modules/spark/src/main/scala_2.11/spark-last/HackSparkILoop.scala | Scala | apache-2.0 | 1,489 |
package org.scalatest.tools
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
/** Specs for [[SbtCommandParser]]: checks which command phrases parse successfully. */
class SbtCommandParserSpec extends FunSpec with ShouldMatchers {

  val parser = new SbtCommandParser

  /** Asserts that the given phrase parses successfully. */
  def canParsePhrase(s: String): Unit = {
    parser.parseResult(s) match {
      case ns: parser.NoSuccess => fail(ns.toString)
      case _ => // parsed successfully
    }
  }

  /** Asserts that the given phrase fails to parse. */
  def cannotParsePhrase(s: String): Unit = {
    parser.parseResult(s) match {
      case parser.Success(result, _) => fail("wasn't supposed to, but parsed: " + result)
      case _ => // failed to parse, as expected
    }
  }

  describe("the cmd terminal?") {
    it("should parse 'st'") {
      canParsePhrase("""st""")
      canParsePhrase("""st --""")
    }
  }
}
| hubertp/scalatest | src/test/scala/org/scalatest/tools/SbtCommandParserSpec.scala | Scala | apache-2.0 | 755 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.github.microburn.integration.trello
import org.github.microburn.integration.support.kanban.StoryPointsFromName
/** Mixin that splits a Trello card name into optional story points and the remaining name. */
trait TrelloTask {
  // Raw card name supplied by the concrete task.
  protected val name: String
  // Pattern-val definition: destructures the name via the StoryPointsFromName extractor.
  // NOTE(review): a pattern val throws MatchError when the extractor does not match —
  // presumably StoryPointsFromName.unapply is total; confirm.
  val StoryPointsFromName(optionalSp, extractedName) = name
} | arkadius/micro-burn | src/main/scala/org/github/microburn/integration/trello/TrelloTask.scala | Scala | apache-2.0 | 850 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.