| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
// 1,$s/tolerantEquality\[Byte\](/tolerantEquivalence[Byte](tolerance = /
class TolerantEquivalenceSpec extends funspec.AnyFunSpec with TypeCheckedTripleEquals with TolerantNumerics {
val sevenDotOh = 7.0
val minusSevenDotOh = -7.0
val sevenDotOhFloat = 7.0f
val minusSevenDotOhFloat = -7.0f
val sevenLong = 7L
val minusSevenLong = -7L
val sevenInt = 7
val minusSevenInt = -7
val sevenShort: Short = 7
val minusSevenShort: Short = -7
val sevenByte: Byte = 7
val minusSevenByte: Byte = -7
describe("The === syntax") {
it("should be true if the number is within the given interval for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(sevenDotOh === 7.1)
assert(sevenDotOh === 6.9)
assert(sevenDotOh === 7.0)
assert(sevenDotOh === 7.2)
assert(sevenDotOh === 6.8)
assert(minusSevenDotOh === -7.1)
assert(minusSevenDotOh === -6.9)
assert(minusSevenDotOh === -7.0)
assert(minusSevenDotOh === -7.2)
assert(minusSevenDotOh === -6.8)
}
it("should be true if the number is within the given interval for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(sevenDotOhFloat === 7.1f)
assert(sevenDotOhFloat === 6.9f)
assert(sevenDotOhFloat === 7.0f)
assert(sevenDotOhFloat === 7.2f)
assert(sevenDotOhFloat === 6.8f)
assert(minusSevenDotOhFloat === -7.1f)
assert(minusSevenDotOhFloat === -6.9f)
assert(minusSevenDotOhFloat === -7.0f)
assert(minusSevenDotOhFloat === -7.2f)
assert(minusSevenDotOhFloat === -6.8f)
}
it("should be true if the number is within the given interval for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(sevenLong === 9L)
assert(sevenLong === 8L)
assert(sevenLong === 7L)
assert(sevenLong === 6L)
assert(sevenLong === 5L)
assert(minusSevenLong === -9L)
assert(minusSevenLong === -8L)
assert(minusSevenLong === -7L)
assert(minusSevenLong === -6L)
assert(minusSevenLong === -5L)
}
it("should be true if the number is within the given interval for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(sevenInt === 9)
assert(sevenInt === 8)
assert(sevenInt === 7)
assert(sevenInt === 6)
assert(sevenInt === 5)
assert(minusSevenInt === -9)
assert(minusSevenInt === -8)
assert(minusSevenInt === -7)
assert(minusSevenInt === -6)
assert(minusSevenInt === -5)
}
it("should be true if the number is within the given interval for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(sevenShort === 9.toShort)
assert(sevenShort === 8.toShort)
assert(sevenShort === 7.toShort)
assert(sevenShort === 6.toShort)
assert(sevenShort === 5.toShort)
assert(minusSevenShort === (-9).toShort)
assert(minusSevenShort === (-8).toShort)
assert(minusSevenShort === (-7).toShort)
assert(minusSevenShort === (-6).toShort)
assert(minusSevenShort === (-5).toShort)
}
it("should be true if the number is within the given interval for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(sevenByte === 9.toByte)
assert(sevenByte === 8.toByte)
assert(sevenByte === 7.toByte)
assert(sevenByte === 6.toByte)
assert(sevenByte === 5.toByte)
assert(minusSevenByte === (-9).toByte)
assert(minusSevenByte === (-8).toByte)
assert(minusSevenByte === (-7).toByte)
assert(minusSevenByte === (-6).toByte)
assert(minusSevenByte === (-5).toByte)
}
it("should, for symmetry, be true if the number is within the given interval when the interval is placed on the left hand side for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(7.1 === sevenDotOh)
assert(6.9 === sevenDotOh)
assert(7.0 === sevenDotOh)
assert(7.2 === sevenDotOh)
assert(6.8 === sevenDotOh)
assert(-7.1 === minusSevenDotOh)
assert(-6.9 === minusSevenDotOh)
assert(-7.0 === minusSevenDotOh)
assert(-7.2 === minusSevenDotOh)
assert(-6.8 === minusSevenDotOh)
}
it("should, for symmetry, be true if the number is within the given interval when the interval is placed on the left hand side for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(7.1f === sevenDotOhFloat)
assert(6.9f === sevenDotOhFloat)
assert(7.0f === sevenDotOhFloat)
assert(7.2f === sevenDotOhFloat)
assert(6.8f === sevenDotOhFloat)
assert(-7.1f === minusSevenDotOhFloat)
assert(-6.9f === minusSevenDotOhFloat)
assert(-7.0f === minusSevenDotOhFloat)
assert(-7.2f === minusSevenDotOhFloat)
assert(-6.8f === minusSevenDotOhFloat)
}
it("should, for symmetry, be true if the number is within the given interval when the interval is placed on the left hand side for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(9L === sevenLong)
assert(8L === sevenLong)
assert(7L === sevenLong)
assert(6L === sevenLong)
assert(5L === sevenLong)
assert(-9L === minusSevenLong)
assert(-8L === minusSevenLong)
assert(-7L === minusSevenLong)
assert(-6L === minusSevenLong)
assert(-5L === minusSevenLong)
}
it("should, for symmetry, be true if the number is within the given interval when the interval is placed on the left hand side for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(9 === sevenInt)
assert(8 === sevenInt)
assert(7 === sevenInt)
assert(6 === sevenInt)
assert(5 === sevenInt)
assert(-9 === minusSevenInt)
assert(-8 === minusSevenInt)
assert(-7 === minusSevenInt)
assert(-6 === minusSevenInt)
assert(-5 === minusSevenInt)
}
it("should, for symmetry, be true if the number is within the given interval when the interval is placed on the left hand side for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(9.toShort === sevenShort)
assert(8.toShort === sevenShort)
assert(7.toShort === sevenShort)
assert(6.toShort === sevenShort)
assert(5.toShort === sevenShort)
assert((-9).toShort === minusSevenShort)
assert((-8).toShort === minusSevenShort)
assert((-7).toShort === minusSevenShort)
assert((-6).toShort === minusSevenShort)
assert((-5).toShort === minusSevenShort)
}
it("should, for symmetry, be true if the number is within the given interval when the interval is placed on the left hand side for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(9.toByte === sevenByte)
assert(8.toByte === sevenByte)
assert(7.toByte === sevenByte)
assert(6.toByte === sevenByte)
assert(5.toByte === sevenByte)
assert((-9).toByte === minusSevenByte)
assert((-8).toByte === minusSevenByte)
assert((-7).toByte === minusSevenByte)
assert((-6).toByte === minusSevenByte)
assert((-5).toByte === minusSevenByte)
}
it("should be false if the number is outside the given interval for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(!(sevenDotOh === 7.5))
assert(!(sevenDotOh === 6.5))
assert(!(minusSevenDotOh === -7.5))
assert(!(minusSevenDotOh === -6.5))
}
it("should be false if the number is outside the given interval for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(!(sevenDotOhFloat === 7.5f))
assert(!(sevenDotOhFloat === 6.5f))
assert(!(minusSevenDotOhFloat === -7.5f))
assert(!(minusSevenDotOhFloat === -6.5f))
}
it("should be false if the number is outside the given interval for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(!(sevenLong === 4L))
assert(!(sevenLong === 10L))
assert(!(minusSevenLong === -4L))
assert(!(minusSevenLong === -10L))
}
it("should be false if the number is outside the given interval for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(!(sevenInt === 4))
assert(!(sevenInt === 10))
assert(!(minusSevenInt === -4))
assert(!(minusSevenInt === -10))
}
it("should be false if the number is outside the given interval for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(!(sevenShort === 4.toShort))
assert(!(sevenShort === 10.toShort))
assert(!(minusSevenShort === (-4).toShort))
assert(!(minusSevenShort === (-10).toShort))
}
it("should be false if the number is outside the given interval for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(!(sevenByte === 4.toByte))
assert(!(sevenByte === 10.toByte))
assert(!(minusSevenByte === (-4).toByte))
assert(!(minusSevenByte === (-10).toByte))
}
it("should, for symmetry, be false if the number is outside the given interval, when the interval is on the left hand side for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(!(7.5 === sevenDotOh))
assert(!(6.5 === sevenDotOh))
assert(!(-7.5 === minusSevenDotOh))
assert(!(-6.5 === minusSevenDotOh))
}
it("should, for symmetry, be false if the number is outside the given interval, when the interval is on the left hand side for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(!(7.5f === sevenDotOhFloat))
assert(!(6.5f === sevenDotOhFloat))
assert(!(-7.5f === minusSevenDotOhFloat))
assert(!(-6.5f === minusSevenDotOhFloat))
}
it("should, for symmetry, be false if the number is outside the given interval, when the interval is on the left hand side for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(!(4L === sevenLong))
assert(!(10L === sevenLong))
assert(!(-4L === minusSevenLong))
assert(!(-10L === minusSevenLong))
}
it("should, for symmetry, be false if the number is outside the given interval, when the interval is on the left hand side for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(!(4 === sevenInt))
assert(!(10 === sevenInt))
assert(!(-4 === minusSevenInt))
assert(!(-10 === minusSevenInt))
}
it("should, for symmetry, be false if the number is outside the given interval, when the interval is on the left hand side for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(!(4.toShort === sevenShort))
assert(!(10.toShort === sevenShort))
assert(!((-4).toShort === minusSevenShort))
assert(!((-10).toShort === minusSevenShort))
}
it("should, for symmetry, be false if the number is outside the given interval, when the interval is on the left hand side for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(!(4.toByte === sevenByte))
assert(!(10.toByte === sevenByte))
assert(!((-4).toByte === minusSevenByte))
assert(!((-10).toByte === minusSevenByte))
}
}
describe("The !== syntax") {
it("should be true if the number is outside the given interval for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(sevenDotOh !== 7.5)
assert(sevenDotOh !== 6.5)
assert(minusSevenDotOh !== -7.5)
assert(minusSevenDotOh !== -6.5)
}
it("should be true if the number is outside the given interval for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(sevenDotOhFloat !== 7.5f)
assert(sevenDotOhFloat !== 6.5f)
assert(minusSevenDotOhFloat !== -7.5f)
assert(minusSevenDotOhFloat !== -6.5f)
}
it("should be true if the number is outside the given interval for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(sevenLong !== 4L)
assert(sevenLong !== 10L)
assert(minusSevenLong !== -4L)
assert(minusSevenLong !== -10L)
}
it("should be true if the number is outside the given interval for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(sevenInt !== 4)
assert(sevenInt !== 10)
assert(minusSevenInt !== -4)
assert(minusSevenInt !== -10)
}
it("should be true if the number is outside the given interval for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(sevenShort !== 4.toShort)
assert(sevenShort !== 10.toShort)
assert(minusSevenShort !== (-4).toShort)
assert(minusSevenShort !== (-10).toShort)
}
it("should be true if the number is outside the given interval for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(sevenByte !== 4.toByte)
assert(sevenByte !== 10.toByte)
assert(minusSevenByte !== (-4).toByte)
assert(minusSevenByte !== (-10).toByte)
}
it("should, for symmetry, be true if the number is outside the given interval when the interval is placed on the left hand side for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(7.5 !== sevenDotOh)
assert(6.5 !== sevenDotOh)
assert(-7.5 !== minusSevenDotOh)
assert(-6.5 !== minusSevenDotOh)
}
it("should, for symmetry, be true if the number is outside the given interval when the interval is placed on the left hand side for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(7.5f !== sevenDotOhFloat)
assert(6.5f !== sevenDotOhFloat)
assert(-7.5f !== minusSevenDotOhFloat)
assert(-6.5f !== minusSevenDotOhFloat)
}
it("should, for symmetry, be true if the number is outside the given interval when the interval is placed on the left hand side for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(4L !== sevenLong)
assert(10L !== sevenLong)
assert(-4L !== minusSevenLong)
assert(-10L !== minusSevenLong)
}
it("should, for symmetry, be true if the number is outside the given interval when the interval is placed on the left hand side for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(4 !== sevenInt)
assert(10 !== sevenInt)
assert(-4 !== minusSevenInt)
assert(-10 !== minusSevenInt)
}
it("should, for symmetry, be true if the number is outside the given interval when the interval is placed on the left hand side for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(4.toShort !== sevenShort)
assert(10.toShort !== sevenShort)
assert((-4).toShort !== minusSevenShort)
assert((-10).toShort !== minusSevenShort)
}
it("should, for symmetry, be true if the number is outside the given interval when the interval is placed on the left hand side for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(4.toByte !== sevenByte)
assert(10.toByte !== sevenByte)
assert((-4).toByte !== minusSevenByte)
assert((-10).toByte !== minusSevenByte)
}
it("should be false if the number is within the given interval for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(!(sevenDotOh !== 7.1))
assert(!(sevenDotOh !== 6.9))
assert(!(sevenDotOh !== 7.0))
assert(!(sevenDotOh !== 7.2))
assert(!(sevenDotOh !== 6.8))
assert(!(minusSevenDotOh !== -7.1))
assert(!(minusSevenDotOh !== -6.9))
assert(!(minusSevenDotOh !== -7.0))
assert(!(minusSevenDotOh !== -7.2))
assert(!(minusSevenDotOh !== -6.8))
}
it("should be false if the number is within the given interval for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(!(sevenDotOhFloat !== 7.1f))
assert(!(sevenDotOhFloat !== 6.9f))
assert(!(sevenDotOhFloat !== 7.0f))
assert(!(sevenDotOhFloat !== 7.2f))
assert(!(sevenDotOhFloat !== 6.8f))
assert(!(minusSevenDotOhFloat !== -7.1f))
assert(!(minusSevenDotOhFloat !== -6.9f))
assert(!(minusSevenDotOhFloat !== -7.0f))
assert(!(minusSevenDotOhFloat !== -7.2f))
assert(!(minusSevenDotOhFloat !== -6.8f))
}
it("should be false if the number is within the given interval for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(!(sevenLong !== 9L))
assert(!(sevenLong !== 8L))
assert(!(sevenLong !== 7L))
assert(!(sevenLong !== 6L))
assert(!(sevenLong !== 5L))
assert(!(minusSevenLong !== -9L))
assert(!(minusSevenLong !== -8L))
assert(!(minusSevenLong !== -7L))
assert(!(minusSevenLong !== -6L))
assert(!(minusSevenLong !== -5L))
}
it("should be false if the number is within the given interval for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(!(sevenInt !== 9))
assert(!(sevenInt !== 8))
assert(!(sevenInt !== 7))
assert(!(sevenInt !== 6))
assert(!(sevenInt !== 5))
assert(!(minusSevenInt !== -9))
assert(!(minusSevenInt !== -8))
assert(!(minusSevenInt !== -7))
assert(!(minusSevenInt !== -6))
assert(!(minusSevenInt !== -5))
}
it("should be false if the number is within the given interval for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(!(sevenShort !== 9.toShort))
assert(!(sevenShort !== 8.toShort))
assert(!(sevenShort !== 7.toShort))
assert(!(sevenShort !== 6.toShort))
assert(!(sevenShort !== 5.toShort))
assert(!(minusSevenShort !== (-9).toShort))
assert(!(minusSevenShort !== (-8).toShort))
assert(!(minusSevenShort !== (-7).toShort))
assert(!(minusSevenShort !== (-6).toShort))
assert(!(minusSevenShort !== (-5).toShort))
}
it("should be false if the number is within the given interval for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(!(sevenByte !== 9.toByte))
assert(!(sevenByte !== 8.toByte))
assert(!(sevenByte !== 7.toByte))
assert(!(sevenByte !== 6.toByte))
assert(!(sevenByte !== 5.toByte))
assert(!(minusSevenByte !== (-9).toByte))
assert(!(minusSevenByte !== (-8).toByte))
assert(!(minusSevenByte !== (-7).toByte))
assert(!(minusSevenByte !== (-6).toByte))
assert(!(minusSevenByte !== (-5).toByte))
}
it("should, for symmetry, be false if the number is within the given interval, when the interval is placed on the left hand side for Double") {
// Double +- Double
implicit val eq: Equivalence[Double] = tolerantEquivalence[Double](tolerance = 0.2)
assert(!(7.1 !== sevenDotOh))
assert(!(6.9 !== sevenDotOh))
assert(!(7.0 !== sevenDotOh))
assert(!(7.2 !== sevenDotOh))
assert(!(6.8 !== sevenDotOh))
assert(!(-7.1 !== minusSevenDotOh))
assert(!(-6.9 !== minusSevenDotOh))
assert(!(-7.0 !== minusSevenDotOh))
assert(!(-7.2 !== minusSevenDotOh))
assert(!(-6.8 !== minusSevenDotOh))
}
it("should, for symmetry, be false if the number is within the given interval, when the interval is placed on the left hand side for Float") {
// Float +- Float
implicit val eq: Equivalence[Float] = tolerantEquivalence[Float](tolerance = 0.2f)
assert(!(7.1f !== sevenDotOhFloat))
assert(!(6.9f !== sevenDotOhFloat))
assert(!(7.0f !== sevenDotOhFloat))
assert(!(7.2f !== sevenDotOhFloat))
assert(!(6.8f !== sevenDotOhFloat))
assert(!(-7.1f !== minusSevenDotOhFloat))
assert(!(-6.9f !== minusSevenDotOhFloat))
assert(!(-7.0f !== minusSevenDotOhFloat))
assert(!(-7.2f !== minusSevenDotOhFloat))
assert(!(-6.8f !== minusSevenDotOhFloat))
}
it("should, for symmetry, be false if the number is within the given interval, when the interval is placed on the left hand side for Long") {
// Long +- Long
implicit val eq: Equivalence[Long] = tolerantEquivalence[Long](tolerance = 2L)
assert(!(9L !== sevenLong))
assert(!(8L !== sevenLong))
assert(!(7L !== sevenLong))
assert(!(6L !== sevenLong))
assert(!(5L !== sevenLong))
assert(!(-9L !== minusSevenLong))
assert(!(-8L !== minusSevenLong))
assert(!(-7L !== minusSevenLong))
assert(!(-6L !== minusSevenLong))
assert(!(-5L !== minusSevenLong))
}
it("should, for symmetry, be false if the number is within the given interval, when the interval is placed on the left hand side for Int") {
// Int +- Int
implicit val eq: Equivalence[Int] = tolerantEquivalence[Int](tolerance = 2)
assert(!(9 !== sevenInt))
assert(!(8 !== sevenInt))
assert(!(7 !== sevenInt))
assert(!(6 !== sevenInt))
assert(!(5 !== sevenInt))
assert(!(-9 !== minusSevenInt))
assert(!(-8 !== minusSevenInt))
assert(!(-7 !== minusSevenInt))
assert(!(-6 !== minusSevenInt))
assert(!(-5 !== minusSevenInt))
}
it("should, for symmetry, be false if the number is within the given interval, when the interval is placed on the left hand side for Short") {
// Short +- Short
implicit val eq: Equivalence[Short] = tolerantEquivalence[Short](tolerance = 2.toShort)
assert(!(9.toShort !== sevenShort))
assert(!(8.toShort !== sevenShort))
assert(!(7.toShort !== sevenShort))
assert(!(6.toShort !== sevenShort))
assert(!(5.toShort !== sevenShort))
assert(!((-9).toShort !== minusSevenShort))
assert(!((-8).toShort !== minusSevenShort))
assert(!((-7).toShort !== minusSevenShort))
assert(!((-6).toShort !== minusSevenShort))
assert(!((-5).toShort !== minusSevenShort))
}
it("should, for symmetry, be false if the number is within the given interval, when the interval is placed on the left hand side for Byte") {
// Byte +- Byte
implicit val eq: Equivalence[Byte] = tolerantEquivalence[Byte](tolerance = 2.toByte)
assert(!(9.toByte !== sevenByte))
assert(!(8.toByte !== sevenByte))
assert(!(7.toByte !== sevenByte))
assert(!(6.toByte !== sevenByte))
assert(!(5.toByte !== sevenByte))
assert(!((-9).toByte !== minusSevenByte))
assert(!((-8).toByte !== minusSevenByte))
assert(!((-7).toByte !== minusSevenByte))
assert(!((-6).toByte !== minusSevenByte))
assert(!((-5).toByte !== minusSevenByte))
}
}
describe("The X +- Y syntax") {
it("should throw IllegalArgumentException if the number passed to the right is 0 or negative for Double") {
// Double +- Double
val caught1 = intercept[IllegalArgumentException] {
tolerantEquivalence[Double](tolerance = -0.2)
}
assert(caught1.getMessage === (-0.2).toString + " passed to tolerantEquivalence was zero or negative. Must be a positive non-zero number.", caught1.getMessage)
}
it("should throw IllegalArgumentException if the number passed to the right is 0 or negative for Float") {
// Float +- Float
val caught7 = intercept[IllegalArgumentException] {
tolerantEquivalence[Float](tolerance = -0.2f)
}
assert(caught7.getMessage === (-0.2f).toString + " passed to tolerantEquivalence was zero or negative. Must be a positive non-zero number.")
}
it("should throw IllegalArgumentException if the number passed to the right is 0 or negative for Long") {
// Long +- Long
val caught12 = intercept[IllegalArgumentException] {
tolerantEquivalence[Long](tolerance = -2L)
}
assert(caught12.getMessage === "-2 passed to tolerantEquivalence was zero or negative. Must be a positive non-zero number.")
}
it("should throw IllegalArgumentException if the number passed to the right is 0 or negative for Int") {
// Int +- Int
val caught16 = intercept[IllegalArgumentException] {
tolerantEquivalence[Int](tolerance = -2)
}
assert(caught16.getMessage === "-2 passed to tolerantEquivalence was zero or negative. Must be a positive non-zero number.")
}
it("should throw IllegalArgumentException if the number passed to the right is 0 or negative for Short") {
// Short +- Short
val caught19 = intercept[IllegalArgumentException] {
tolerantEquivalence[Short](tolerance = (-2).toShort)
}
assert(caught19.getMessage === "-2 passed to tolerantEquivalence was zero or negative. Must be a positive non-zero number.")
}
it("should throw IllegalArgumentException if the number passed to the right is 0 or negative for Byte") {
// Byte +- Byte
val caught21 = intercept[IllegalArgumentException] {
tolerantEquivalence[Byte](tolerance = (-2).toByte)
}
assert(caught21.getMessage === "-2 passed to tolerantEquivalence was zero or negative. Must be a positive non-zero number.")
}
}
describe("TolerantNumeric's tolerantEquivalence method") {
it("should return Equivalences with a pretty toString") {
assert(tolerantEquivalence(1.0).toString === "TolerantEquivalence(" + (1.0).toString + ")")
assert(tolerantEquivalence(2.2).toString === "TolerantEquivalence(" + (2.2).toString + ")")
assert(tolerantEquivalence(1.0f).toString === "TolerantEquivalence(" + (1.0f).toString + ")")
assert(tolerantEquivalence(2.2f).toString === "TolerantEquivalence(" + (2.2f).toString + ")")
assert(tolerantEquivalence(1L).toString === "TolerantEquivalence(1)")
assert(tolerantEquivalence(2L).toString === "TolerantEquivalence(2)")
assert(tolerantEquivalence(1).toString === "TolerantEquivalence(1)")
assert(tolerantEquivalence(2).toString === "TolerantEquivalence(2)")
assert(tolerantEquivalence(1.toShort).toString === "TolerantEquivalence(1)")
assert(tolerantEquivalence(2.toShort).toString === "TolerantEquivalence(2)")
assert(tolerantEquivalence(1.toByte).toString === "TolerantEquivalence(1)")
assert(tolerantEquivalence(2.toByte).toString === "TolerantEquivalence(2)")
}
}
}
| scalatest/scalatest | jvm/scalactic-test/src/test/scala/org/scalactic/TolerantEquivalenceSpec.scala | Scala | apache-2.0 | 29,290 |
package mesosphere.marathon.api.v2
import mesosphere.marathon._
import mesosphere.marathon.api.TestAuthFixture
import mesosphere.marathon.core.election.ElectionService
import mesosphere.marathon.test.{ MarathonSpec, Mockito }
import org.scalatest.{ GivenWhenThen, Matchers }
class LeaderResourceTest extends MarathonSpec with Matchers with Mockito with GivenWhenThen {
test("access without authentication is denied") {
Given("An unauthenticated request")
val f = new Fixture
val resource = f.leaderResource()
f.auth.authenticated = false
When("we try to get the leader info")
val index = resource.index(f.auth.request)
Then("we receive a NotAuthenticated response")
index.getStatus should be(f.auth.NotAuthenticatedStatus)
When("we try to delete the current leader")
val delete = resource.delete(f.auth.request)
Then("we receive a NotAuthenticated response")
delete.getStatus should be(f.auth.NotAuthenticatedStatus)
}
test("access without authorization is denied") {
Given("An unauthenticated request")
val f = new Fixture
val resource = f.leaderResource()
f.auth.authenticated = true
f.auth.authorized = false
When("we try to get the leader info")
val index = resource.index(f.auth.request)
Then("we receive a Unauthorized response")
index.getStatus should be(f.auth.UnauthorizedStatus)
When("we try to delete the current leader")
val delete = resource.delete(f.auth.request)
Then("we receive a Unauthorized response")
delete.getStatus should be(f.auth.UnauthorizedStatus)
}
class Fixture {
val schedulerService = mock[MarathonSchedulerService]
val electionService = mock[ElectionService]
val auth = new TestAuthFixture
val config = AllConf.withTestConfig("--event_subscriber", "http_callback")
def leaderResource() = new LeaderResource(electionService, config, auth.auth, auth.auth)
}
}
| timcharper/marathon | src/test/scala/mesosphere/marathon/api/v2/LeaderResourceTest.scala | Scala | apache-2.0 | 1,932 |
package dtable
import com.typesafe.config.ConfigFactory
import shared.model.User
import scala.concurrent.{Await, Future}
import slick.jdbc.SQLiteProfile.api._
import scala.concurrent.duration._
package object dblayer {
def oper[T](action: Future[T]): T = {
Await.result(action, 10.seconds)
}
def call[T1, T2](query: Query[T1, T2, Seq])(implicit db: Database) = {
oper(db.run(query.result))
}
val config = ConfigFactory.load()
val dbname = config.getString("db.uri")
implicit lazy val db = Database.forURL(dbname, driver = "org.sqlite.JDBC")
}
| SergiiPolokhalo/DTable | server/src/main/scala/dtable/dblayer/package.scala | Scala | apache-2.0 | 567 |
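A minimal usage sketch for the dblayer helpers above, showing how call runs a Slick query synchronously through the implicit db from the package object. The Users table and DbLayerUsageSketch object are hypothetical, not part of the original package.

import slick.jdbc.SQLiteProfile.api._
import dtable.dblayer._

object DbLayerUsageSketch {
  // Hypothetical table definition, for illustration only.
  class Users(tag: Tag) extends Table[(Long, String)](tag, "users") {
    def id = column[Long]("id", O.PrimaryKey)
    def name = column[String]("name")
    def * = (id, name)
  }
  val users = TableQuery[Users]

  def main(args: Array[String]): Unit = {
    // call resolves the implicit db defined in the dblayer package object and blocks for the result.
    val allUsers: Seq[(Long, String)] = call(users)
    println(allUsers)
  }
}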
package uk.ac.ncl.openlab.intake24.services.systemdb.admin
import java.security.SecureRandom
import java.util.Base64
object URLAuthTokenUtils {
private val secureRandom = new SecureRandom()
private val base64Encoder = Base64.getUrlEncoder()
private val allowedChars = "ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjkmnpqrstuvwxyz23456789"
/*def generateToken = {
val bytes = new Array[Byte](24)
secureRandom.nextBytes(bytes)
base64Encoder.encodeToString(bytes)
}*/
def generateToken = {
val chars = allowedChars.toCharArray
val password = new StringBuilder()
for (i <- 0 until 9) {
password.append(chars(secureRandom.nextInt(chars.length)))
}
password.toString()
}
}
| digitalinteraction/intake24 | SystemDataServices/src/main/scala/uk/ac/ncl/openlab/intake24/services/systemdb/admin/URLAuthTokenUtils.scala | Scala | apache-2.0 | 713 |
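A short sketch of using the token generator above: generateToken returns a 9-character string drawn from the restricted alphabet (visually ambiguous characters removed). The TokenSketch wrapper is illustrative only.

import uk.ac.ncl.openlab.intake24.services.systemdb.admin.URLAuthTokenUtils

object TokenSketch extends App {
  val token = URLAuthTokenUtils.generateToken
  // The loop in generateToken always appends exactly 9 characters.
  assert(token.length == 9)
  println(token)
}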
package de.htwg.zeta.persistence.actorCache
import java.util.UUID
import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.Failure
import scala.util.Success
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Cancellable
import akka.actor.Props
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.CleanUp
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Create
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Delete
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Read
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Update
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.unitFuture
import de.htwg.zeta.persistence.authInfo.ZetaLoginInfo
import de.htwg.zeta.persistence.general.LoginInfoRepository
import grizzled.slf4j.Logging
private[actorCache] object LoginInfoCacheActor {
case class Create(loginInfo: ZetaLoginInfo, userId: UUID)
case class Read(loginInfo: ZetaLoginInfo)
case class Update(old: ZetaLoginInfo, updated: ZetaLoginInfo)
case class Delete(loginInfo: ZetaLoginInfo)
private case object CleanUp
private val unitFuture: Future[Unit] = Future.successful(())
def props(underlying: LoginInfoRepository, cacheDuration: FiniteDuration): Props = Props(new LoginInfoCacheActor(underlying, cacheDuration))
}
private[actorCache] class LoginInfoCacheActor(underlying: LoginInfoRepository, cacheDuration: FiniteDuration) extends Actor with Logging {
private val cache: mutable.Map[ZetaLoginInfo, Future[UUID]] = mutable.Map.empty
private val used: mutable.Set[ZetaLoginInfo] = mutable.Set.empty
private val cleanUpJob: Cancellable = context.system.scheduler.scheduleAtFixedRate(cacheDuration, cacheDuration, self, CleanUp)
override def receive: Receive = {
case Create(loginInfo, userId) => create(loginInfo, userId)
case Read(loginInfo) => read(loginInfo)
case Update(old, updated) => update(old, updated)
case Delete(loginInfo) => delete(loginInfo)
case CleanUp => cleanUp()
}
private def create(loginInfo: ZetaLoginInfo, userId: UUID): Unit = {
val entry = mapOrRecoverToUnit(cache.get(loginInfo)).flatMap(_ => underlying.create(loginInfo, userId))
replyToSender(entry, sender)
cache += (loginInfo -> entry.map(_ => userId))
used += loginInfo
}
private def read(loginInfo: ZetaLoginInfo): Unit = {
val entry = cache.get(loginInfo).fold(underlying.read(loginInfo))(_.recoverWith { case _ => underlying.read(loginInfo) })
replyToSender(entry, sender)
cache += (loginInfo -> entry)
used += loginInfo
}
private def update(old: ZetaLoginInfo, updated: ZetaLoginInfo): Unit = {
val entry = mapOrRecoverToUnit(cache.get(old)).flatMap(_ => underlying.update(old, updated))
replyToSender(entry, sender)
cache -= old
used -= old
}
private def delete(loginInfo: ZetaLoginInfo): Unit = {
val entry = mapOrRecoverToUnit(cache.get(loginInfo)).flatMap(_ => underlying.delete(loginInfo))
replyToSender(entry, sender)
cache -= loginInfo
used -= loginInfo
}
private def mapOrRecoverToUnit(f: Option[Future[UUID]]): Future[Unit] = {
f.fold(unitFuture)(_.flatMap(_ => unitFuture).recoverWith { case _ => unitFuture })
}
private def replyToSender[T](f: Future[T], target: ActorRef): Unit = {
f.onComplete {
case Success(s) => target ! Success(s)
case Failure(e) => target ! Failure(e)
}
}
private def cleanUp(): Unit = {
val unused = cache.keySet.filter(!used.contains(_))
unused.foreach(cache.remove)
used.clear()
}
override def postStop(): Unit = {
cleanUpJob.cancel()
}
}
| Zeta-Project/zeta | api/persistence/src/main/scala/de/htwg/zeta/persistence/actorCache/LoginInfoCacheActor.scala | Scala | bsd-2-clause | 3,801 |
package com.ubeeko.exceptions
/**
* Created with IntelliJ IDEA.
* User: elb
* Date: 10/06/13
* Time: 15:28
*/
trait NotImplementedOperation extends IllegalOperationException
object NotImplementedOperation {
def apply(message: String, cause: Throwable = null) = new Exception(message, cause) with NotImplementedOperation
}
//TODO temporary java compatible case class
case class JavaCompatibleNotImplementedOperation(message: String, cause: Throwable = null) extends Exception(message, cause) with NotImplementedOperation {
def this(message: String) = this(message, null)
}
| eric-leblouch/htalk | src/main/scala/com/ubeeko/exceptions/NotImplementedOperation.scala | Scala | apache-2.0 | 583 |
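A brief sketch of how the NotImplementedOperation factory above might be used; the RpcStub object is illustrative only.

import com.ubeeko.exceptions.NotImplementedOperation

object RpcStub {
  // apply builds a plain Exception mixed in with the NotImplementedOperation marker trait.
  def batchDelete(ids: Seq[Long]): Nothing =
    throw NotImplementedOperation(s"batchDelete is not implemented (got ${ids.size} ids)")
}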
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal
import java.util.concurrent.TimeUnit
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.util.Utils
package object config {
private[spark] val DRIVER_CLASS_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val DRIVER_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS).stringConf.createOptional
private[spark] val DRIVER_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val DRIVER_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.driver.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val DRIVER_MEMORY = ConfigBuilder("spark.driver.memory")
.doc("Amount of memory to use for the driver process, in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.driver.memoryOverhead")
.doc("The amount of off-heap memory to be allocated per driver in cluster mode, " +
"in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val EVENT_LOG_COMPRESS =
ConfigBuilder("spark.eventLog.compress")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_BLOCK_UPDATES =
ConfigBuilder("spark.eventLog.logBlockUpdates.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_TESTING =
ConfigBuilder("spark.eventLog.testing")
.internal()
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_OUTPUT_BUFFER_SIZE = ConfigBuilder("spark.eventLog.buffer.kb")
.doc("Buffer size to use when writing to output streams, in KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("100k")
private[spark] val EVENT_LOG_OVERWRITE =
ConfigBuilder("spark.eventLog.overwrite").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_CLASS_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH).stringConf.createOptional
private[spark] val EXECUTOR_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS).stringConf.createOptional
private[spark] val EXECUTOR_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH).stringConf.createOptional
private[spark] val EXECUTOR_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.executor.userClassPathFirst").booleanConf.createWithDefault(false)
private[spark] val EXECUTOR_MEMORY = ConfigBuilder("spark.executor.memory")
.doc("Amount of memory to use per executor process, in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead")
.doc("The amount of off-heap memory to be allocated per executor in cluster mode, " +
"in MiB unless otherwise specified.")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val MEMORY_OFFHEAP_ENABLED = ConfigBuilder("spark.memory.offHeap.enabled")
.doc("If true, Spark will attempt to use off-heap memory for certain operations. " +
"If off-heap memory use is enabled, then spark.memory.offHeap.size must be positive.")
.withAlternative("spark.unsafe.offHeap")
.booleanConf
.createWithDefault(false)
private[spark] val MEMORY_OFFHEAP_SIZE = ConfigBuilder("spark.memory.offHeap.size")
.doc("The absolute amount of memory in bytes which can be used for off-heap allocation. " +
"This setting has no impact on heap memory usage, so if your executors' total memory " +
"consumption must fit within some hard limit then be sure to shrink your JVM heap size " +
"accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ >= 0, "The off-heap memory size must not be negative")
.createWithDefault(0)
private[spark] val IS_PYTHON_APP = ConfigBuilder("spark.yarn.isPython").internal()
.booleanConf.createWithDefault(false)
private[spark] val CPUS_PER_TASK = ConfigBuilder("spark.task.cpus").intConf.createWithDefault(1)
private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.minExecutors").intConf.createWithDefault(0)
private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.initialExecutors")
.fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.maxExecutors").intConf.createWithDefault(Int.MaxValue)
private[spark] val DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO =
ConfigBuilder("spark.dynamicAllocation.executorAllocationRatio")
.doubleConf.createWithDefault(1.0)
private[spark] val LOCALITY_WAIT = ConfigBuilder("spark.locality.wait")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("3s")
private[spark] val SHUFFLE_SERVICE_ENABLED =
ConfigBuilder("spark.shuffle.service.enabled").booleanConf.createWithDefault(false)
private[spark] val KEYTAB = ConfigBuilder("spark.yarn.keytab")
.doc("Location of user's keytab.")
.stringConf.createOptional
private[spark] val PRINCIPAL = ConfigBuilder("spark.yarn.principal")
.doc("Name of the Kerberos principal.")
.stringConf.createOptional
private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
.intConf
.createOptional
private[spark] val PY_FILES = ConfigBuilder("spark.yarn.dist.pyFiles")
.internal()
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val MAX_TASK_FAILURES =
ConfigBuilder("spark.task.maxFailures")
.intConf
.createWithDefault(4)
// Blacklist confs
private[spark] val BLACKLIST_ENABLED =
ConfigBuilder("spark.blacklist.enabled")
.booleanConf
.createOptional
private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor")
.intConf
.createWithDefault(1)
private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC =
ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE =
ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val BLACKLIST_TIMEOUT_CONF =
ConfigBuilder("spark.blacklist.timeout")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_KILL_ENABLED =
ConfigBuilder("spark.blacklist.killBlacklistedExecutors")
.booleanConf
.createWithDefault(false)
private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF =
ConfigBuilder("spark.scheduler.executorTaskBlacklistTime")
.internal()
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val BLACKLIST_FETCH_FAILURE_ENABLED =
ConfigBuilder("spark.blacklist.application.fetchFailure.enabled")
.booleanConf
.createWithDefault(false)
// End blacklist confs
private[spark] val UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE =
ConfigBuilder("spark.files.fetchFailure.unRegisterOutputOnHost")
.doc("Whether to un-register all the outputs on the host in condition that we receive " +
" a FetchFailure. This is set default to false, which means, we only un-register the " +
" outputs related to the exact executor(instead of the host) on a FetchFailure.")
.booleanConf
.createWithDefault(false)
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must not be negative")
.createWithDefault(10000)
private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
ConfigBuilder("spark.scheduler.listenerbus.metrics.maxListenerClassesTimed")
.internal()
.intConf
.createWithDefault(128)
// This property sets the root namespace for metrics reporting
private[spark] val METRICS_NAMESPACE = ConfigBuilder("spark.metrics.namespace")
.stringConf
.createOptional
private[spark] val PYSPARK_DRIVER_PYTHON = ConfigBuilder("spark.pyspark.driver.python")
.stringConf
.createOptional
private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
.stringConf
.createOptional
// To limit how many applications are shown in the History Server summary ui
private[spark] val HISTORY_UI_MAX_APPS =
ConfigBuilder("spark.history.ui.maxApplications").intConf.createWithDefault(Integer.MAX_VALUE)
private[spark] val UI_SHOW_CONSOLE_PROGRESS = ConfigBuilder("spark.ui.showConsoleProgress")
.doc("When true, show the progress bar in the console.")
.booleanConf
.createWithDefault(false)
private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM =
ConfigBuilder("spark.io.encryption.keygen.algorithm")
.stringConf
.createWithDefault("HmacSHA1")
private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits")
.intConf
.checkValues(Set(128, 192, 256))
.createWithDefault(128)
private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION =
ConfigBuilder("spark.io.crypto.cipher.transformation")
.internal()
.stringConf
.createWithDefaultString("AES/CTR/NoPadding")
private[spark] val DRIVER_HOST_ADDRESS = ConfigBuilder("spark.driver.host")
.doc("Address of driver endpoints.")
.stringConf
.createWithDefault(Utils.localCanonicalHostName())
private[spark] val DRIVER_BIND_ADDRESS = ConfigBuilder("spark.driver.bindAddress")
.doc("Address where to bind network listen sockets on the driver.")
.fallbackConf(DRIVER_HOST_ADDRESS)
private[spark] val BLOCK_MANAGER_PORT = ConfigBuilder("spark.blockManager.port")
.doc("Port to use for the block manager when a more specific setting is not provided.")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_BLOCK_MANAGER_PORT = ConfigBuilder("spark.driver.blockManager.port")
.doc("Port to use for the block manager on the driver.")
.fallbackConf(BLOCK_MANAGER_PORT)
private[spark] val IGNORE_CORRUPT_FILES = ConfigBuilder("spark.files.ignoreCorruptFiles")
.doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
"encountering corrupted or non-existing files and contents that have been read will still " +
"be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val IGNORE_MISSING_FILES = ConfigBuilder("spark.files.ignoreMissingFiles")
.doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " +
"encountering missing files and the contents that have been read will still be returned.")
.booleanConf
.createWithDefault(false)
private[spark] val APP_CALLER_CONTEXT = ConfigBuilder("spark.log.callerContext")
.stringConf
.createOptional
private[spark] val FILES_MAX_PARTITION_BYTES = ConfigBuilder("spark.files.maxPartitionBytes")
.doc("The maximum number of bytes to pack into a single partition when reading files.")
.longConf
.createWithDefault(128 * 1024 * 1024)
private[spark] val FILES_OPEN_COST_IN_BYTES = ConfigBuilder("spark.files.openCostInBytes")
.doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" +
" the same time. This is used when putting multiple files into a partition. It's better to" +
" over estimate, then the partitions with small files will be faster than partitions with" +
" bigger files.")
.longConf
.createWithDefault(4 * 1024 * 1024)
private[spark] val HADOOP_RDD_IGNORE_EMPTY_SPLITS =
ConfigBuilder("spark.hadoopRDD.ignoreEmptySplits")
.internal()
.doc("When true, HadoopRDD/NewHadoopRDD will not create partitions for empty input splits.")
.booleanConf
.createWithDefault(false)
private[spark] val SECRET_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.regex")
.doc("Regex to decide which Spark configuration properties and environment variables in " +
"driver and executor environments contain sensitive information. When this regex matches " +
"a property key or value, the value is redacted from the environment UI and various logs " +
"like YARN and event logs.")
.regexConf
.createWithDefault("(?i)secret|password".r)
private[spark] val STRING_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.string.regex")
.doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
"information. When this regex matches a string part, that string part is replaced by a " +
"dummy value. This is currently used to redact the output of SQL explain commands.")
.regexConf
.createOptional
private[spark] val AUTH_SECRET_BIT_LENGTH =
ConfigBuilder("spark.authenticate.secretBitLength")
.intConf
.createWithDefault(256)
private[spark] val NETWORK_AUTH_ENABLED =
ConfigBuilder("spark.authenticate")
.booleanConf
.createWithDefault(false)
private[spark] val SASL_ENCRYPTION_ENABLED =
ConfigBuilder("spark.authenticate.enableSaslEncryption")
.booleanConf
.createWithDefault(false)
private[spark] val NETWORK_ENCRYPTION_ENABLED =
ConfigBuilder("spark.network.crypto.enabled")
.booleanConf
.createWithDefault(false)
private[spark] val BUFFER_WRITE_CHUNK_SIZE =
ConfigBuilder("spark.buffer.write.chunkSize")
.internal()
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ <= Int.MaxValue, "The chunk size during writing out the bytes of" +
" ChunkedByteBuffer should not be larger than Int.MaxValue.")
.createWithDefault(64 * 1024 * 1024)
private[spark] val CHECKPOINT_COMPRESS =
ConfigBuilder("spark.checkpoint.compress")
.doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
"spark.io.compression.codec.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_ACCURATE_BLOCK_THRESHOLD =
ConfigBuilder("spark.shuffle.accurateBlockThreshold")
.doc("Threshold in bytes above which the size of shuffle blocks in " +
"HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM " +
"by avoiding underestimating shuffle block size when fetch shuffle blocks.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(100 * 1024 * 1024)
private[spark] val SHUFFLE_REGISTRATION_TIMEOUT =
ConfigBuilder("spark.shuffle.registration.timeout")
.doc("Timeout in milliseconds for registration to the external shuffle service.")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(5000)
private[spark] val SHUFFLE_REGISTRATION_MAX_ATTEMPTS =
ConfigBuilder("spark.shuffle.registration.maxAttempts")
.doc("When we fail to register to the external shuffle service, we will " +
"retry for maxAttempts times.")
.intConf
.createWithDefault(3)
private[spark] val REDUCER_MAX_BLOCKS_IN_FLIGHT_PER_ADDRESS =
ConfigBuilder("spark.reducer.maxBlocksInFlightPerAddress")
.doc("This configuration limits the number of remote blocks being fetched per reduce task " +
"from a given host port. When a large number of blocks are being requested from a given " +
"address in a single fetch or simultaneously, this could crash the serving executor or " +
"Node Manager. This is especially useful to reduce the load on the Node Manager when " +
"external shuffle is enabled. You can mitigate the issue by setting it to a lower value.")
.intConf
.checkValue(_ > 0, "The max no. of blocks in flight cannot be non-positive.")
.createWithDefault(Int.MaxValue)
private[spark] val MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM =
ConfigBuilder("spark.maxRemoteBlockSizeFetchToMem")
.doc("Remote block will be fetched to disk when size of the block is above this threshold " +
"in bytes. This is to avoid a giant request takes too much memory. We can enable this " +
"config by setting a specific value(e.g. 200m). Note this configuration will affect " +
"both shuffle fetch and block manager remote block fetch. For users who enabled " +
"external shuffle service, this feature can only be worked when external shuffle" +
"service is newer than Spark 2.2.")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(Long.MaxValue)
private[spark] val TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES =
ConfigBuilder("spark.taskMetrics.trackUpdatedBlockStatuses")
.doc("Enable tracking of updatedBlockStatuses in the TaskMetrics. Off by default since " +
"tracking the block statuses can use a lot of memory and its not used anywhere within " +
"spark.")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_FILE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.file.buffer")
.doc("Size of the in-memory buffer for each shuffle file output stream, in KiB unless " +
"otherwise specified. These buffers reduce the number of disk seeks and system calls " +
"made in creating intermediate shuffle files.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= Int.MaxValue / 1024,
s"The file buffer size must be greater than 0 and less than ${Int.MaxValue / 1024}.")
.createWithDefaultString("32k")
private[spark] val SHUFFLE_UNSAFE_FILE_OUTPUT_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.unsafe.file.output.buffer")
.doc("The file system for this buffer size after each partition " +
"is written in unsafe shuffle writer. In KiB unless otherwise specified.")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= Int.MaxValue / 1024,
s"The buffer size must be greater than 0 and less than ${Int.MaxValue / 1024}.")
.createWithDefaultString("32k")
private[spark] val SHUFFLE_DISK_WRITE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.spill.diskWriteBufferSize")
.doc("The buffer size, in bytes, to use when writing the sorted records to an on-disk file.")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v > 0 && v <= Int.MaxValue,
s"The buffer size must be greater than 0 and less than ${Int.MaxValue}.")
.createWithDefault(1024 * 1024)
private[spark] val UNROLL_MEMORY_CHECK_PERIOD =
ConfigBuilder("spark.storage.unrollMemoryCheckPeriod")
.internal()
.doc("The memory check period is used to determine how often we should check whether "
+ "there is a need to request more memory when we try to unroll the given block in memory.")
.longConf
.createWithDefault(16)
private[spark] val UNROLL_MEMORY_GROWTH_FACTOR =
ConfigBuilder("spark.storage.unrollMemoryGrowthFactor")
.internal()
.doc("Memory to request as a multiple of the size that used to unroll the block.")
.doubleConf
.createWithDefault(1.5)
private[spark] val FORCE_DOWNLOAD_SCHEMES =
ConfigBuilder("spark.yarn.dist.forceDownloadSchemes")
.doc("Comma-separated list of schemes for which files will be downloaded to the " +
"local disk prior to being added to YARN's distributed cache. For use in cases " +
"where the YARN service does not support schemes that are supported by Spark, like http, " +
"https and ftp.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val UI_X_XSS_PROTECTION =
ConfigBuilder("spark.ui.xXssProtection")
.doc("Value for HTTP X-XSS-Protection response header")
.stringConf
.createWithDefaultString("1; mode=block")
private[spark] val UI_X_CONTENT_TYPE_OPTIONS =
ConfigBuilder("spark.ui.xContentTypeOptions.enabled")
.doc("Set to 'true' for setting X-Content-Type-Options HTTP response header to 'nosniff'")
.booleanConf
.createWithDefault(true)
private[spark] val UI_STRICT_TRANSPORT_SECURITY =
ConfigBuilder("spark.ui.strictTransportSecurity")
.doc("Value for HTTP Strict Transport Security Response Header")
.stringConf
.createOptional
private[spark] val EXTRA_LISTENERS = ConfigBuilder("spark.extraListeners")
.doc("Class names of listeners to add to SparkContext during initialization.")
.stringConf
.toSequence
.createOptional
private[spark] val SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.numElementsForceSpillThreshold")
.internal()
.doc("The maximum number of elements in memory before forcing the shuffle sorter to spill. " +
"By default it's Integer.MAX_VALUE, which means we never force the sorter to spill, " +
"until we reach some limitations, like the max page size limitation for the pointer " +
"array in the sorter.")
.intConf
.createWithDefault(Integer.MAX_VALUE)
private[spark] val SHUFFLE_MAP_OUTPUT_PARALLEL_AGGREGATION_THRESHOLD =
ConfigBuilder("spark.shuffle.mapOutput.parallelAggregationThreshold")
.internal()
.doc("Multi-thread is used when the number of mappers * shuffle partitions is greater than " +
"or equal to this threshold. Note that the actual parallelism is calculated by number of " +
"mappers * shuffle partitions / this threshold + 1, so this threshold should be positive.")
.intConf
.checkValue(v => v > 0, "The threshold should be positive.")
.createWithDefault(10000000)
private[spark] val MAX_RESULT_SIZE = ConfigBuilder("spark.driver.maxResultSize")
.doc("Size limit for results.")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("1g")
private[spark] val CREDENTIALS_RENEWAL_INTERVAL_RATIO =
ConfigBuilder("spark.security.credentials.renewalRatio")
.doc("Ratio of the credential's expiration time when Spark should fetch new credentials.")
.doubleConf
.createWithDefault(0.75d)
private[spark] val CREDENTIALS_RENEWAL_RETRY_WAIT =
ConfigBuilder("spark.security.credentials.retryWait")
.doc("How long to wait before retrying to fetch new credentials after a failure.")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1h")
}
| lxsmnv/spark | core/src/main/scala/org/apache/spark/internal/config/package.scala | Scala | apache-2.0 | 24,136 |
/*
* The MIT License (MIT)
*
* Copyright (C) 2012 47 Degrees, LLC http://47deg.com hello@47deg.com
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
package com.fortysevendeg.mvessel.util
case class ConnectionValues(name: String, params: Map[String, String])
trait ConnectionStringParser {
val urlRegex = "jdbc:sqlite:(:?[\\\\/\\\\.\\\\-_A-Za-z0-9]+:?)(\\\\?([A-Za-z0-9]+=[A-Za-z0-9]+)((\\\\&([A-Za-z0-9]+=[A-Za-z0-9]+))*)?)?".r
def parseConnectionString(connectionString: String): Option[ConnectionValues] = {
Option(connectionString) flatMap { c =>
val matcher = urlRegex.pattern.matcher(c)
matcher.matches() match {
case true =>
Some(ConnectionValues(
name = matcher.group(1),
params = readParams(matcher.group(3), matcher.group(4))))
case _ =>
None
}
}
}
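// Illustrative sketch (hypothetical input): parsing
//   "jdbc:sqlite:/data/app.db?mode=ro&cache=shared"
// should yield Some(ConnectionValues("/data/app.db", Map("mode" -> "ro", "cache" -> "shared"))),
// assuming the input matches urlRegex above; non-matching strings (and null) return None.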
private[this] def readParams(firstParam: String, lastParams: String): Map[String, String] =
(Option(firstParam), Option(lastParams)) match {
case (Some(f), Some(t)) if f.length > 0 => paramsToMap(f) ++ paramsToMap(cleanParams(t): _*)
case (Some(f), None) if f.length > 0 => paramsToMap(f)
case _ => Map.empty
}
private[this] def paramsToMap(params: String*): Map[String, String] =
Map(params map { p =>
val Array(name, value) = p.split("=")
name -> value
}: _*)
private[this] def cleanParams(params: String): Seq[String] =
params.split("\\\\&").filter(_.length > 0)
}
|
47deg/mvessel
|
core/src/main/scala/com/fortysevendeg/mvessel/util/ConnectionStringParser.scala
|
Scala
|
mit
| 2,019
|
package achauvin.mower
sealed trait Command
object Command {
def parseOne(ch: Char): Command =
ch match {
case 'A' => Forward
case 'G' => Left
case 'D' => Right
case _ => throw new NoSuchElementException
}
def parse(str: String): Seq[Command] =
str map parseOne
def apply(str: String) = parse(str)
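// Illustrative example: Command("AGD") should yield Seq(Forward, Left, Right); an unknown
// character such as 'X' makes parseOne throw a NoSuchElementException.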
}
case object Forward extends Command
case object Left extends Command
case object Right extends Command
|
Blackrush/mower
|
src/main/scala/achauvin/mower/commands.scala
|
Scala
|
mit
| 451
|
object Test extends dotty.runtime.LegacyApp {
val ms = """This is a long multiline string
with \u000d\u000a CRLF embedded."""
assert(ms.lines.size == 3, s"lines.size ${ms.lines.size}")
assert(ms contains "\r\n CRLF", "no CRLF")
}
|
folone/dotty
|
tests/run/t8015-ffc.scala
|
Scala
|
bsd-3-clause
| 239
|
package thistle.examples.webevent
import thistle.core.Query
import thistle.predicates.General.ofType
import thistle.predicates.Indexes.first
import thistle.examples.webevent._
import thistle.examples.webevent.Predicates.{clicked, referredBy, currentElementContainsListing, sameShop}
object Queries {
val PurchasedFromSearch = Query(
ofType[SearchEvent],
ofType[ListingEvent] && clicked,
ofType[PurchaseEvent] && currentElementContainsListing
)
val PurchaseChannel = Query(
ofType[ListingsDisplay],
ofType[ListingEvent] && clicked,
ofType[PurchaseEvent] && referredBy && currentElementContainsListing
)
val PurchasedIndirectlyFromSearch = Query(
ofType[SearchEvent],
ofType[ListingEvent] && clicked,
ofType[ListingEvent] && sameShop,
ofType[PurchaseEvent] && referredBy && currentElementContainsListing
)
def tabbedBrowsing(series: Seq[WebEvent]): Query[WebEvent] =
tabbedBrowsing(series.size)
def tabbedBrowsing(seriesSize: Int): Query[WebEvent] =
Query(
first,
(1 until seriesSize).map(i => !first && referredBy): _*
)
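// Note: tabbedBrowsing(n) builds a query whose first predicate matches the first event of a
// series and whose remaining n - 1 predicates each match a later event that is referredBy (and
// not first), which is one way to model a user browsing across multiple tabs.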
}
|
smarden1/thistle
|
src/main/scala/thistle/examples/webevent/Queries.scala
|
Scala
|
mit
| 1,109
|
package model.auth
import be.objectify.deadbolt.scala.models.{Permission, Role, Subject}
case class Admin(identifier: String, authToken: String) extends Subject {
val roles: List[Role] = List(AdminRole)
val permissions: List[Permission] = List(AdminPermission)
}
object AdminRole extends Role {
val name: String = "admin"
}
object AdminPermission extends Permission {
val value: String = "admin"
}
|
BandOf3/assignment-system-web
|
app/model/auth/Admin.scala
|
Scala
|
mit
| 411
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc.netty
import java.io._
import java.net.{InetSocketAddress, URI}
import java.nio.ByteBuffer
import java.nio.channels.{Pipe, ReadableByteChannel, WritableByteChannel}
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import javax.annotation.Nullable
import scala.concurrent.{Future, Promise}
import scala.reflect.ClassTag
import scala.util.{DynamicVariable, Failure, Success, Try}
import scala.util.control.NonFatal
import org.apache.spark.{SecurityManager, SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.EXECUTOR_ID
import org.apache.spark.internal.config.Network._
import org.apache.spark.network.TransportContext
import org.apache.spark.network.client._
import org.apache.spark.network.crypto.{AuthClientBootstrap, AuthServerBootstrap}
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.server._
import org.apache.spark.rpc._
import org.apache.spark.serializer.{JavaSerializer, JavaSerializerInstance, SerializationStream}
import org.apache.spark.util.{ByteBufferInputStream, ByteBufferOutputStream, ThreadUtils, Utils}
private[netty] class NettyRpcEnv(
val conf: SparkConf,
javaSerializerInstance: JavaSerializerInstance,
host: String,
securityManager: SecurityManager,
numUsableCores: Int) extends RpcEnv(conf) with Logging {
val role = conf.get(EXECUTOR_ID).map { id =>
if (id == SparkContext.DRIVER_IDENTIFIER) "driver" else "executor"
}
private[netty] val transportConf = SparkTransportConf.fromSparkConf(
conf.clone.set(RPC_IO_NUM_CONNECTIONS_PER_PEER, 1),
"rpc",
conf.get(RPC_IO_THREADS).getOrElse(numUsableCores),
role)
private val dispatcher: Dispatcher = new Dispatcher(this, numUsableCores)
private val streamManager = new NettyStreamManager(this)
private val transportContext = new TransportContext(transportConf,
new NettyRpcHandler(dispatcher, this, streamManager))
private def createClientBootstraps(): java.util.List[TransportClientBootstrap] = {
if (securityManager.isAuthenticationEnabled()) {
java.util.Arrays.asList(new AuthClientBootstrap(transportConf,
securityManager.getSaslUser(), securityManager))
} else {
java.util.Collections.emptyList[TransportClientBootstrap]
}
}
private val clientFactory = transportContext.createClientFactory(createClientBootstraps())
/**
* A separate client factory for file downloads. This avoids using the same RPC handler as
* the main RPC context, so that events caused by these clients are kept isolated from the
* main RPC traffic.
*
* It also allows for different configuration of certain properties, such as the number of
* connections per peer.
*/
@volatile private var fileDownloadFactory: TransportClientFactory = _
val timeoutScheduler = ThreadUtils.newDaemonSingleThreadScheduledExecutor("netty-rpc-env-timeout")
// Because TransportClientFactory.createClient is blocking, we need to run it in this thread pool
// to implement non-blocking send/ask.
// TODO: a non-blocking TransportClientFactory.createClient in future
private[netty] val clientConnectionExecutor = ThreadUtils.newDaemonCachedThreadPool(
"netty-rpc-connection",
conf.get(RPC_CONNECT_THREADS))
@volatile private var server: TransportServer = _
private val stopped = new AtomicBoolean(false)
/**
* A map for [[RpcAddress]] and [[Outbox]]. When we are connecting to a remote [[RpcAddress]],
* we just put messages to its [[Outbox]] to implement a non-blocking `send` method.
*/
private val outboxes = new ConcurrentHashMap[RpcAddress, Outbox]()
/**
* Remove the address's Outbox and stop it.
*/
private[netty] def removeOutbox(address: RpcAddress): Unit = {
val outbox = outboxes.remove(address)
if (outbox != null) {
outbox.stop()
}
}
def startServer(bindAddress: String, port: Int): Unit = {
val bootstraps: java.util.List[TransportServerBootstrap] =
if (securityManager.isAuthenticationEnabled()) {
java.util.Arrays.asList(new AuthServerBootstrap(transportConf, securityManager))
} else {
java.util.Collections.emptyList()
}
server = transportContext.createServer(bindAddress, port, bootstraps)
dispatcher.registerRpcEndpoint(
RpcEndpointVerifier.NAME, new RpcEndpointVerifier(this, dispatcher))
}
@Nullable
override lazy val address: RpcAddress = {
if (server != null) RpcAddress(host, server.getPort()) else null
}
override def setupEndpoint(name: String, endpoint: RpcEndpoint): RpcEndpointRef = {
dispatcher.registerRpcEndpoint(name, endpoint)
}
def asyncSetupEndpointRefByURI(uri: String): Future[RpcEndpointRef] = {
val addr = RpcEndpointAddress(uri)
val endpointRef = new NettyRpcEndpointRef(conf, addr, this)
val verifier = new NettyRpcEndpointRef(
conf, RpcEndpointAddress(addr.rpcAddress, RpcEndpointVerifier.NAME), this)
verifier.ask[Boolean](RpcEndpointVerifier.CheckExistence(endpointRef.name)).flatMap { find =>
if (find) {
Future.successful(endpointRef)
} else {
Future.failed(new RpcEndpointNotFoundException(uri))
}
}(ThreadUtils.sameThread)
}
override def stop(endpointRef: RpcEndpointRef): Unit = {
require(endpointRef.isInstanceOf[NettyRpcEndpointRef])
dispatcher.stop(endpointRef)
}
private def postToOutbox(receiver: NettyRpcEndpointRef, message: OutboxMessage): Unit = {
if (receiver.client != null) {
message.sendWith(receiver.client)
} else {
require(receiver.address != null,
"Cannot send message to client endpoint with no listen address.")
val targetOutbox = {
val outbox = outboxes.get(receiver.address)
if (outbox == null) {
val newOutbox = new Outbox(this, receiver.address)
val oldOutbox = outboxes.putIfAbsent(receiver.address, newOutbox)
if (oldOutbox == null) {
newOutbox
} else {
oldOutbox
}
} else {
outbox
}
}
if (stopped.get) {
// It's possible that we put `targetOutbox` after stopping. So we need to clean it.
outboxes.remove(receiver.address)
targetOutbox.stop()
} else {
targetOutbox.send(message)
}
}
}
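// In short: when a TransportClient is already attached to the ref the message is sent directly;
// otherwise it is queued in a per-address Outbox that is created lazily (putIfAbsent resolves
// races) and torn down again if this RpcEnv has already been stopped.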
private[netty] def send(message: RequestMessage): Unit = {
val remoteAddr = message.receiver.address
if (remoteAddr == address) {
// Message to a local RPC endpoint.
try {
dispatcher.postOneWayMessage(message)
} catch {
case e: RpcEnvStoppedException => logDebug(e.getMessage)
}
} else {
// Message to a remote RPC endpoint.
postToOutbox(message.receiver, OneWayOutboxMessage(message.serialize(this)))
}
}
private[netty] def createClient(address: RpcAddress): TransportClient = {
clientFactory.createClient(address.host, address.port)
}
private[netty] def askAbortable[T: ClassTag](
message: RequestMessage, timeout: RpcTimeout): AbortableRpcFuture[T] = {
val promise = Promise[Any]()
val remoteAddr = message.receiver.address
var rpcMsg: Option[RpcOutboxMessage] = None
def onFailure(e: Throwable): Unit = {
if (!promise.tryFailure(e)) {
e match {
case e : RpcEnvStoppedException => logDebug(s"Ignored failure: $e")
case _ => logWarning(s"Ignored failure: $e")
}
}
}
def onSuccess(reply: Any): Unit = reply match {
case RpcFailure(e) => onFailure(e)
case rpcReply =>
if (!promise.trySuccess(rpcReply)) {
logWarning(s"Ignored message: $reply")
}
}
def onAbort(t: Throwable): Unit = {
onFailure(t)
rpcMsg.foreach(_.onAbort())
}
try {
if (remoteAddr == address) {
val p = Promise[Any]()
p.future.onComplete {
case Success(response) => onSuccess(response)
case Failure(e) => onFailure(e)
}(ThreadUtils.sameThread)
dispatcher.postLocalMessage(message, p)
} else {
val rpcMessage = RpcOutboxMessage(message.serialize(this),
onFailure,
(client, response) => onSuccess(deserialize[Any](client, response)))
rpcMsg = Option(rpcMessage)
postToOutbox(message.receiver, rpcMessage)
promise.future.failed.foreach {
case _: TimeoutException => rpcMessage.onTimeout()
case _ =>
}(ThreadUtils.sameThread)
}
val timeoutCancelable = timeoutScheduler.schedule(new Runnable {
override def run(): Unit = {
val remoteRecAddr = if (remoteAddr == null) {
Try {
message.receiver.client.getChannel.remoteAddress()
}.toOption.orNull
} else {
remoteAddr
}
onFailure(new TimeoutException(s"Cannot receive any reply from ${remoteRecAddr} " +
s"in ${timeout.duration}"))
}
}, timeout.duration.toNanos, TimeUnit.NANOSECONDS)
promise.future.onComplete { v =>
timeoutCancelable.cancel(true)
}(ThreadUtils.sameThread)
} catch {
case NonFatal(e) =>
onFailure(e)
}
new AbortableRpcFuture[T](
promise.future.mapTo[T].recover(timeout.addMessageIfTimeout)(ThreadUtils.sameThread),
onAbort)
}
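// Summary of the ask path above: local requests are posted straight to the dispatcher, remote
// requests are serialized into an RpcOutboxMessage, and a task scheduled on timeoutScheduler
// fails the promise with a TimeoutException if no reply arrives within the RpcTimeout; completing
// the promise cancels that task.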
private[netty] def ask[T: ClassTag](message: RequestMessage, timeout: RpcTimeout): Future[T] = {
askAbortable(message, timeout).future
}
private[netty] def serialize(content: Any): ByteBuffer = {
javaSerializerInstance.serialize(content)
}
/**
* Returns [[SerializationStream]] that forwards the serialized bytes to `out`.
*/
private[netty] def serializeStream(out: OutputStream): SerializationStream = {
javaSerializerInstance.serializeStream(out)
}
private[netty] def deserialize[T: ClassTag](client: TransportClient, bytes: ByteBuffer): T = {
NettyRpcEnv.currentClient.withValue(client) {
deserialize { () =>
javaSerializerInstance.deserialize[T](bytes)
}
}
}
override def endpointRef(endpoint: RpcEndpoint): RpcEndpointRef = {
dispatcher.getRpcEndpointRef(endpoint)
}
override def shutdown(): Unit = {
cleanup()
}
override def awaitTermination(): Unit = {
dispatcher.awaitTermination()
}
private def cleanup(): Unit = {
if (!stopped.compareAndSet(false, true)) {
return
}
val iter = outboxes.values().iterator()
while (iter.hasNext()) {
val outbox = iter.next()
outboxes.remove(outbox.address)
outbox.stop()
}
if (timeoutScheduler != null) {
timeoutScheduler.shutdownNow()
}
if (dispatcher != null) {
dispatcher.stop()
}
if (server != null) {
server.close()
}
if (clientFactory != null) {
clientFactory.close()
}
if (clientConnectionExecutor != null) {
clientConnectionExecutor.shutdownNow()
}
if (fileDownloadFactory != null) {
fileDownloadFactory.close()
}
if (transportContext != null) {
transportContext.close()
}
}
override def deserialize[T](deserializationAction: () => T): T = {
NettyRpcEnv.currentEnv.withValue(this) {
deserializationAction()
}
}
override def fileServer: RpcEnvFileServer = streamManager
override def openChannel(uri: String): ReadableByteChannel = {
val parsedUri = new URI(uri)
require(parsedUri.getHost() != null, "Host name must be defined.")
require(parsedUri.getPort() > 0, "Port must be defined.")
require(parsedUri.getPath() != null && parsedUri.getPath().nonEmpty, "Path must be defined.")
val pipe = Pipe.open()
val source = new FileDownloadChannel(pipe.source())
Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
val client = downloadClient(parsedUri.getHost(), parsedUri.getPort())
val callback = new FileDownloadCallback(pipe.sink(), source, client)
client.stream(parsedUri.getPath(), callback)
})(catchBlock = {
pipe.sink().close()
source.close()
})
source
}
private def downloadClient(host: String, port: Int): TransportClient = {
if (fileDownloadFactory == null) synchronized {
if (fileDownloadFactory == null) {
val module = "files"
val prefix = "spark.rpc.io."
val clone = conf.clone()
// Copy any RPC configuration that is not overridden in the spark.files namespace.
conf.getAll.foreach { case (key, value) =>
if (key.startsWith(prefix)) {
val opt = key.substring(prefix.length())
clone.setIfMissing(s"spark.$module.io.$opt", value)
}
}
val ioThreads = clone.getInt("spark.files.io.threads", 1)
val downloadConf = SparkTransportConf.fromSparkConf(clone, module, ioThreads)
val downloadContext = new TransportContext(downloadConf, new NoOpRpcHandler(), true)
fileDownloadFactory = downloadContext.createClientFactory(createClientBootstraps())
}
}
fileDownloadFactory.createClient(host, port)
}
private class FileDownloadChannel(source: Pipe.SourceChannel) extends ReadableByteChannel {
@volatile private var error: Throwable = _
def setError(e: Throwable): Unit = {
// This setError callback is invoked by internal RPC threads in order to propagate remote
// exceptions to application-level threads which are reading from this channel. When an
// RPC error occurs, the RPC system will call setError() and then will close the
// Pipe.SinkChannel corresponding to the other end of the `source` pipe. Closing of the pipe
// sink will cause `source.read()` operations to return EOF, unblocking the application-level
// reading thread. Thus there is no need to actually call `source.close()` here in the
// setError() callback and, in fact, calling it here would be dangerous because the close()
// would be asynchronous with respect to the read() call and could trigger race-conditions
// that lead to data corruption. See the PR for SPARK-22982 for more details on this topic.
error = e
}
override def read(dst: ByteBuffer): Int = {
Try(source.read(dst)) match {
// See the documentation above in setError(): if an RPC error has occurred then setError()
// will be called to propagate the RPC error and then `source`'s corresponding
// Pipe.SinkChannel will be closed, unblocking this read. In that case, we want to propagate
// the remote RPC exception (and not any exceptions triggered by the pipe close, such as
// ChannelClosedException), hence this `error != null` check:
case _ if error != null => throw error
case Success(bytesRead) => bytesRead
case Failure(readErr) => throw readErr
}
}
override def close(): Unit = source.close()
override def isOpen(): Boolean = source.isOpen()
}
private class FileDownloadCallback(
sink: WritableByteChannel,
source: FileDownloadChannel,
client: TransportClient) extends StreamCallback {
override def onData(streamId: String, buf: ByteBuffer): Unit = {
while (buf.remaining() > 0) {
sink.write(buf)
}
}
override def onComplete(streamId: String): Unit = {
sink.close()
}
override def onFailure(streamId: String, cause: Throwable): Unit = {
logDebug(s"Error downloading stream $streamId.", cause)
source.setError(cause)
sink.close()
}
}
}
private[netty] object NettyRpcEnv extends Logging {
/**
* When deserializing the [[NettyRpcEndpointRef]], it needs a reference to [[NettyRpcEnv]].
* Use `currentEnv` to wrap the deserialization codes. E.g.,
*
* {{{
* NettyRpcEnv.currentEnv.withValue(this) {
* your deserialization codes
* }
* }}}
*/
private[netty] val currentEnv = new DynamicVariable[NettyRpcEnv](null)
/**
* Similar to `currentEnv`, this variable references the client instance associated with an
* RPC, in case it's needed to find out the remote address during deserialization.
*/
private[netty] val currentClient = new DynamicVariable[TransportClient](null)
}
private[rpc] class NettyRpcEnvFactory extends RpcEnvFactory with Logging {
def create(config: RpcEnvConfig): RpcEnv = {
val sparkConf = config.conf
// Using JavaSerializerInstance in multiple threads is safe. However, if we plan to support
// KryoSerializer in the future, we have to use ThreadLocal to store SerializerInstance
val javaSerializerInstance =
new JavaSerializer(sparkConf).newInstance().asInstanceOf[JavaSerializerInstance]
val nettyEnv =
new NettyRpcEnv(sparkConf, javaSerializerInstance, config.advertiseAddress,
config.securityManager, config.numUsableCores)
if (!config.clientMode) {
val startNettyRpcEnv: Int => (NettyRpcEnv, Int) = { actualPort =>
nettyEnv.startServer(config.bindAddress, actualPort)
(nettyEnv, nettyEnv.address.port)
}
try {
Utils.startServiceOnPort(config.port, startNettyRpcEnv, sparkConf, config.name)._1
} catch {
case NonFatal(e) =>
nettyEnv.shutdown()
throw e
}
}
nettyEnv
}
}
/**
* The NettyRpcEnv version of RpcEndpointRef.
*
* This class behaves differently depending on where it's created. On the node that "owns" the
* RpcEndpoint, it's a simple wrapper around the RpcEndpointAddress instance.
*
* On other machines that receive a serialized version of the reference, the behavior changes. The
* instance will keep track of the TransportClient that sent the reference, so that messages
* to the endpoint are sent over the client connection, instead of needing a new connection to
* be opened.
*
* The RpcAddress of this ref can be null; what that means is that the ref can only be used through
* a client connection, since the process hosting the endpoint is not listening for incoming
* connections. These refs should not be shared with 3rd parties, since they will not be able to
* send messages to the endpoint.
*
* @param conf Spark configuration.
* @param endpointAddress The address where the endpoint is listening.
* @param nettyEnv The RpcEnv associated with this ref.
*/
private[netty] class NettyRpcEndpointRef(
@transient private val conf: SparkConf,
private val endpointAddress: RpcEndpointAddress,
@transient @volatile private var nettyEnv: NettyRpcEnv) extends RpcEndpointRef(conf) {
@transient @volatile var client: TransportClient = _
override def address: RpcAddress =
if (endpointAddress.rpcAddress != null) endpointAddress.rpcAddress else null
private def readObject(in: ObjectInputStream): Unit = {
in.defaultReadObject()
nettyEnv = NettyRpcEnv.currentEnv.value
client = NettyRpcEnv.currentClient.value
}
private def writeObject(out: ObjectOutputStream): Unit = {
out.defaultWriteObject()
}
override def name: String = endpointAddress.name
override def askAbortable[T: ClassTag](
message: Any, timeout: RpcTimeout): AbortableRpcFuture[T] = {
nettyEnv.askAbortable(new RequestMessage(nettyEnv.address, this, message), timeout)
}
override def ask[T: ClassTag](message: Any, timeout: RpcTimeout): Future[T] = {
askAbortable(message, timeout).future
}
override def send(message: Any): Unit = {
require(message != null, "Message is null")
nettyEnv.send(new RequestMessage(nettyEnv.address, this, message))
}
override def toString: String = s"NettyRpcEndpointRef(${endpointAddress})"
final override def equals(that: Any): Boolean = that match {
case other: NettyRpcEndpointRef => endpointAddress == other.endpointAddress
case _ => false
}
final override def hashCode(): Int =
if (endpointAddress == null) 0 else endpointAddress.hashCode()
}
/**
* The message that is sent from the sender to the receiver.
*
* @param senderAddress the sender address. It's `null` if this message is from a client
* `NettyRpcEnv`.
* @param receiver the receiver of this message.
* @param content the message content.
*/
private[netty] class RequestMessage(
val senderAddress: RpcAddress,
val receiver: NettyRpcEndpointRef,
val content: Any) {
/** Manually serialize [[RequestMessage]] to minimize the size. */
def serialize(nettyEnv: NettyRpcEnv): ByteBuffer = {
val bos = new ByteBufferOutputStream()
val out = new DataOutputStream(bos)
try {
writeRpcAddress(out, senderAddress)
writeRpcAddress(out, receiver.address)
out.writeUTF(receiver.name)
val s = nettyEnv.serializeStream(out)
try {
s.writeObject(content)
} finally {
s.close()
}
} finally {
out.close()
}
bos.toByteBuffer
}
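// Wire layout produced by serialize() above (and read back by RequestMessage.apply below):
// a presence flag plus host/port for the sender address, the same for the receiver address,
// the receiver's endpoint name as UTF, followed by the Java-serialized message content.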
private def writeRpcAddress(out: DataOutputStream, rpcAddress: RpcAddress): Unit = {
if (rpcAddress == null) {
out.writeBoolean(false)
} else {
out.writeBoolean(true)
out.writeUTF(rpcAddress.host)
out.writeInt(rpcAddress.port)
}
}
override def toString: String = s"RequestMessage($senderAddress, $receiver, $content)"
}
private[netty] object RequestMessage {
private def readRpcAddress(in: DataInputStream): RpcAddress = {
val hasRpcAddress = in.readBoolean()
if (hasRpcAddress) {
RpcAddress(in.readUTF(), in.readInt())
} else {
null
}
}
def apply(nettyEnv: NettyRpcEnv, client: TransportClient, bytes: ByteBuffer): RequestMessage = {
val bis = new ByteBufferInputStream(bytes)
val in = new DataInputStream(bis)
try {
val senderAddress = readRpcAddress(in)
val endpointAddress = RpcEndpointAddress(readRpcAddress(in), in.readUTF())
val ref = new NettyRpcEndpointRef(nettyEnv.conf, endpointAddress, nettyEnv)
ref.client = client
new RequestMessage(
senderAddress,
ref,
// The remaining bytes in `bytes` are the message content.
nettyEnv.deserialize(client, bytes))
} finally {
in.close()
}
}
}
/**
* A response that indicates some failure happens in the receiver side.
*/
private[netty] case class RpcFailure(e: Throwable)
/**
* Dispatches incoming RPCs to registered endpoints.
*
* The handler keeps track of all client instances that communicate with it, so that the RpcEnv
* knows which `TransportClient` instance to use when sending RPCs to a client endpoint (i.e.,
* one that is not listening for incoming connections, but rather needs to be contacted via the
* client socket).
*
* Events are sent on a per-connection basis, so if a client opens multiple connections to the
* RpcEnv, multiple connection / disconnection events will be created for that client (albeit
* with different `RpcAddress` information).
*/
private[netty] class NettyRpcHandler(
dispatcher: Dispatcher,
nettyEnv: NettyRpcEnv,
streamManager: StreamManager) extends RpcHandler with Logging {
// A variable to track the remote RpcEnv addresses of all clients
private val remoteAddresses = new ConcurrentHashMap[RpcAddress, RpcAddress]()
override def receive(
client: TransportClient,
message: ByteBuffer,
callback: RpcResponseCallback): Unit = {
val messageToDispatch = internalReceive(client, message)
dispatcher.postRemoteMessage(messageToDispatch, callback)
}
override def receive(
client: TransportClient,
message: ByteBuffer): Unit = {
val messageToDispatch = internalReceive(client, message)
dispatcher.postOneWayMessage(messageToDispatch)
}
private def internalReceive(client: TransportClient, message: ByteBuffer): RequestMessage = {
val addr = client.getChannel().remoteAddress().asInstanceOf[InetSocketAddress]
assert(addr != null)
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
val requestMessage = RequestMessage(nettyEnv, client, message)
if (requestMessage.senderAddress == null) {
// Create a new message with the socket address of the client as the sender.
new RequestMessage(clientAddr, requestMessage.receiver, requestMessage.content)
} else {
// The remote RpcEnv listens on some port, so we should also fire a RemoteProcessConnected
// for the listening address
val remoteEnvAddress = requestMessage.senderAddress
if (remoteAddresses.putIfAbsent(clientAddr, remoteEnvAddress) == null) {
dispatcher.postToAll(RemoteProcessConnected(remoteEnvAddress))
}
requestMessage
}
}
override def getStreamManager: StreamManager = streamManager
override def exceptionCaught(cause: Throwable, client: TransportClient): Unit = {
val addr = client.getChannel.remoteAddress().asInstanceOf[InetSocketAddress]
if (addr != null) {
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
dispatcher.postToAll(RemoteProcessConnectionError(cause, clientAddr))
// If the remote RpcEnv listens on some address, we should also fire a
// RemoteProcessConnectionError for the remote RpcEnv listening address
val remoteEnvAddress = remoteAddresses.get(clientAddr)
if (remoteEnvAddress != null) {
dispatcher.postToAll(RemoteProcessConnectionError(cause, remoteEnvAddress))
}
} else {
// If the channel is closed before connecting, its remoteAddress will be null.
// See java.net.Socket.getRemoteSocketAddress
// Because we cannot get a RpcAddress, just log it
logError("Exception before connecting to the client", cause)
}
}
override def channelActive(client: TransportClient): Unit = {
val addr = client.getChannel().remoteAddress().asInstanceOf[InetSocketAddress]
assert(addr != null)
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
dispatcher.postToAll(RemoteProcessConnected(clientAddr))
}
override def channelInactive(client: TransportClient): Unit = {
val addr = client.getChannel.remoteAddress().asInstanceOf[InetSocketAddress]
if (addr != null) {
val clientAddr = RpcAddress(addr.getHostString, addr.getPort)
nettyEnv.removeOutbox(clientAddr)
dispatcher.postToAll(RemoteProcessDisconnected(clientAddr))
val remoteEnvAddress = remoteAddresses.remove(clientAddr)
// If the remote RpcEnv listens on some address, we should also fire a
// RemoteProcessDisconnected for the remote RpcEnv listening address
if (remoteEnvAddress != null) {
dispatcher.postToAll(RemoteProcessDisconnected(remoteEnvAddress))
}
} else {
// If the channel is closed before connecting, its remoteAddress will be null. In this case,
// we can ignore it since we don't fire "Associated".
// See java.net.Socket.getRemoteSocketAddress
}
}
}
|
maropu/spark
|
core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala
|
Scala
|
apache-2.0
| 27,722
|
package home.yang.dataflow.filter
import home.yang.dataflow.Filter
/**
* Created by Administrator on 2016/5/2 0002.
*/
class ShortFilter(val keyName:String, isPass:(Ordered[Short])=>Boolean) extends Filter[Short](isPass){
override def createShort(short: Short): Any = {
isPass(short)
}
}
|
wjingyao2008/firsttry
|
dataflow/src/main/scala/home/yang/dataflow/filter/ShortFilter.scala
|
Scala
|
apache-2.0
| 300
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.jmock
import org.scalatest._
import org.jmock.Expectations
import org.hamcrest.core.IsAnything
import org.scalatest.events._
trait SuiteExpectations {
def expectSingleTestToPass(expectations: Expectations, reporter: Reporter) = expectNTestsToPass(expectations, 1, reporter)
def expectSingleTestToFail(expectations: Expectations, reporter: Reporter) = expectNTestsToFail(expectations, 1, reporter)
def expectNTestsToPass(expectations: Expectations, n: Int, reporter: Reporter) = {
expectNTestsToRun(expectations, n, reporter) {
expectations.one(reporter).apply(expectations.`with`(new IsAnything[TestSucceeded]))
}
}
def expectNTestsToFail(expectations: Expectations, n: Int, reporter: Reporter) = {
expectNTestsToRun(expectations, n, reporter) {
expectations.one(reporter).apply(expectations.`with`(new IsAnything[TestFailed]))
}
}
def expectNTestsToRun(expectations: Expectations, n: Int, reporter: Reporter)(f: => Unit) = {
expectations.never(reporter).apply(expectations.`with`(new IsAnything[SuiteStarting]))
for( i <- 1 to n ){
expectations.one(reporter).apply(expectations.`with`(new IsAnything[TestStarting]))
f
}
expectations.never(reporter).apply(expectations.`with`(new IsAnything[SuiteCompleted]))
}
}
|
SRGOM/scalatest
|
scalatest-test/src/test/scala/org/scalatest/jmock/SuiteExpectations.scala
|
Scala
|
apache-2.0
| 1,921
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.io._
import java.net.URL
import java.nio.file.{Files, Paths}
import java.sql.{Date, Timestamp}
import java.util.{Locale, TimeZone, UUID}
import scala.collection.JavaConverters._
import org.apache.avro.Schema
import org.apache.avro.Schema.{Field, Type}
import org.apache.avro.Schema.Type._
import org.apache.avro.file.{DataFileReader, DataFileWriter}
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.generic.GenericData.{EnumSymbol, Fixed}
import org.apache.commons.io.FileUtils
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.TestingUDT.{IntervalData, NullData, NullUDT}
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
abstract class AvroSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
import testImplicits._
val episodesAvro = testFile("episodes.avro")
val testAvro = testFile("test.avro")
override protected def beforeAll(): Unit = {
super.beforeAll()
spark.conf.set(SQLConf.FILES_MAX_PARTITION_BYTES.key, 1024)
}
def checkReloadMatchesSaved(originalFile: String, newFile: String): Unit = {
val originalEntries = spark.read.format("avro").load(testAvro).collect()
val newEntries = spark.read.format("avro").load(newFile)
checkAnswer(newEntries, originalEntries)
}
def checkAvroSchemaEquals(avroSchema: String, expectedAvroSchema: String): Unit = {
assert(new Schema.Parser().parse(avroSchema) ==
new Schema.Parser().parse(expectedAvroSchema))
}
def getAvroSchemaStringFromFiles(filePath: String): String = {
new DataFileReader({
val file = new File(filePath)
if (file.isFile) {
file
} else {
file.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("avro"))
.head
}
}, new GenericDatumReader[Any]()).getSchema.toString(false)
}
test("resolve avro data source") {
val databricksAvro = "com.databricks.spark.avro"
// By default the backward compatibility for com.databricks.spark.avro is enabled.
Seq("org.apache.spark.sql.avro.AvroFileFormat", databricksAvro).foreach { provider =>
assert(DataSource.lookupDataSource(provider, spark.sessionState.conf) ===
classOf[org.apache.spark.sql.avro.AvroFileFormat])
}
withSQLConf(SQLConf.LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED.key -> "false") {
val message = intercept[AnalysisException] {
DataSource.lookupDataSource(databricksAvro, spark.sessionState.conf)
}.getMessage
assert(message.contains(s"Failed to find data source: $databricksAvro"))
}
}
test("reading from multiple paths") {
val df = spark.read.format("avro").load(episodesAvro, episodesAvro)
assert(df.count == 16)
}
test("reading and writing partitioned data") {
val df = spark.read.format("avro").load(episodesAvro)
val fields = List("title", "air_date", "doctor")
for (field <- fields) {
withTempPath { dir =>
val outputDir = s"$dir/${UUID.randomUUID}"
df.write.partitionBy(field).format("avro").save(outputDir)
val input = spark.read.format("avro").load(outputDir)
// makes sure that no fields got dropped.
// We convert Rows to Seqs in order to work around SPARK-10325
assert(input.select(field).collect().map(_.toSeq).toSet ===
df.select(field).collect().map(_.toSeq).toSet)
}
}
}
test("request no fields") {
val df = spark.read.format("avro").load(episodesAvro)
df.createOrReplaceTempView("avro_table")
assert(spark.sql("select count(*) from avro_table").collect().head === Row(8))
}
test("convert formats") {
withTempPath { dir =>
val df = spark.read.format("avro").load(episodesAvro)
df.write.parquet(dir.getCanonicalPath)
assert(spark.read.parquet(dir.getCanonicalPath).count() === df.count)
}
}
test("rearrange internal schema") {
withTempPath { dir =>
val df = spark.read.format("avro").load(episodesAvro)
df.select("doctor", "title").write.format("avro").save(dir.getCanonicalPath)
}
}
test("union(int, long) is read as long") {
withTempPath { dir =>
val avroSchema: Schema = {
val union =
Schema.createUnion(List(Schema.create(Type.INT), Schema.create(Type.LONG)).asJava)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toLong)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", 2)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", LongType, nullable = true)))
assert(df.collect().toSet == Set(Row(1L), Row(2L)))
}
}
test("union(float, double) is read as double") {
withTempPath { dir =>
val avroSchema: Schema = {
val union =
Schema.createUnion(List(Schema.create(Type.FLOAT), Schema.create(Type.DOUBLE)).asJava)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toFloat)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", 2.toDouble)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
assert(df.collect().toSet == Set(Row(1.toDouble), Row(2.toDouble)))
}
}
test("union(float, double, null) is read as nullable double") {
withTempPath { dir =>
val avroSchema: Schema = {
val union = Schema.createUnion(
List(Schema.create(Type.FLOAT),
Schema.create(Type.DOUBLE),
Schema.create(Type.NULL)
).asJava
)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toFloat)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", null)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
assert(df.collect().toSet == Set(Row(1.toDouble), Row(null)))
}
}
test("Union of a single type") {
withTempPath { dir =>
val UnionOfOne = Schema.createUnion(List(Schema.create(Type.INT)).asJava)
val fields = Seq(new Field("field1", UnionOfOne, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("field1", 8)
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.first() == Row(8))
}
}
test("SPARK-27858 Union type: More than one non-null type") {
withTempDir { dir =>
val complexNullUnionType = Schema.createUnion(
List(Schema.create(Type.INT), Schema.create(Type.NULL), Schema.create(Type.STRING)).asJava)
val fields = Seq(
new Field("field1", complexNullUnionType, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("field1", 42)
dataFileWriter.append(avroRec)
val avroRec2 = new GenericData.Record(schema)
avroRec2.put("field1", "Alice")
dataFileWriter.append(avroRec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema === StructType.fromDDL("field1 struct<member0: int, member1: string>"))
assert(df.collect().toSet == Set(Row(Row(42, null)), Row(Row(null, "Alice"))))
}
}
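// As the SPARK-27858 test above and the "Complex Union Type" test below illustrate, unions with
// more than one non-null branch (beyond the int/long and float/double promotions covered earlier)
// are read as a struct whose member0, member1, ... fields follow the order of the non-null
// branches in the Avro schema.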
test("Complex Union Type") {
withTempPath { dir =>
val fixedSchema = Schema.createFixed("fixed_name", "doc", "namespace", 4)
val enumSchema = Schema.createEnum("enum_name", "doc", "namespace", List("e1", "e2").asJava)
val complexUnionType = Schema.createUnion(
List(Schema.create(Type.INT), Schema.create(Type.STRING), fixedSchema, enumSchema).asJava)
val fields = Seq(
new Field("field1", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field2", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field3", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field4", complexUnionType, "doc", null.asInstanceOf[AnyVal])
).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
val field1 = 1234
val field2 = "Hope that was not load bearing"
val field3 = Array[Byte](1, 2, 3, 4)
val field4 = "e2"
avroRec.put("field1", field1)
avroRec.put("field2", field2)
avroRec.put("field3", new Fixed(fixedSchema, field3))
avroRec.put("field4", new EnumSymbol(enumSchema, field4))
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.sqlContext.read.format("avro").load(s"$dir.avro")
assertResult(field1)(df.selectExpr("field1.member0").first().get(0))
assertResult(field2)(df.selectExpr("field2.member1").first().get(0))
assertResult(field3)(df.selectExpr("field3.member2").first().get(0))
assertResult(field4)(df.selectExpr("field4.member3").first().get(0))
}
}
test("Lots of nulls") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("binary", BinaryType, true),
StructField("timestamp", TimestampType, true),
StructField("array", ArrayType(ShortType), true),
StructField("map", MapType(StringType, StringType), true),
StructField("struct", StructType(Seq(StructField("int", IntegerType, true))))))
val rdd = spark.sparkContext.parallelize(Seq[Row](
Row(null, new Timestamp(1), Array[Short](1, 2, 3), null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null)))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
test("Struct field type") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("short", ShortType, true),
StructField("byte", ByteType, true),
StructField("boolean", BooleanType, true)
))
val rdd = spark.sparkContext.parallelize(Seq(
Row(1f, 1.toShort, 1.toByte, true),
Row(2f, 2.toShort, 2.toByte, true),
Row(3f, 3.toShort, 3.toByte, true)
))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
private def createDummyCorruptFile(dir: File): Unit = {
Utils.tryWithResource {
FileUtils.forceMkdir(dir)
val corruptFile = new File(dir, "corrupt.avro")
new BufferedWriter(new FileWriter(corruptFile))
} { writer =>
writer.write("corrupt")
}
}
test("Ignore corrupt Avro file if flag IGNORE_CORRUPT_FILES enabled") {
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
withTempPath { dir =>
createDummyCorruptFile(dir)
val message = intercept[FileNotFoundException] {
spark.read.format("avro").load(dir.getAbsolutePath).schema
}.getMessage
assert(message.contains("No Avro files found."))
val srcFile = new File("src/test/resources/episodes.avro")
val destFile = new File(dir, "episodes.avro")
FileUtils.copyFile(srcFile, destFile)
val result = spark.read.format("avro").load(srcFile.getAbsolutePath).collect()
checkAnswer(spark.read.format("avro").load(dir.getAbsolutePath), result)
}
}
}
test("Throws IOException on reading corrupt Avro file if flag IGNORE_CORRUPT_FILES disabled") {
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
withTempPath { dir =>
createDummyCorruptFile(dir)
val message = intercept[org.apache.spark.SparkException] {
spark.read.format("avro").load(dir.getAbsolutePath)
}.getMessage
assert(message.contains("Could not read file"))
}
}
}
test("Date field type") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("date", DateType, true)
))
TimeZone.setDefault(TimeZone.getTimeZone("UTC"))
val rdd = spark.sparkContext.parallelize(Seq(
Row(1f, null),
Row(2f, new Date(1451948400000L)),
Row(3f, new Date(1460066400500L))
))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
checkAnswer(
spark.read.format("avro").load(dir.toString).select("date"),
Seq(Row(null), Row(new Date(1451865600000L)), Row(new Date(1459987200000L))))
}
}
test("Array data types") {
withTempPath { dir =>
val testSchema = StructType(Seq(
StructField("byte_array", ArrayType(ByteType), true),
StructField("short_array", ArrayType(ShortType), true),
StructField("float_array", ArrayType(FloatType), true),
StructField("bool_array", ArrayType(BooleanType), true),
StructField("long_array", ArrayType(LongType), true),
StructField("double_array", ArrayType(DoubleType), true),
StructField("decimal_array", ArrayType(DecimalType(10, 0)), true),
StructField("bin_array", ArrayType(BinaryType), true),
StructField("timestamp_array", ArrayType(TimestampType), true),
StructField("array_array", ArrayType(ArrayType(StringType), true), true),
StructField("struct_array", ArrayType(
StructType(Seq(StructField("name", StringType, true)))))))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val rdd = spark.sparkContext.parallelize(Seq(
Row(arrayOfByte, Array[Short](1, 2, 3, 4), Array[Float](1f, 2f, 3f, 4f),
Array[Boolean](true, false, true, false), Array[Long](1L, 2L), Array[Double](1.0, 2.0),
Array[BigDecimal](BigDecimal.valueOf(3)), Array[Array[Byte]](arrayOfByte, arrayOfByte),
Array[Timestamp](new Timestamp(0)),
Array[Array[String]](Array[String]("CSH, tearing down the walls that divide us", "-jd")),
Array[Row](Row("Bobby G. can't swim")))))
val df = spark.createDataFrame(rdd, testSchema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
test("write with compression - sql configs") {
withTempPath { dir =>
val uncompressDir = s"$dir/uncompress"
val bzip2Dir = s"$dir/bzip2"
val xzDir = s"$dir/xz"
val deflateDir = s"$dir/deflate"
val snappyDir = s"$dir/snappy"
val df = spark.read.format("avro").load(testAvro)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "uncompressed")
df.write.format("avro").save(uncompressDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "bzip2")
df.write.format("avro").save(bzip2Dir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "xz")
df.write.format("avro").save(xzDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "deflate")
spark.conf.set(SQLConf.AVRO_DEFLATE_LEVEL.key, "9")
df.write.format("avro").save(deflateDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "snappy")
df.write.format("avro").save(snappyDir)
val uncompressSize = FileUtils.sizeOfDirectory(new File(uncompressDir))
val bzip2Size = FileUtils.sizeOfDirectory(new File(bzip2Dir))
val xzSize = FileUtils.sizeOfDirectory(new File(xzDir))
val deflateSize = FileUtils.sizeOfDirectory(new File(deflateDir))
val snappySize = FileUtils.sizeOfDirectory(new File(snappyDir))
assert(uncompressSize > deflateSize)
assert(snappySize > deflateSize)
assert(snappySize > bzip2Size)
assert(bzip2Size > xzSize)
}
}
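// The codecs above are selected purely through the SQL configs referenced in the test
// (SQLConf.AVRO_COMPRESSION_CODEC and, for deflate, SQLConf.AVRO_DEFLATE_LEVEL set to 9 here);
// the assertions only compare sizes relative to each other since absolute sizes depend on the
// test data.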
test("dsl test") {
val results = spark.read.format("avro").load(episodesAvro).select("title").collect()
assert(results.length === 8)
}
test("old avro data source name works") {
val results =
spark.read.format("com.databricks.spark.avro")
.load(episodesAvro).select("title").collect()
assert(results.length === 8)
}
test("support of various data types") {
// This test uses data from test.avro. You can see the data and the schema of this file in
// test.json and test.avsc
val all = spark.read.format("avro").load(testAvro).collect()
assert(all.length == 3)
val str = spark.read.format("avro").load(testAvro).select("string").collect()
assert(str.map(_(0)).toSet.contains("Terran is IMBA!"))
val simple_map = spark.read.format("avro").load(testAvro).select("simple_map").collect()
assert(simple_map(0)(0).getClass.toString.contains("Map"))
assert(simple_map.map(_(0).asInstanceOf[Map[String, Some[Int]]].size).toSet == Set(2, 0))
val union0 = spark.read.format("avro").load(testAvro).select("union_string_null").collect()
assert(union0.map(_(0)).toSet == Set("abc", "123", null))
val union1 = spark.read.format("avro").load(testAvro).select("union_int_long_null").collect()
assert(union1.map(_(0)).toSet == Set(66, 1, null))
val union2 = spark.read.format("avro").load(testAvro).select("union_float_double").collect()
assert(
union2
.map(x => java.lang.Double.valueOf(x(0).toString))
.exists(p => Math.abs(p - Math.PI) < 0.001))
val fixed = spark.read.format("avro").load(testAvro).select("fixed3").collect()
assert(fixed.map(_(0).asInstanceOf[Array[Byte]]).exists(p => p(1) == 3))
val enum = spark.read.format("avro").load(testAvro).select("enum").collect()
assert(enum.map(_(0)).toSet == Set("SPADES", "CLUBS", "DIAMONDS"))
val record = spark.read.format("avro").load(testAvro).select("record").collect()
assert(record(0)(0).getClass.toString.contains("Row"))
assert(record.map(_(0).asInstanceOf[Row](0)).contains("TEST_STR123"))
val array_of_boolean =
spark.read.format("avro").load(testAvro).select("array_of_boolean").collect()
assert(array_of_boolean.map(_(0).asInstanceOf[Seq[Boolean]].size).toSet == Set(3, 1, 0))
val bytes = spark.read.format("avro").load(testAvro).select("bytes").collect()
assert(bytes.map(_(0).asInstanceOf[Array[Byte]].length).toSet == Set(3, 1, 0))
}
test("sql test") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW avroTable
|USING avro
|OPTIONS (path "${episodesAvro}")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM avroTable").collect().length === 8)
}
test("conversion to avro and back") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
withTempPath { dir =>
val avroDir = s"$dir/avro"
spark.read.format("avro").load(testAvro).write.format("avro").save(avroDir)
checkReloadMatchesSaved(testAvro, avroDir)
}
}
test("conversion to avro and back with namespace") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
withTempPath { tempDir =>
val name = "AvroTest"
val namespace = "org.apache.spark.avro"
val parameters = Map("recordName" -> name, "recordNamespace" -> namespace)
val avroDir = tempDir + "/namedAvro"
spark.read.format("avro").load(testAvro)
.write.options(parameters).format("avro").save(avroDir)
checkReloadMatchesSaved(testAvro, avroDir)
// Look at the raw file and make sure it has the namespace info
val rawSaved = spark.sparkContext.textFile(avroDir)
val schema = rawSaved.collect().mkString("")
assert(schema.contains(name))
assert(schema.contains(namespace))
}
}
test("converting some specific sparkSQL types to avro") {
withTempPath { tempDir =>
val testSchema = StructType(Seq(
StructField("Name", StringType, false),
StructField("Length", IntegerType, true),
StructField("Time", TimestampType, false),
StructField("Decimal", DecimalType(10, 2), true),
StructField("Binary", BinaryType, false)))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val cityRDD = spark.sparkContext.parallelize(Seq(
Row("San Francisco", 12, new Timestamp(666), null, arrayOfByte),
Row("Palo Alto", null, new Timestamp(777), null, arrayOfByte),
Row("Munich", 8, new Timestamp(42), Decimal(3.14), arrayOfByte)))
val cityDataFrame = spark.createDataFrame(cityRDD, testSchema)
val avroDir = tempDir + "/avro"
cityDataFrame.write.format("avro").save(avroDir)
assert(spark.read.format("avro").load(avroDir).collect().length == 3)
// Timestamps are converted to longs
val times = spark.read.format("avro").load(avroDir).select("Time").collect()
assert(times.map(_(0)).toSet ==
Set(new Timestamp(666), new Timestamp(777), new Timestamp(42)))
// DecimalType should be converted to string
val decimals = spark.read.format("avro").load(avroDir).select("Decimal").collect()
assert(decimals.map(_(0)).contains(new java.math.BigDecimal("3.14")))
// There should be a null entry
val length = spark.read.format("avro").load(avroDir).select("Length").collect()
assert(length.map(_(0)).contains(null))
val binary = spark.read.format("avro").load(avroDir).select("Binary").collect()
for (i <- arrayOfByte.indices) {
assert(binary(1)(0).asInstanceOf[Array[Byte]](i) == arrayOfByte(i))
}
}
}
test("correctly read long as date/timestamp type") {
withTempPath { tempDir =>
val currentTime = new Timestamp(System.currentTimeMillis())
val currentDate = new Date(System.currentTimeMillis())
val schema = StructType(Seq(
StructField("_1", DateType, false), StructField("_2", TimestampType, false)))
val writeDs = Seq((currentDate, currentTime)).toDS
val avroDir = tempDir + "/avro"
writeDs.write.format("avro").save(avroDir)
assert(spark.read.format("avro").load(avroDir).collect().length == 1)
val readDs = spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)]
assert(readDs.collect().sameElements(writeDs.collect()))
}
}
test("support of globbed paths") {
val resourceDir = testFile(".")
val e1 = spark.read.format("avro").load(resourceDir + "../*/episodes.avro").collect()
assert(e1.length == 8)
val e2 = spark.read.format("avro").load(resourceDir + "../../*/*/episodes.avro").collect()
assert(e2.length == 8)
}
test("does not coerce null date/timestamp value to 0 epoch.") {
withTempPath { tempDir =>
val nullTime: Timestamp = null
val nullDate: Date = null
val schema = StructType(Seq(
StructField("_1", DateType, nullable = true),
StructField("_2", TimestampType, nullable = true))
)
val writeDs = Seq((nullDate, nullTime)).toDS
val avroDir = tempDir + "/avro"
writeDs.write.format("avro").save(avroDir)
val readValues =
spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)].collect
assert(readValues.size == 1)
assert(readValues.head == ((nullDate, nullTime)))
}
}
test("support user provided avro schema") {
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "string",
| "type" : "string",
| "doc" : "Meaningless string of characters"
| }]
|}
""".stripMargin
val result = spark
.read
.option("avroSchema", avroSchema)
.format("avro")
.load(testAvro)
.collect()
val expected = spark.read.format("avro").load(testAvro).select("string").collect()
assert(result.sameElements(expected))
}
test("support user provided avro schema with defaults for missing fields") {
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "missingField",
| "type" : "string",
| "default" : "foo"
| }]
|}
""".stripMargin
val result = spark
.read
.option("avroSchema", avroSchema)
.format("avro").load(testAvro).select("missingField").first
assert(result === Row("foo"))
}
test("support user provided avro schema for writing nullable enum type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "Suit",
| "type": [{ "type": "enum",
| "name": "SuitEnumType",
| "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
| }, "null"]
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
StructType(Seq(StructField("Suit", StringType, true))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing data not in the enum will throw an exception
val message = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
StructType(Seq(StructField("Suit", StringType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
      assert(message.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
        "Cannot write \"NOT-IN-ENUM\" since it's not defined in enum"))
}
}
test("support user provided avro schema for writing non-nullable enum type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "Suit",
| "type": { "type": "enum",
| "name": "SuitEnumType",
| "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
| }
| }]
|}
""".stripMargin
val dfWithNull = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
StructType(Seq(StructField("Suit", StringType, true))))
val df = spark.createDataFrame(dfWithNull.na.drop().rdd,
StructType(Seq(StructField("Suit", StringType, false))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing nulls without using avro union type will
// throw an exception as avro uses union type to handle null.
val message1 = intercept[SparkException] {
dfWithNull.write.format("avro")
.option("avroSchema", avroSchema).save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message1.contains("org.apache.avro.AvroTypeException: Not an enum: null"))
// Writing df containing data not in the enum will throw an exception
val message2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
StructType(Seq(StructField("Suit", StringType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
      assert(message2.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
        "Cannot write \"NOT-IN-ENUM\" since it's not defined in enum"))
}
}
test("support user provided avro schema for writing nullable fixed type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "fixed2",
| "type": [{ "type": "fixed",
| "size": 2,
| "name": "fixed2"
| }, "null"]
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168).map(_.toByte)), Row(null))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message1 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message1.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 3 bytes of binary data into FIXED Type with size of 2 bytes"))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message2.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 1 byte of binary data into FIXED Type with size of 2 bytes"))
}
}
test("support user provided avro schema for writing non-nullable fixed type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "fixed2",
| "type": { "type": "fixed",
| "size": 2,
| "name": "fixed2"
| }
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168).map(_.toByte)), Row(Array(1, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message1 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message1.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 3 bytes of binary data into FIXED Type with size of 2 bytes"))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val message2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message2.contains("org.apache.spark.sql.avro.IncompatibleSchemaException: " +
"Cannot write 1 byte of binary data into FIXED Type with size of 2 bytes"))
}
}
test("support user provided avro schema for writing / reading fields with different ordering") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Age", "type": "int"},
| {"name": "Name", "type": "string"}
| ]
|}
""".stripMargin
val avroSchemaReversed =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Name", "type": "string"},
| {"name": "Age", "type": "int"}
| ]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))),
StructType(Seq(
StructField("Age", IntegerType, false),
StructField("Name", StringType, false))))
val tempSaveDir = s"$tempDir/save/"
// Writing avro file with reversed field ordering
df.write.format("avro").option("avroSchema", avroSchemaReversed).save(tempSaveDir)
// Reading reversed avro file
checkAnswer(df.select("Name", "Age"), spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchemaReversed, getAvroSchemaStringFromFiles(tempSaveDir))
// Reading reversed avro file with provided original schema
val avroDf = spark.read.format("avro").option("avroSchema", avroSchema).load(tempSaveDir)
checkAnswer(df, avroDf)
assert(avroDf.schema.fieldNames.sameElements(Array("Age", "Name")))
}
}
test("support user provided non-nullable avro schema " +
"for nullable catalyst schema without any null record") {
withTempPath { tempDir =>
val catalystSchema =
StructType(Seq(
StructField("Age", IntegerType, true),
StructField("Name", StringType, true)))
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Age", "type": "int"},
| {"name": "Name", "type": "string"}
| ]
|}
""".stripMargin
val df = spark.createDataFrame(
spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
val message = intercept[Exception] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(Row(2, null))), catalystSchema)
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message.contains("Caused by: java.lang.NullPointerException: " +
"in test_schema in string null of string in field Name"))
}
}
test("error handling for unsupported Interval data types") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
var msg = intercept[AnalysisException] {
sql("select interval 1 days").write.format("avro").mode("overwrite").save(tempDir)
}.getMessage
assert(msg.contains("Cannot save interval data type into external storage.") ||
msg.contains("AVRO data source does not support interval data type."))
msg = intercept[AnalysisException] {
spark.udf.register("testType", () => new IntervalData())
sql("select testType()").write.format("avro").mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(s"avro data source does not support interval data type."))
}
}
test("support Null data types") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
val df = sql("select null")
df.write.format("avro").mode("overwrite").save(tempDir)
checkAnswer(spark.read.format("avro").load(tempDir), df)
}
}
test("throw exception if unable to write with user provided Avro schema") {
val input: Seq[(DataType, Schema.Type)] = Seq(
(NullType, NULL),
(BooleanType, BOOLEAN),
(ByteType, INT),
(ShortType, INT),
(IntegerType, INT),
(LongType, LONG),
(FloatType, FLOAT),
(DoubleType, DOUBLE),
(BinaryType, BYTES),
(DateType, INT),
(TimestampType, LONG),
(DecimalType(4, 2), BYTES)
)
def assertException(f: () => AvroSerializer) {
val message = intercept[org.apache.spark.sql.avro.IncompatibleSchemaException] {
f()
}.getMessage
assert(message.contains("Cannot convert Catalyst type"))
}
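    // Wraps the Avro schema in a union with NULL when the corresponding Catalyst field is nullable.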
def resolveNullable(schema: Schema, nullable: Boolean): Schema = {
if (nullable && schema.getType != NULL) {
Schema.createUnion(schema, Schema.create(NULL))
} else {
schema
}
}
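    // For every mismatched (Catalyst, Avro) type pair, also build array, map and record variants
    // and expect AvroSerializer construction to fail for each combination.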
for {
i <- input
j <- input
nullable <- Seq(true, false)
} if (i._2 != j._2) {
val avroType = resolveNullable(Schema.create(j._2), nullable)
val avroArrayType = resolveNullable(Schema.createArray(avroType), nullable)
val avroMapType = resolveNullable(Schema.createMap(avroType), nullable)
val name = "foo"
val avroField = new Field(name, avroType, "", null.asInstanceOf[AnyVal])
val recordSchema = Schema.createRecord("name", "doc", "space", true, Seq(avroField).asJava)
val avroRecordType = resolveNullable(recordSchema, nullable)
val catalystType = i._1
val catalystArrayType = ArrayType(catalystType, nullable)
val catalystMapType = MapType(StringType, catalystType, nullable)
val catalystStructType = StructType(Seq(StructField(name, catalystType, nullable)))
for {
avro <- Seq(avroType, avroArrayType, avroMapType, avroRecordType)
catalyst <- Seq(catalystType, catalystArrayType, catalystMapType, catalystStructType)
} {
assertException(() => new AvroSerializer(catalyst, avro, nullable))
}
}
}
test("reading from invalid path throws exception") {
// Directory given has no avro files
intercept[AnalysisException] {
withTempPath(dir => spark.read.format("avro").load(dir.getCanonicalPath))
}
intercept[AnalysisException] {
spark.read.format("avro").load("very/invalid/path/123.avro")
}
// In case of globbed path that can't be matched to anything, another exception is thrown (and
// exception message is helpful)
intercept[AnalysisException] {
spark.read.format("avro").load("*/*/*/*/*/*/*/something.avro")
}
intercept[FileNotFoundException] {
withTempPath { dir =>
FileUtils.touch(new File(dir, "test"))
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
spark.read.format("avro").load(dir.toString)
}
}
}
intercept[FileNotFoundException] {
withTempPath { dir =>
FileUtils.touch(new File(dir, "test"))
spark
.read
.option("ignoreExtension", false)
.format("avro")
.load(dir.toString)
}
}
}
test("SQL test insert overwrite") {
withTempPath { tempDir =>
val tempEmptyDir = s"$tempDir/sqlOverwrite"
// Create a temp directory for table that will be overwritten
new File(tempEmptyDir).mkdirs()
spark.sql(
s"""
|CREATE TEMPORARY VIEW episodes
|USING avro
|OPTIONS (path "${episodesAvro}")
""".stripMargin.replaceAll("\\n", " "))
spark.sql(
s"""
|CREATE TEMPORARY VIEW episodesEmpty
|(name string, air_date string, doctor int)
|USING avro
|OPTIONS (path "$tempEmptyDir")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM episodes").collect().length === 8)
assert(spark.sql("SELECT * FROM episodesEmpty").collect().isEmpty)
spark.sql(
s"""
|INSERT OVERWRITE TABLE episodesEmpty
|SELECT * FROM episodes
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM episodesEmpty").collect().length == 8)
}
}
test("test save and load") {
// Test if load works as expected
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.count == 8)
}
}
test("test load with non-Avro file") {
// Test if load works as expected
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
Files.createFile(new File(tempSaveDir, "non-avro").toPath)
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.count() == 8)
}
}
}
test("read avro with user defined schema: read partial columns") {
val partialColumns = StructType(Seq(
StructField("string", StringType, false),
StructField("simple_map", MapType(StringType, IntegerType), false),
StructField("complex_map", MapType(StringType, MapType(StringType, StringType)), false),
StructField("union_string_null", StringType, true),
StructField("union_int_long_null", LongType, true),
StructField("fixed3", BinaryType, true),
StructField("fixed2", BinaryType, true),
StructField("enum", StringType, false),
StructField("record", StructType(Seq(StructField("value_field", StringType, false))), false),
StructField("array_of_boolean", ArrayType(BooleanType), false),
StructField("bytes", BinaryType, true)))
val withSchema = spark.read.schema(partialColumns).format("avro").load(testAvro).collect()
val withOutSchema = spark
.read
.format("avro")
.load(testAvro)
.select("string", "simple_map", "complex_map", "union_string_null", "union_int_long_null",
"fixed3", "fixed2", "enum", "record", "array_of_boolean", "bytes")
.collect()
assert(withSchema.sameElements(withOutSchema))
}
test("read avro with user defined schema: read non-exist columns") {
val schema =
StructType(
Seq(
StructField("non_exist_string", StringType, true),
StructField(
"record",
StructType(Seq(
StructField("non_exist_field", StringType, false),
StructField("non_exist_field2", StringType, false))),
false)))
val withEmptyColumn = spark.read.schema(schema).format("avro").load(testAvro).collect()
assert(withEmptyColumn.forall(_ == Row(null: String, Row(null: String, null: String))))
}
test("read avro file partitioned") {
withTempPath { dir =>
val df = (0 to 1024 * 3).toDS.map(i => s"record${i}").toDF("records")
val outputDir = s"$dir/${UUID.randomUUID}"
df.write.format("avro").save(outputDir)
val input = spark.read.format("avro").load(outputDir)
assert(input.collect.toSet.size === 1024 * 3 + 1)
assert(input.rdd.partitions.size > 2)
}
}
case class NestedBottom(id: Int, data: String)
case class NestedMiddle(id: Int, data: NestedBottom)
case class NestedTop(id: Int, data: NestedMiddle)
test("Validate namespace in avro file that has nested records with the same name") {
withTempPath { dir =>
val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
writeDf.write.format("avro").save(dir.toString)
val schema = getAvroSchemaStringFromFiles(dir.toString)
      assert(schema.contains("\"namespace\":\"topLevelRecord\""))
      assert(schema.contains("\"namespace\":\"topLevelRecord.data\""))
}
}
test("saving avro that has nested records with the same name") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder
val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
val outputFolder = s"$tempDir/duplicate_names/"
writeDf.write.format("avro").save(outputFolder)
      // Read the Avro file saved in the previous step
      val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the written DataFrame equals the read DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
test("check namespace - toAvroType") {
val sparkSchema = StructType(Seq(
StructField("name", StringType, nullable = false),
StructField("address", StructType(Seq(
StructField("city", StringType, nullable = false),
StructField("state", StringType, nullable = false))),
nullable = false)))
val employeeType = SchemaConverters.toAvroType(sparkSchema,
recordName = "employee",
nameSpace = "foo.bar")
assert(employeeType.getFullName == "foo.bar.employee")
assert(employeeType.getName == "employee")
assert(employeeType.getNamespace == "foo.bar")
val addressType = employeeType.getField("address").schema()
assert(addressType.getFullName == "foo.bar.employee.address")
assert(addressType.getName == "address")
assert(addressType.getNamespace == "foo.bar.employee")
}
test("check empty namespace - toAvroType") {
val sparkSchema = StructType(Seq(
StructField("name", StringType, nullable = false),
StructField("address", StructType(Seq(
StructField("city", StringType, nullable = false),
StructField("state", StringType, nullable = false))),
nullable = false)))
val employeeType = SchemaConverters.toAvroType(sparkSchema,
recordName = "employee")
assert(employeeType.getFullName == "employee")
assert(employeeType.getName == "employee")
assert(employeeType.getNamespace == null)
val addressType = employeeType.getField("address").schema()
assert(addressType.getFullName == "employee.address")
assert(addressType.getName == "address")
assert(addressType.getNamespace == "employee")
}
case class NestedMiddleArray(id: Int, data: Array[NestedBottom])
case class NestedTopArray(id: Int, data: NestedMiddleArray)
test("saving avro that has nested records with the same name inside an array") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder
val writeDf = spark.createDataFrame(
List(NestedTopArray(1, NestedMiddleArray(2, Array(
NestedBottom(3, "1"), NestedBottom(4, "2")
))))
)
val outputFolder = s"$tempDir/duplicate_names_array/"
writeDf.write.format("avro").save(outputFolder)
      // Read the Avro file saved in the previous step
      val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the written DataFrame equals the read DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
case class NestedMiddleMap(id: Int, data: Map[String, NestedBottom])
case class NestedTopMap(id: Int, data: NestedMiddleMap)
test("saving avro that has nested records with the same name inside a map") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder
val writeDf = spark.createDataFrame(
List(NestedTopMap(1, NestedMiddleMap(2, Map(
"1" -> NestedBottom(3, "1"), "2" -> NestedBottom(4, "2")
))))
)
val outputFolder = s"$tempDir/duplicate_names_map/"
writeDf.write.format("avro").save(outputFolder)
      // Read the Avro file saved in the previous step
      val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the written DataFrame equals the read DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
test("SPARK-24805: do not ignore files without .avro extension by default") {
withTempDir { dir =>
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes"))
val fileWithoutExtension = s"${dir.getCanonicalPath}/episodes"
val df1 = spark.read.format("avro").load(fileWithoutExtension)
assert(df1.count == 8)
val schema = new StructType()
.add("title", StringType)
.add("air_date", StringType)
.add("doctor", IntegerType)
val df2 = spark.read.schema(schema).format("avro").load(fileWithoutExtension)
assert(df2.count == 8)
}
}
test("SPARK-24836: checking the ignoreExtension option") {
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
Files.createFile(new File(tempSaveDir, "non-avro").toPath)
val newDf = spark
.read
.option("ignoreExtension", false)
.format("avro")
.load(tempSaveDir)
assert(newDf.count == 8)
}
}
test("SPARK-24836: ignoreExtension must override hadoop's config") {
withTempDir { dir =>
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes"))
val hadoopConf = spark.sessionState.newHadoopConf()
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
val newDf = spark
.read
.option("ignoreExtension", "true")
.format("avro")
.load(s"${dir.getCanonicalPath}/episodes")
assert(newDf.count() == 8)
}
}
}
test("SPARK-24881: write with compression - avro options") {
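    // Reads the "avro.codec" metadata entry from the first .avro file under dir;
    // Avro reports "null" for uncompressed output.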
def getCodec(dir: String): Option[String] = {
val files = new File(dir)
.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("avro"))
files.map { file =>
val reader = new DataFileReader(file, new GenericDatumReader[Any]())
val r = reader.getMetaString("avro.codec")
r
}.map(v => if (v == "null") "uncompressed" else v).headOption
}
def checkCodec(df: DataFrame, dir: String, codec: String): Unit = {
val subdir = s"$dir/$codec"
df.write.option("compression", codec).format("avro").save(subdir)
assert(getCodec(subdir) == Some(codec))
}
withTempPath { dir =>
val path = dir.toString
val df = spark.read.format("avro").load(testAvro)
checkCodec(df, path, "uncompressed")
checkCodec(df, path, "deflate")
checkCodec(df, path, "snappy")
checkCodec(df, path, "bzip2")
checkCodec(df, path, "xz")
}
}
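  // Parsing a self-referential Avro schema must fail with a "recursive reference" error.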
private def checkSchemaWithRecursiveLoop(avroSchema: String): Unit = {
val message = intercept[IncompatibleSchemaException] {
SchemaConverters.toSqlType(new Schema.Parser().parse(avroSchema))
}.getMessage
assert(message.contains("Found recursive reference in Avro schema"))
}
test("Detect recursive loop") {
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"}, // each element has a long
| {"name": "next", "type": ["null", "LongList"]} // optional next element
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields": [
| {
| "name": "value",
| "type": {
| "type": "record",
| "name": "foo",
| "fields": [
| {
| "name": "parent",
| "type": "LongList"
| }
| ]
| }
| }
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"},
| {"name": "array", "type": {"type": "array", "items": "LongList"}}
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"},
| {"name": "map", "type": {"type": "map", "values": "LongList"}}
| ]
|}
""".stripMargin)
}
}
class AvroV1Suite extends AvroSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_READER_LIST, "avro")
.set(SQLConf.USE_V1_SOURCE_WRITER_LIST, "avro")
}
class AvroV2Suite extends AvroSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_READER_LIST, "")
.set(SQLConf.USE_V1_SOURCE_WRITER_LIST, "")
}
|
actuaryzhang/spark
|
external/avro/src/test/scala/org/apache/spark/sql/avro/AvroSuite.scala
|
Scala
|
apache-2.0
| 58,092
|
/***
* Copyright 2015 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.cli
import java.io.File
import javax.xml.transform._
import javax.xml.transform.stream._
import com.rackspace.com.papi.components.checker.handler._
import com.rackspace.com.papi.components.checker.util.URLResolver
import com.rackspace.com.papi.components.checker.{Config, Validator}
import org.clapper.argot.ArgotConverters._
import org.clapper.argot.{ArgotParser, ArgotUsageException}
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.{FilterMapping, ServletContextHandler}
object WadlTest {
val MAX_CONSOLE_WIDTH = 100
val DEFAULT_NAME = "Test_Validator"
val DEFAULT_PORT = 9191
val title = getClass.getPackage.getImplementationTitle
val version = getClass.getPackage.getImplementationVersion
val parser = new ArgotParser("wadltest", preUsage=Some(s"$title v$version"))
val removeDups = parser.flag[Boolean] (List("d", "remove-dups"),
"Remove duplicate nodes. Default: false")
val raxRepresentation = parser.flag[Boolean] (List("R", "disable-rax-representation"),
"Disable Rax-Representation extension. Default: false")
val raxRoles = parser.flag[Boolean] (List("r", "rax-roles"),
"Enable Rax-Roles extension. Default: false")
val raxIsTenant = parser.flag[Boolean] (List("T", "rax-is-tenant"),
"Enable Rax-Is-Tenant extension. Default: false")
  val raxRolesMask403 = parser.flag[Boolean] (List("M", "rax-roles-mask-403s"),
    "When Rax-Roles is enabled, mask 403 errors with 404s or 405s. Default: false")
val authenticatedBy = parser.flag[Boolean] (List("u", "authenticated-by"),
"Enable Authenticated-By extension. Default: false")
val wellFormed = parser.flag[Boolean] (List("w", "well-formed"),
"Add checks to ensure that XML and JSON are well formed. Default: false")
val joinXPaths = parser.flag[Boolean] (List("j", "join-xpaths"),
"Join multiple XPath and XML well-formed checks into a single check. Default: false")
val xsdGrammarTransform = parser.flag[Boolean] (List("g", "xsd-grammar-transform"),
"Transform the XML after validation, to fill in things like default values etc. Default: false")
val preserveRequestBody = parser.flag[Boolean] (List("b", "preserve-req-body"),
"Ensure that the request body is preserved after validating the request.")
val preserveMethodLabels = parser.flag[Boolean] (List("L", "preserve-method-labels"),
"Ensure that method labels are always preserved.")
val xsdCheck = parser.flag[Boolean] (List("x", "xsd"),
"Add checks to ensure that XML validates against XSD grammar Default: false")
val jsonCheck = parser.flag[Boolean] (List("J", "json"),
"Add checks to ensure that JSON validates against JSON Schema grammar Default: false")
val element = parser.flag[Boolean] (List("l", "element"),
"Add checks to ensure that XML requests use the correct element : false")
val header = parser.flag[Boolean] (List("H", "header"),
"Add checks to ensure that required headers are passed in: false")
val setDefaults = parser.flag[Boolean] (List("s", "setParamDefaults"),
"Fill in required parameters if a default value is specified Default: false")
val plainParam = parser.flag[Boolean] (List("p", "plain"),
"Add checks for plain parameters : false")
val preProc = parser.flag[Boolean] (List("P", "disable-preproc-ext"),
"Disable preprocess extension : false")
val ignoreXSD = parser.flag[Boolean] (List("i", "disable-ignore-xsd-ext"),
"Disable Ignore XSD extension : false")
val ignoreJSON = parser.flag[Boolean] (List("I", "disable-ignore-json-ext"),
"Disable Ignore JSON Schema extension : false")
val message = parser.flag[Boolean] (List("m", "disable-message-ext"),
"Disable Message extension : false")
val captureHeader = parser.flag[Boolean] (List("c", "disable-capture-header-ext"),
"Disable capture header extension : false")
val anyMatch = parser.flag[Boolean] (List("a", "disable-any-match"),
"Disable any match extension : false")
val raxAssert = parser.flag[Boolean] (List("k", "disable-rax-assert"),
"Disable Rax-Assert extension : false")
val warnHeaders = parser.flag[Boolean] (List("W", "disable-warn-headers"),
"Disable warn headers : false")
val warnAgent = parser.option[String] (List("A", "warn-agent"), "agent-name",
"The name of the agent used in WARNING headers. Default: -")
val xslEngine = parser.option[String] (List("E", "xsl-engine"), "xsl-engine",
"The name of the XSLT engine to use. Possible names are Xalan, XalanC, SaxonHE, SaxonEE. Default: XalanC")
val xsdEngine = parser.option[String] (List("S", "xsd-engine"), "xsd-engine",
"The name of the XSD engine to use. Possible names are Xerces, SaxonEE. Default: Xerces")
val dontValidate = parser.flag[Boolean] (List("D", "dont-validate"),
"Don't validate produced checker Default: false")
val showErrors = parser.flag[Boolean] (List("e", "show-errors"),
"Show error nodes in the generated dot. Default: false")
val nfaMode = parser.flag[Boolean] (List("n", "nfa-mode"),
"Display the generated dot in NFA mode. Default: false")
val consoleLog = parser.flag[Boolean](List("L", "console-log"),
"Display request log in console. Default: false")
val port = parser.option[Int]("o", "portNumber", s"Port number. Default: $DEFAULT_PORT")
val name = parser.option[String]("N", "name", s"The validator name. Default: $DEFAULT_NAME")
val help = parser.flag[Boolean] (List("h", "help"),
"Display usage.")
val xpathVersion = parser.option[Int](List("t", "xpath-version"), "n",
"XPath version to use. Can be 10, 20, 30, 31 for 1.0, 2.0, 3.0, and 3.1. Default: 10")
val input = parser.parameter[String]("wadl",
"WADL file/uri to read. If not specified, stdin will be used.",
true)
val printVersion = parser.flag[Boolean] (List("version"),
"Display version.")
val outputMetadata = parser.flag[Boolean](List("O", "output-metadata"),
"Display checker metadata")
def getSource: Source = {
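    // Read the WADL from stdin when no input file/URI was given; otherwise resolve the
    // argument to an absolute system id.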
var source: Source = null
if (input.value.isEmpty) {
source = new StreamSource(System.in)
} else {
source = new StreamSource(URLResolver.toAbsoluteSystemId(input.value.get))
}
source
}
def handleArgs(args: Array[String]): Unit = {
parser.parse(args)
if (help.value.getOrElse(false)) {
parser.usage()
}
}
//
// Draw a silly console box, cus it's cool...
//
def drawBox(title : String, content : String) {
    val allText = title+"\n"+content
val currMax = allText.split("\\n").map(s=> s.trim).map(s => s.length()).reduceLeft((x,y) => if (x >= y) x else y)
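    // Pads s out to the requested length by appending copies of padString (default: a single space).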
def padString (s : String, pad : Int, padString : String = " ") : String = {
s.padTo(pad,padString).map(a => a.toString).foldRight(""){(a, b) => a+b}
}
println()
    println (" β•”"+padString("",currMax,"β•")+"β•—")
    println (" β•‘ "+padString(title, currMax-1)+"β•‘")
    println (" β• "+padString("",currMax,"β•")+"β•£")
    content.split("\\n").map(s=>s.trim()).foreach (s => {
      if (s.contains(Console.RESET)) {
        println (" β•‘ "+padString(s,currMax+7)+"β•‘")
      } else {
        println (" β•‘ "+padString(s,currMax-1)+"β•‘")
      }
    })
    println (" β•š"+padString("",currMax,"β•")+"β•")
println()
}
def runServer (name : String, port : Int, metaOut : Option[StreamResult], dot : File, config : Config) : Unit = {
val source = getSource
val sourceName = {
if (source.asInstanceOf[StreamSource].getInputStream != null) {
"<STDIN>"
} else {
source.asInstanceOf[StreamSource].getSystemId
}
}
val echoParam = OkayServlet.ECHO_CONTENT_PARAM
val respParam = OkayServlet.RESPONSE_TYPE
//
    // Initialize the validator, this catches errors early...
//
    println(s"Loading $sourceName...\n")
val validator = Validator (name, source, metaOut, config)
//
// Initialize the server....
//
val server = new Server(port)
val servletContextHandler = new ServletContextHandler(ServletContextHandler.NO_SESSIONS)
val servletHandler = servletContextHandler.getServletHandler
servletContextHandler.setContextPath("/")
servletContextHandler.setResourceBase(System.getProperty("java.io.tmpdir"))
servletContextHandler.setAttribute(ValidatorFilter.VALIDATOR_ATTRIB, validator)
server.setHandler(servletContextHandler)
servletHandler.addServletWithMapping(classOf[OkayServlet], "/*")
servletHandler.addFilterWithMapping(classOf[ValidatorFilter], "/*", FilterMapping.REQUEST)
server.start()
//
// Display a nice little text box with important info...
//
val B = Console.BOLD
val R = Console.RESET
drawBox(s"$title $version",
s"""
Running validator $B$name$R
Port: $B$port$R
WADL Input: $B$sourceName$R
Dot File: $B$dot$R
       The service should return a 200 response if the request
       validates against the WADL; otherwise it will return a 4xx
       code with an appropriate message.
       You can pass an '$B$echoParam$R' query parameter to the
request to have the service echo the body of the request
in the response.
       You can pass a '$B$respParam$R' query parameter to the
request to set the ContentType of the response to the value
of that parameter.
""")
//
    // Let the current thread join until the server is done executing...
//
server.join()
}
def main(args: Array[String]) = {
try {
handleArgs(args)
if (printVersion.value.getOrElse(false)) {
println(s"$title v$version")
} else {
val c = new Config
c.removeDups = removeDups.value.getOrElse(false)
c.enableRaxRolesExtension = raxRoles.value.getOrElse(false)
c.enableRaxIsTenantExtension = raxIsTenant.value.getOrElse(false)
c.enableRaxRepresentationExtension = !raxRepresentation.value.getOrElse(false)
c.maskRaxRoles403 = raxRolesMask403.value.getOrElse(false)
c.enableAuthenticatedByExtension = authenticatedBy.value.getOrElse(false)
c.checkWellFormed = wellFormed.value.getOrElse(false)
c.checkXSDGrammar = xsdCheck.value.getOrElse(false)
c.checkJSONGrammar = jsonCheck.value.getOrElse(false)
c.checkElements = element.value.getOrElse(false)
c.checkPlainParams = plainParam.value.getOrElse(false)
c.enablePreProcessExtension = !preProc.value.getOrElse(false)
c.joinXPathChecks = joinXPaths.value.getOrElse(false)
c.checkHeaders = header.value.getOrElse(false)
c.setParamDefaults = setDefaults.value.getOrElse(false)
c.enableIgnoreXSDExtension = !ignoreXSD.value.getOrElse(false)
c.enableIgnoreJSONSchemaExtension = !ignoreJSON.value.getOrElse(false)
c.enableMessageExtension = !message.value.getOrElse(false)
c.enableCaptureHeaderExtension = !captureHeader.value.getOrElse(false)
c.enableAnyMatchExtension = !anyMatch.value.getOrElse(false)
c.enableAssertExtension = !raxAssert.value.getOrElse(false)
c.xpathVersion = xpathVersion.value.getOrElse(10)
c.preserveRequestBody = preserveRequestBody.value.getOrElse(false)
c.preserveMethodLabels = preserveMethodLabels.value.getOrElse(false)
c.doXSDGrammarTransform = xsdGrammarTransform.value.getOrElse(false)
c.validateChecker = !dontValidate.value.getOrElse(false)
c.enableWarnHeaders = !warnHeaders.value.getOrElse(false)
c.warnAgent = warnAgent.value.getOrElse("-")
c.xslEngine = xslEngine.value.getOrElse("XalanC")
c.xsdEngine = xsdEngine.value.getOrElse("Xerces")
val metaOutResult = {
if (outputMetadata.value.getOrElse(false)) {
Some(new StreamResult(System.err))
} else {
None
}
}
val dot = File.createTempFile("chk", ".dot")
dot.deleteOnExit()
val handlerList = {
val dotHandler = new SaveDotHandler(dot, !showErrors.value.getOrElse(false),
nfaMode.value.getOrElse(false))
val initList = List[ResultHandler](dotHandler,
new ServletResultHandler(),
new InstrumentedHandler())
if (consoleLog.value.getOrElse(false)) {
initList :+ new ConsoleResultHandler()
} else {
initList
}
}
c.resultHandler = new DispatchResultHandler(handlerList)
runServer(name.value.getOrElse(DEFAULT_NAME), port.value.getOrElse(DEFAULT_PORT), metaOutResult, dot, c)
}
} catch {
case e: ArgotUsageException => println(e.message)
case iae : IllegalArgumentException => println(iae.getMessage)
}
}
}
|
wdschei/api-checker
|
cli/wadltest/src/main/scala/com/rackspace/com/papi/components/checker/cli/WadlTest.scala
|
Scala
|
apache-2.0
| 15,044
|
package com.thoughtworks.deeplearning.plugins
import java.nio.ByteBuffer
import com.dongxiguo.fastring.Fastring
import com.dongxiguo.fastring.Fastring.Implicits._
import com.thoughtworks.continuation._
import com.thoughtworks.feature.Caller
import com.typesafe.scalalogging.{CanLog, Logger}
import org.slf4j.MDC
import sourcecode.{FullName, Name}
private object Logging {
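  // Exposes the call site (full name, simple name and caller class) to the SLF4J MDC for the
  // duration of each log statement, and clears the keys afterwards.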
implicit object CanLogSourceCode extends CanLog[(sourcecode.FullName, sourcecode.Name, Caller[_])] {
private final val mdcKeyFullName = "sourcecode.FullName"
private final val mdcKeyName = "sourcecode.Name"
private final val mdcKeyCaller = "com.thoughtworks.feature.Caller"
def logMessage(originalMessage: String, attachments: (sourcecode.FullName, sourcecode.Name, Caller[_])): String = {
MDC.put(mdcKeyFullName, attachments._1.value)
MDC.put(mdcKeyName, attachments._2.value)
MDC.put(mdcKeyCaller, attachments._3.value.getClass.getCanonicalName)
originalMessage
}
override def afterLog(attachments: (sourcecode.FullName, sourcecode.Name, Caller[_])): Unit = {
MDC.remove(mdcKeyFullName)
MDC.remove(mdcKeyName)
MDC.remove(mdcKeyCaller)
super.afterLog(attachments)
}
}
}
/** A plugin that logs uncaught exceptions.
*
 * @author 杨博 (Yang Bo)
*/
trait Logging extends Differentiables {
import Logging._
protected val logger: Logger
trait DifferentiableApi extends super.DifferentiableApi {
implicit protected def fullName: FullName
implicit protected def name: Name
implicit protected def caller: Caller[_]
override protected def handleException(thrown: Throwable): UnitContinuation[Unit] = {
UnitContinuation.delay {
Logger
.takingImplicit[(FullName, Name, Caller[_])](logger.underlying)
.error("An uncaught exception is thrown", thrown)((fullName, name, caller))
}
}
}
type Differentiable <: DifferentiableApi
}
|
ThoughtWorksInc/DeepLearning.scala
|
plugins-Logging/src/main/scala/com/thoughtworks/deeplearning/plugins/Logging.scala
|
Scala
|
apache-2.0
| 1,958
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.gatling.app.Gatling
import io.gatling.core.config.GatlingPropertiesBuilder
object Engine extends App {
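  // Programmatic Gatling launcher: points Gatling at the data/results/bodies/binaries
  // directories resolved by IDEPathHelper and starts the run.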
val props = new GatlingPropertiesBuilder
props.dataDirectory(IDEPathHelper.dataDirectory.toString)
props.resultsDirectory(IDEPathHelper.resultsDirectory.toString)
props.bodiesDirectory(IDEPathHelper.bodiesDirectory.toString)
props.binariesDirectory(IDEPathHelper.mavenBinariesDirectory.toString)
Gatling.fromMap(props.build)
}
|
mdunker/usergrid
|
tests/performance/src/test/scala/Engine.scala
|
Scala
|
apache-2.0
| 1,252
|
package scorex.transaction
import com.google.common.primitives.{Bytes, Ints}
import org.h2.mvstore.MVStore
import play.api.libs.json.{JsArray, JsObject, Json}
import scorex.account.{Account, PrivateKeyAccount, PublicKeyAccount}
import scorex.app.Application
import scorex.block.{Block, BlockField}
import scorex.network.message.Message
import scorex.network.{Broadcast, NetworkController, TransactionalMessagesRepo}
import scorex.settings.Settings
import scorex.transaction.SimpleTransactionModule.StoredInBlock
import scorex.transaction.state.database.UnconfirmedTransactionsDatabaseImpl
import scorex.transaction.state.database.blockchain.{StoredState, StoredBlockTree, StoredBlockchain}
import scorex.transaction.state.wallet.Payment
import scorex.utils._
import scorex.wallet.Wallet
import scala.concurrent.duration._
import scala.util.Try
case class TransactionsBlockField(override val value: Seq[Transaction])
extends BlockField[Seq[Transaction]] {
import SimpleTransactionModule.MaxTransactionsPerBlock
override val name = "transactions"
override lazy val json: JsObject = Json.obj(name -> JsArray(value.map(_.json)))
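  // Binary layout: one byte with the transaction count, then for each transaction a
  // 4-byte length prefix followed by the serialized transaction bytes.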
override lazy val bytes: Array[Byte] = {
val txCount = value.size.ensuring(_ <= MaxTransactionsPerBlock).toByte
value.foldLeft(Array(txCount)) { case (bs, tx) =>
val txBytes = tx.bytes
bs ++ Bytes.ensureCapacity(Ints.toByteArray(txBytes.length), 4, 0) ++ txBytes
}
}
}
class SimpleTransactionModule(implicit val settings: TransactionSettings with Settings, application: Application)
extends TransactionModule[StoredInBlock] with ScorexLogging {
import SimpleTransactionModule._
val consensusModule = application.consensusModule
val networkController = application.networkController
val TransactionSizeLength = 4
val InitialBalance = 60000000000L
private val instance = this
override val utxStorage: UnconfirmedTransactionsStorage = new UnconfirmedTransactionsDatabaseImpl
override val blockStorage = new BlockStorage {
val db = settings.dataDirOpt match {
case Some(dataFolder) => new MVStore.Builder().fileName(dataFolder + s"/blockchain.dat").compress().open()
case None => new MVStore.Builder().open()
}
override val MaxRollback: Int = settings.MaxRollback
override val history: History = settings.history match {
case s: String if s.equalsIgnoreCase("blockchain") =>
new StoredBlockchain(db)(consensusModule, instance)
case s: String if s.equalsIgnoreCase("blocktree") =>
new StoredBlockTree(settings.dataDirOpt, MaxRollback)(consensusModule, instance)
case s =>
log.error(s"Unknown history storage: $s. Use StoredBlockchain...")
new StoredBlockchain(db)(consensusModule, instance)
}
override val state = new StoredState(db)
}
/**
   * In Lagonaki, transaction-related data is just a sequence of transactions. No Merkle-tree root of txs / state etc.
   *
   * @param bytes - serialized sequence of transactions
* @return
*/
override def parseBytes(bytes: Array[Byte]): Try[TransactionsBlockField] = Try {
bytes.isEmpty match {
case true => TransactionsBlockField(Seq())
case false =>
val txData = bytes.tail
val txCount = bytes.head // so 255 txs max
formBlockData((1 to txCount).foldLeft((0: Int, Seq[LagonakiTransaction]())) { case ((pos, txs), _) =>
val transactionLengthBytes = txData.slice(pos, pos + TransactionSizeLength)
val transactionLength = Ints.fromByteArray(transactionLengthBytes)
val transactionBytes = txData.slice(pos + TransactionSizeLength, pos + TransactionSizeLength + transactionLength)
val transaction = LagonakiTransaction.parseBytes(transactionBytes).get
(pos + TransactionSizeLength + transactionLength, txs :+ transaction)
}._2)
}
}
override def formBlockData(transactions: StoredInBlock): TransactionsBlockField = TransactionsBlockField(transactions)
//TODO asInstanceOf
override def transactions(block: Block): StoredInBlock =
block.transactionDataField.asInstanceOf[TransactionsBlockField].value
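  // Assemble the payload for a new block: drop stale/invalid transactions first, then keep the
  // highest-fee unconfirmed transactions (up to MaxTransactionsPerBlock) that still validate.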
override def packUnconfirmed(): StoredInBlock = {
clearIncorrectTransactions()
blockStorage.state.validate(utxStorage.all().sortBy(-_.fee).take(MaxTransactionsPerBlock))
}
//todo: check: clear unconfirmed txs on receiving a block
override def clearFromUnconfirmed(data: StoredInBlock): Unit = {
data.foreach(tx => utxStorage.getBySignature(tx.signature) match {
case Some(unconfirmedTx) => utxStorage.remove(unconfirmedTx)
case None =>
})
clearIncorrectTransactions()
}
  // Remove too old or invalid transactions from the UnconfirmedTransactionsPool
def clearIncorrectTransactions(): Unit = {
val lastBlockTs = blockStorage.history.lastBlock.timestampField.value
val txs = utxStorage.all()
val notTooOld = txs.filter { tx =>
if ((lastBlockTs - tx.timestamp).millis > MaxTimeForUnconfirmed) utxStorage.remove(tx)
(lastBlockTs - tx.timestamp).millis <= MaxTimeForUnconfirmed
}
notTooOld.diff(blockStorage.state.validate(txs)).foreach(tx => utxStorage.remove(tx))
}
override def onNewOffchainTransaction(transaction: Transaction): Unit =
if (utxStorage.putIfNew(transaction)) {
val spec = TransactionalMessagesRepo.TransactionMessageSpec
val ntwMsg = Message(spec, Right(transaction), None)
networkController ! NetworkController.SendToNetwork(ntwMsg, Broadcast)
}
def createPayment(payment: Payment, wallet: Wallet): Option[PaymentTransaction] = {
wallet.privateKeyAccount(payment.sender).map { sender =>
createPayment(sender, new Account(payment.recipient), payment.amount, payment.fee)
}
}
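  // Builds and signs a PaymentTransaction with the current NTP-corrected time; if the state
  // accepts it, the transaction is broadcast to the network as an off-chain transaction.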
def createPayment(sender: PrivateKeyAccount, recipient: Account, amount: Long, fee: Long): PaymentTransaction = {
val time = NTP.correctedTime()
val sig = PaymentTransaction.generateSignature(sender, recipient, amount, fee, time)
val payment = new PaymentTransaction(new PublicKeyAccount(sender.publicKey), recipient, amount, fee, time, sig)
if (blockStorage.state.isValid(payment)) onNewOffchainTransaction(payment)
payment
}
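  // Genesis block payload: the initial balance split evenly across the hard-coded IPO accounts at timestamp 0.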
override def genesisData: BlockField[StoredInBlock] = {
val ipoMembers = List(
"3Mb4mR4taeYS3wci78SntztFwLoaS6Wbg81",
"3MbWTyn6Tg7zL6XbdN8TLcFMfhWX77hKcmc",
"3Mn3UAtrpGY3cwiqLYf973q29oDR2LpnMYv"
)
val timestamp = 0L
val totalBalance = InitialBalance
val txs = ipoMembers.map { addr =>
val recipient = new Account(addr)
GenesisTransaction(recipient, totalBalance / ipoMembers.length, timestamp)
}
TransactionsBlockField(txs)
}
override def isValid(block: Block): Boolean = {
val lastBlockTs = blockStorage.history.lastBlock.timestampField.value
lazy val txsAreNew = block.transactions.forall(tx => (lastBlockTs - tx.timestamp).millis <= MaxTxAndBlockDiff)
lazy val blockIsValid = blockStorage.state.isValid(block.transactions, blockStorage.history.heightOf(block))
txsAreNew && blockIsValid
}
}
object SimpleTransactionModule {
type StoredInBlock = Seq[Transaction]
val MaxTimeForUnconfirmed = 90.minutes
val MaxTxAndBlockDiff = 2.hour
val MaxTransactionsPerBlock = 100
}
|
ConsensusResearch/Scorex-Lagonaki
|
scorex-transaction/src/main/scala/scorex/transaction/SimpleTransactionModule.scala
|
Scala
|
cc0-1.0
| 7,286
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.api.python.{PythonEvalType, PythonFunction}
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.expressions.{And, AttributeReference, GreaterThan, In}
import org.apache.spark.sql.execution.{FilterExec, InputAdapter, SparkPlanTest, WholeStageCodegenExec}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.BooleanType
class BatchEvalPythonExecSuite extends SparkPlanTest with SharedSQLContext {
import testImplicits.newProductEncoder
import testImplicits.localSeqToDatasetHolder
override def beforeAll(): Unit = {
super.beforeAll()
spark.udf.registerPython("dummyPythonUDF", new MyDummyPythonUDF)
}
override def afterAll(): Unit = {
spark.sessionState.functionRegistry.dropFunction(FunctionIdentifier("dummyPythonUDF"))
super.afterAll()
}
test("Python UDF: push down deterministic FilterExec predicates") {
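    // The UDF results should remain as attribute filters above BatchEvalPythonExec, while the
    // deterministic "a in (3, 4)" predicate is pushed below it into the codegen'd FilterExec.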
val df = Seq(("Hello", 4)).toDF("a", "b")
.where("dummyPythonUDF(b) and dummyPythonUDF(a) and a in (3, 4)")
val qualifiedPlanNodes = df.queryExecution.executedPlan.collect {
case f @ FilterExec(
And(_: AttributeReference, _: AttributeReference),
InputAdapter(_: BatchEvalPythonExec)) => f
case b @ BatchEvalPythonExec(_, _, WholeStageCodegenExec(FilterExec(_: In, _))) => b
}
assert(qualifiedPlanNodes.size == 2)
}
test("Nested Python UDF: push down deterministic FilterExec predicates") {
val df = Seq(("Hello", 4)).toDF("a", "b")
.where("dummyPythonUDF(a, dummyPythonUDF(a, b)) and a in (3, 4)")
val qualifiedPlanNodes = df.queryExecution.executedPlan.collect {
case f @ FilterExec(_: AttributeReference, InputAdapter(_: BatchEvalPythonExec)) => f
case b @ BatchEvalPythonExec(_, _, WholeStageCodegenExec(FilterExec(_: In, _))) => b
}
assert(qualifiedPlanNodes.size == 2)
}
test("Python UDF: no push down on non-deterministic") {
val df = Seq(("Hello", 4)).toDF("a", "b")
.where("b > 4 and dummyPythonUDF(a) and rand() > 0.3")
val qualifiedPlanNodes = df.queryExecution.executedPlan.collect {
case f @ FilterExec(
And(_: AttributeReference, _: GreaterThan),
InputAdapter(_: BatchEvalPythonExec)) => f
case b @ BatchEvalPythonExec(_, _, WholeStageCodegenExec(_: FilterExec)) => b
}
assert(qualifiedPlanNodes.size == 2)
}
test("Python UDF: push down on deterministic predicates after the first non-deterministic") {
val df = Seq(("Hello", 4)).toDF("a", "b")
.where("dummyPythonUDF(a) and rand() > 0.3 and b > 4")
val qualifiedPlanNodes = df.queryExecution.executedPlan.collect {
case f @ FilterExec(
And(_: AttributeReference, _: GreaterThan),
InputAdapter(_: BatchEvalPythonExec)) => f
case b @ BatchEvalPythonExec(_, _, WholeStageCodegenExec(_: FilterExec)) => b
}
assert(qualifiedPlanNodes.size == 2)
}
test("Python UDF refers to the attributes from more than one child") {
val df = Seq(("Hello", 4)).toDF("a", "b")
val df2 = Seq(("Hello", 4)).toDF("c", "d")
val joinDF = df.crossJoin(df2).where("dummyPythonUDF(a, c) == dummyPythonUDF(d, c)")
val qualifiedPlanNodes = joinDF.queryExecution.executedPlan.collect {
case b: BatchEvalPythonExec => b
}
assert(qualifiedPlanNodes.size == 1)
}
}
// This Python UDF is dummy and just for testing. Unable to execute.
class DummyUDF extends PythonFunction(
command = Array[Byte](),
envVars = Map("" -> "").asJava,
pythonIncludes = ArrayBuffer("").asJava,
pythonExec = "",
pythonVer = "",
broadcastVars = null,
accumulator = null)
class MyDummyPythonUDF extends UserDefinedPythonFunction(
name = "dummyUDF",
func = new DummyUDF,
dataType = BooleanType,
pythonEvalType = PythonEvalType.SQL_BATCHED_UDF,
udfDeterministic = true)
|
bravo-zhang/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExecSuite.scala
|
Scala
|
apache-2.0
| 4,818
|
package net.sansa_stack.inference.utils
import org.apache.jena.reasoner.TriplePattern
import org.apache.jena.sparql.util.NodeComparator
/**
* Ordering for triple patterns.
*
* @author Lorenz Buehmann
*/
class TriplePatternOrdering extends Ordering[TriplePattern]{
implicit val comp: NodeComparator = new NodeComparator
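  // Compare lexicographically by (subject, predicate, object), using Jena's NodeComparator for each node.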
override def compare(x: TriplePattern, y: TriplePattern): Int = {
Ordering.by{t: TriplePattern => (t.getSubject, t.getPredicate, t.getObject)}.compare(x, y)
}
}
|
SANSA-Stack/SANSA-RDF
|
sansa-inference/sansa-inference-common/src/main/scala/net/sansa_stack/inference/utils/TriplePatternOrdering.scala
|
Scala
|
apache-2.0
| 501
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.continuous
import java.util.UUID
import java.util.concurrent.TimeUnit
import java.util.function.UnaryOperator
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, Map => MutableMap}
import org.apache.spark.SparkEnv
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap, CurrentDate, CurrentTimestamp}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, StreamingDataSourceV2Relation, WriteToDataSourceV2}
import org.apache.spark.sql.execution.streaming.{ContinuousExecutionRelation, StreamingRelationV2, _}
import org.apache.spark.sql.sources.v2.{ContinuousReadSupport, DataSourceOptions, StreamWriteSupport}
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousReader, PartitionOffset}
import org.apache.spark.sql.streaming.{OutputMode, ProcessingTime, Trigger}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.{Clock, Utils}
class ContinuousExecution(
sparkSession: SparkSession,
name: String,
checkpointRoot: String,
analyzedPlan: LogicalPlan,
sink: StreamWriteSupport,
trigger: Trigger,
triggerClock: Clock,
outputMode: OutputMode,
extraOptions: Map[String, String],
deleteCheckpointOnStop: Boolean)
extends StreamExecution(
sparkSession, name, checkpointRoot, analyzedPlan, sink,
trigger, triggerClock, outputMode, deleteCheckpointOnStop) {
@volatile protected var continuousSources: Seq[ContinuousReader] = Seq()
override protected def sources: Seq[BaseStreamingSource] = continuousSources
// For use only in test harnesses.
private[sql] var currentEpochCoordinatorId: String = _
override val logicalPlan: LogicalPlan = {
val toExecutionRelationMap = MutableMap[StreamingRelationV2, ContinuousExecutionRelation]()
analyzedPlan.transform {
case r @ StreamingRelationV2(
source: ContinuousReadSupport, _, extraReaderOptions, output, _) =>
toExecutionRelationMap.getOrElseUpdate(r, {
ContinuousExecutionRelation(source, extraReaderOptions, output)(sparkSession)
})
case StreamingRelationV2(_, sourceName, _, _, _) =>
throw new UnsupportedOperationException(
s"Data source $sourceName does not support continuous processing.")
}
}
private val triggerExecutor = trigger match {
case ContinuousTrigger(t) => ProcessingTimeExecutor(ProcessingTime(t), triggerClock)
case _ => throw new IllegalStateException(s"Unsupported type of trigger: $trigger")
}
override protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit = {
val stateUpdate = new UnaryOperator[State] {
override def apply(s: State) = s match {
// If we ended the query to reconfigure, reset the state to active.
case RECONFIGURING => ACTIVE
case _ => s
}
}
do {
runContinuous(sparkSessionForStream)
} while (state.updateAndGet(stateUpdate) == ACTIVE)
}
/**
* Populate the start offsets to start the execution at the current offsets stored in the sink
* (i.e. avoid reprocessing data that we have already processed). This function must be called
* before any processing occurs and will populate the following fields:
* - currentBatchId
* - committedOffsets
* The basic structure of this method is as follows:
*
* Identify (from the commit log) the latest epoch that has committed
* IF last epoch exists THEN
* Get end offsets for the epoch
* Set those offsets as the current commit progress
* Set the next epoch ID as the last + 1
* Return the end offsets of the last epoch as start for the next one
* DONE
* ELSE
* Start a new query log
* DONE
*/
private def getStartOffsets(sparkSessionToRunBatches: SparkSession): OffsetSeq = {
// Note that this will need a slight modification for exactly once. If ending offsets were
// reported but not committed for any epochs, we must replay exactly to those offsets.
// For at least once, we can just ignore those reports and risk duplicates.
commitLog.getLatest() match {
case Some((latestEpochId, _)) =>
val nextOffsets = offsetLog.get(latestEpochId).getOrElse {
throw new IllegalStateException(
s"Batch $latestEpochId was committed without end epoch offsets!")
}
committedOffsets = nextOffsets.toStreamProgress(sources)
// Get to an epoch ID that has definitely never been sent to a sink before. Since sink
// commit happens between offset log write and commit log write, this means an epoch ID
// which is not in the offset log.
val (latestOffsetEpoch, _) = offsetLog.getLatest().getOrElse {
        throw new IllegalStateException(
          s"Offset log had no latest element. This shouldn't be possible because nextOffsets is " +
            s"an element.")
}
currentBatchId = latestOffsetEpoch + 1
logDebug(s"Resuming at epoch $currentBatchId with committed offsets $committedOffsets")
nextOffsets
case None =>
// We are starting this stream for the first time. Offsets are all None.
logInfo(s"Starting new streaming query.")
currentBatchId = 0
OffsetSeq.fill(continuousSources.map(_ => null): _*)
}
}
/**
* Do a continuous run.
* @param sparkSessionForQuery Isolated [[SparkSession]] to run the continuous query with.
*/
private def runContinuous(sparkSessionForQuery: SparkSession): Unit = {
// A list of attributes that will need to be updated.
val replacements = new ArrayBuffer[(Attribute, Attribute)]
// Translate from continuous relation to the underlying data source.
var nextSourceId = 0
continuousSources = logicalPlan.collect {
case ContinuousExecutionRelation(dataSource, extraReaderOptions, output) =>
val metadataPath = s"$resolvedCheckpointRoot/sources/$nextSourceId"
nextSourceId += 1
dataSource.createContinuousReader(
java.util.Optional.empty[StructType](),
metadataPath,
new DataSourceOptions(extraReaderOptions.asJava))
}
uniqueSources = continuousSources.distinct
val offsets = getStartOffsets(sparkSessionForQuery)
var insertedSourceId = 0
val withNewSources = logicalPlan transform {
case ContinuousExecutionRelation(_, _, output) =>
val reader = continuousSources(insertedSourceId)
insertedSourceId += 1
val newOutput = reader.readSchema().toAttributes
assert(output.size == newOutput.size,
s"Invalid reader: ${Utils.truncatedString(output, ",")} != " +
s"${Utils.truncatedString(newOutput, ",")}")
replacements ++= output.zip(newOutput)
val loggedOffset = offsets.offsets(0)
val realOffset = loggedOffset.map(off => reader.deserializeOffset(off.json))
reader.setStartOffset(java.util.Optional.ofNullable(realOffset.orNull))
new StreamingDataSourceV2Relation(newOutput, reader)
}
// Rewire the plan to use the new attributes that were returned by the source.
val replacementMap = AttributeMap(replacements)
val triggerLogicalPlan = withNewSources transformAllExpressions {
case a: Attribute if replacementMap.contains(a) =>
replacementMap(a).withMetadata(a.metadata)
case (_: CurrentTimestamp | _: CurrentDate) =>
throw new IllegalStateException(
"CurrentTimestamp and CurrentDate not yet supported for continuous processing")
}
val writer = sink.createStreamWriter(
s"$runId",
triggerLogicalPlan.schema,
outputMode,
new DataSourceOptions(extraOptions.asJava))
val withSink = WriteToDataSourceV2(writer, triggerLogicalPlan)
val reader = withSink.collect {
case StreamingDataSourceV2Relation(_, r: ContinuousReader) => r
}.head
reportTimeTaken("queryPlanning") {
lastExecution = new IncrementalExecution(
sparkSessionForQuery,
withSink,
outputMode,
checkpointFile("state"),
runId,
currentBatchId,
offsetSeqMetadata)
lastExecution.executedPlan // Force the lazy generation of execution plan
}
sparkSessionForQuery.sparkContext.setLocalProperty(
ContinuousExecution.START_EPOCH_KEY, currentBatchId.toString)
// Add another random ID on top of the run ID, to distinguish epoch coordinators across
// reconfigurations.
val epochCoordinatorId = s"$runId--${UUID.randomUUID}"
currentEpochCoordinatorId = epochCoordinatorId
sparkSessionForQuery.sparkContext.setLocalProperty(
ContinuousExecution.EPOCH_COORDINATOR_ID_KEY, epochCoordinatorId)
// Use the parent Spark session for the endpoint since it's where this query ID is registered.
val epochEndpoint =
EpochCoordinatorRef.create(
writer, reader, this, epochCoordinatorId, currentBatchId, sparkSession, SparkEnv.get)
val epochUpdateThread = new Thread(new Runnable {
override def run: Unit = {
try {
triggerExecutor.execute(() => {
startTrigger()
if (reader.needsReconfiguration() && state.compareAndSet(ACTIVE, RECONFIGURING)) {
if (queryExecutionThread.isAlive) {
queryExecutionThread.interrupt()
}
false
} else if (isActive) {
currentBatchId = epochEndpoint.askSync[Long](IncrementAndGetEpoch)
logInfo(s"New epoch $currentBatchId is starting.")
true
} else {
false
}
})
} catch {
case _: InterruptedException =>
// Cleanly stop the query.
return
}
}
}, s"epoch update thread for $prettyIdString")
try {
epochUpdateThread.setDaemon(true)
epochUpdateThread.start()
reportTimeTaken("runContinuous") {
SQLExecution.withNewExecutionId(
sparkSessionForQuery, lastExecution)(lastExecution.toRdd)
}
} catch {
case t: Throwable
if StreamExecution.isInterruptionException(t) && state.get() == RECONFIGURING =>
logInfo(s"Query $id ignoring exception from reconfiguring: $t")
// interrupted by reconfiguration - swallow exception so we can restart the query
} finally {
epochEndpoint.askSync[Unit](StopContinuousExecutionWrites)
SparkEnv.get.rpcEnv.stop(epochEndpoint)
epochUpdateThread.interrupt()
epochUpdateThread.join()
stopSources()
sparkSession.sparkContext.cancelJobGroup(runId.toString)
}
}
/**
* Report ending partition offsets for the given reader at the given epoch.
*/
def addOffset(
epoch: Long, reader: ContinuousReader, partitionOffsets: Seq[PartitionOffset]): Unit = {
assert(continuousSources.length == 1, "only one continuous source supported currently")
val globalOffset = reader.mergeOffsets(partitionOffsets.toArray)
val oldOffset = synchronized {
offsetLog.add(epoch, OffsetSeq.fill(globalOffset))
offsetLog.get(epoch - 1)
}
// If offset hasn't changed since last epoch, there's been no new data.
if (oldOffset.contains(OffsetSeq.fill(globalOffset))) {
noNewData = true
}
awaitProgressLock.lock()
try {
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
}
/**
* Mark the specified epoch as committed. All readers must have reported end offsets for the epoch
* before this is called.
*/
def commit(epoch: Long): Unit = {
assert(continuousSources.length == 1, "only one continuous source supported currently")
assert(offsetLog.get(epoch).isDefined, s"offset for epoch $epoch not reported before commit")
synchronized {
if (queryExecutionThread.isAlive) {
commitLog.add(epoch)
val offset = offsetLog.get(epoch).get.offsets(0).get
committedOffsets ++= Seq(continuousSources(0) -> offset)
} else {
return
}
}
if (minLogEntriesToMaintain < currentBatchId) {
offsetLog.purge(currentBatchId - minLogEntriesToMaintain)
commitLog.purge(currentBatchId - minLogEntriesToMaintain)
}
awaitProgressLock.lock()
try {
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
}
/**
* Blocks the current thread until execution has committed at or after the specified epoch.
*/
private[sql] def awaitEpoch(epoch: Long): Unit = {
def notDone = {
val latestCommit = commitLog.getLatest()
latestCommit match {
case Some((latestEpoch, _)) =>
latestEpoch < epoch
case None => true
}
}
while (notDone) {
awaitProgressLock.lock()
try {
awaitProgressLockCondition.await(100, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
} finally {
awaitProgressLock.unlock()
}
}
}
}
object ContinuousExecution {
val START_EPOCH_KEY = "__continuous_start_epoch"
val EPOCH_COORDINATOR_ID_KEY = "__epoch_coordinator_id"
}
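// A minimal, self-contained sketch (hypothetical types, not the Spark API) of the resume logic
// described in getStartOffsets above: if a committed epoch exists, restart from its end offsets
// and bump the next epoch id past anything already written to the offset log; otherwise start
// at epoch 0. `InMemoryLog`, `Offsets` and `resumePoint` exist only for illustration.
object ResumeFromCommitLogSketch {
  final case class Offsets(value: Map[String, Long])

  final class InMemoryLog[T] {
    private var entries = Map.empty[Long, T]
    def add(epoch: Long, entry: T): Unit = entries += epoch -> entry
    def get(epoch: Long): Option[T] = entries.get(epoch)
    def getLatest(): Option[(Long, T)] = if (entries.isEmpty) None else Some(entries.maxBy(_._1))
  }

  /** Returns (nextEpochId, startOffsets). */
  def resumePoint(offsetLog: InMemoryLog[Offsets], commitLog: InMemoryLog[Unit]): (Long, Offsets) =
    commitLog.getLatest() match {
      case Some((latestCommitted, _)) =>
        val committed = offsetLog.get(latestCommitted)
          .getOrElse(sys.error(s"epoch $latestCommitted was committed without end offsets"))
        // Skip to an epoch id that was never handed to the sink, i.e. past the offset log head.
        val (latestOffsetEpoch, _) = offsetLog.getLatest().get
        (latestOffsetEpoch + 1, committed)
      case None =>
        (0L, Offsets(Map.empty))
    }

  def main(args: Array[String]): Unit = {
    val offsets = new InMemoryLog[Offsets]
    val commits = new InMemoryLog[Unit]
    offsets.add(0, Offsets(Map("src" -> 10L)))
    commits.add(0, ())
    offsets.add(1, Offsets(Map("src" -> 20L))) // written to the offset log but never committed
    println(resumePoint(offsets, commits))     // (2,Offsets(Map(src -> 10)))
  }
}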
|
esi-mineset/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousExecution.scala
|
Scala
|
apache-2.0
| 14,230
|
package com.wordtrellis.projecteuler
import scala.collection.mutable.ListBuffer
/**
 * UnitFraction is useful for determining how many decimal places it takes for a repeating
 * decimal expansion to start repeating.
* see Project Euler problem 26
* @author Todd Cook
*
*/
class UnitFraction(val num: Int, val MAX_SAMPLE_LENGTH: Int = 2000) {
private val placeDivisors = new ListBuffer[Int]()
def seed: Int = num
def places(): List[Int] = placeDivisors.toList
def initialize(): Unit = {
val divisor = num
var base = 1
var processing = true
placeDivisors.append(0)
while (processing) {
base = 10 * base
if (base < divisor) {
placeDivisors.append(0)
base = 10 * base
}
val tmpResult = base / divisor
base = base - (divisor * tmpResult) // .asInstanceOf[Int]
placeDivisors.append(tmpResult.asInstanceOf[Int])
if (base == 0) {
processing = false
}
if (placeDivisors.length > MAX_SAMPLE_LENGTH) {
processing = false
}
}
}
initialize()
override def toString: String = placeDivisors.mkString("")
}
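// Usage sketch (not part of the original file): a couple of unit fractions run through the class
// above. The printed digits include a leading 0 for the integer part; for 1/7 the remaining
// digits cycle with period 6, which is what Project Euler problem 26 asks about.
object UnitFractionDemo {
  def main(args: Array[String]): Unit = {
    // 1/8 = 0.125 terminates, so the expansion stops as soon as the remainder hits zero.
    println(new UnitFraction(8))                        // 0125
    // 1/7 = 0.(142857) repeats forever; the sampled digits cycle with period 6.
    val sevenths = new UnitFraction(7).places().drop(1) // drop the leading integer-part 0
    println(sevenths.take(6).mkString)                  // 142857
    println(sevenths.take(12).mkString)                 // 142857142857
  }
}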
|
todd-cook/Effective-Scala-with-Project-Euler
|
src/main/scala/com/wordtrellis/projecteuler/UnitFraction.scala
|
Scala
|
mit
| 1,126
|
package im.tox.toktok.app.main
import android.content.Context
import android.graphics.drawable.TransitionDrawable
import android.support.v4.widget.NestedScrollView
import android.support.v7.widget.CardView
import android.util.AttributeSet
import android.view.View.MeasureSpec
import android.view.animation.{ Animation, AnimationUtils }
import android.view.{ View, ViewGroup }
import android.widget.{ EditText, LinearLayout }
import im.tox.toktok.TypedResource._
import im.tox.toktok.{ R, TR }
final class HomeSearch(
context: Context,
attrs: AttributeSet,
defStyle: Int
) extends ViewGroup(context, attrs, defStyle) {
private var mBackground: TransitionDrawable = null
private var mBase: LinearLayout = null
private var mInput: EditText = null
private var mCardView: CardView = null
private var mRecycler: NestedScrollView = null
def this(context: Context, attrs: AttributeSet) { this(context, attrs, 0) }
def this(context: Context) { this(context, null) }
protected override def onFinishInflate(): Unit = {
mBackground = getBackground.asInstanceOf[TransitionDrawable]
mBackground.startTransition(500)
mBase = this.findView(TR.home_search_layout)
mCardView = this.findView(TR.home_search_bar)
mRecycler = this.findView(TR.home_search_bar_recycler)
val searchBarAnimation = AnimationUtils.loadAnimation(mCardView.getContext, R.anim.abc_fade_in)
mCardView.startAnimation(searchBarAnimation)
mInput = this.findView(TR.home_search_input)
super.onFinishInflate()
}
protected override def onLayout(changed: Boolean, l: Int, t: Int, r: Int, b: Int): Unit = {
mBase.layout(0, getStatusBarHeight, getWidth, getHeight)
}
private def getStatusBarHeight: Int = {
val resourceId = getResources.getIdentifier("status_bar_height", "dimen", "android")
if (resourceId > 0) {
getResources.getDimensionPixelSize(resourceId)
} else {
0
}
}
protected override def onMeasure(widthMeasureSpec: Int, heightMeasureSpec: Int): Unit = {
measureChildren(widthMeasureSpec, heightMeasureSpec)
val maxWidth: Int = MeasureSpec.getSize(widthMeasureSpec)
val maxHeight: Int = MeasureSpec.getSize(heightMeasureSpec)
setMeasuredDimension(View.resolveSizeAndState(maxWidth, widthMeasureSpec, 0), View.resolveSizeAndState(maxHeight, heightMeasureSpec, 0))
}
def finish(after: => Unit): Unit = {
if (mInput.isFocusable) {
mInput.clearFocus()
}
val searchBarAnimation = AnimationUtils.loadAnimation(mCardView.getContext, R.anim.abc_fade_out)
searchBarAnimation.setAnimationListener(new Animation.AnimationListener() {
def onAnimationStart(animation: Animation): Unit = {}
def onAnimationEnd(animation: Animation): Unit = {
mCardView.setVisibility(View.INVISIBLE)
setVisibility(View.GONE)
mBackground.reverseTransition(500)
after
}
def onAnimationRepeat(animation: Animation): Unit = {}
})
mCardView.startAnimation(searchBarAnimation)
}
}
|
vassad/toktok
|
src/main/scala/im/tox/toktok/app/main/HomeSearch.scala
|
Scala
|
agpl-3.0
| 3,032
|
package monocle
import monocle.syntax.{AppliedFocusSyntax, ComposedFocusSyntax}
import monocle.internal.focus.FocusImpl
import monocle.function.{At, Each, Index}
object Focus extends AppliedFocusSyntax with ComposedFocusSyntax {
sealed trait KeywordContext {
extension [From](from: From)
def as[CastTo <: From]: CastTo =
scala.sys.error("Extension method 'as[CastTo]' should only be used within the monocle.Focus macro.")
extension [A](opt: Option[A])
def some: A = scala.sys.error("Extension method 'some' should only be used within the monocle.Focus macro.")
extension [From, To](from: From)(using Each[From, To])
def each: To = scala.sys.error("Extension method 'each' should only be used within the monocle.Focus macro.")
extension [From, I, To](from: From)
def at(i: I)(using At[From, i.type, To]): To =
scala.sys.error("Extension method 'at(i)' should only be used within the monocle.Focus macro.")
extension [From, I, To](from: From)
def index(i: I)(using Index[From, I, To]): To =
scala.sys.error("Extension method 'index(i)' should only be used within the monocle.Focus macro.")
extension [A](from: Option[A])
def withDefault(defaultValue: A): A =
scala.sys.error("Extension method 'withDefault(value)' should only be used within the monocle.Focus macro.")
}
def apply[S] = new MkFocus[S]
class MkFocus[From] {
def apply(): Iso[From, From] = Iso.id
transparent inline def apply[To](inline lambda: (KeywordContext ?=> From => To)): Any =
${ FocusImpl('lambda) }
}
}
|
julien-truffaut/Monocle
|
core/shared/src/main/scala-3.x/monocle/Focus.scala
|
Scala
|
mit
| 1,601
|
package org.jetbrains.plugins.scala
package lang
package refactoring
package introduceField
import com.intellij.codeInsight.navigation.NavigationUtil
import com.intellij.ide.util.PsiClassListCellRenderer
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.psi._
import com.intellij.psi.search.PsiElementProcessor
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.annotations.Nls
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlock, ScExpression}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScExtendsBlock
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil._
import scala.collection.immutable.ArraySeq
/**
* Nikolay.Tropin
* 6/28/13
*/
abstract class ScalaIntroduceFieldHandlerBase extends ScalaRefactoringActionHandler {
val REFACTORING_NAME: String = ScalaBundle.message("introduce.field.title")
protected def isSuitableClass(elem: PsiElement, clazz: ScTemplateDefinition): Boolean
def afterClassChoosing[T <: PsiElement](elem: T, types: ArraySeq[ScType], project: Project, editor: Editor, file: PsiFile, @Nls title: String)
(action: IntroduceFieldContext[T] => Unit): Unit = {
try {
val classes = ScalaPsiUtil.getParents(elem, file).collect {
case t: ScTemplateDefinition if isSuitableClass(elem, t) => t
}.toArray[PsiClass]
classes.length match {
case 0 =>
case 1 => action(new IntroduceFieldContext[T](project, editor, file, elem, types, classes(0).asInstanceOf[ScTemplateDefinition]))
case _ =>
val selection = classes(0)
val processor = new PsiElementProcessor[PsiClass] {
override def execute(aClass: PsiClass): Boolean = {
action(new IntroduceFieldContext[T](project, editor, file, elem, types, aClass.asInstanceOf[ScTemplateDefinition]))
false
}
}
NavigationUtil.getPsiElementPopup(classes, new PsiClassListCellRenderer() {
override def getElementText(element: PsiClass): String = super.getElementText(element).replace("$", "")
}, title, processor, selection).showInBestPositionFor(editor)
}
}
catch {
case _: IntroduceException =>
}
}
protected def anchorForNewDeclaration(expr: ScExpression, occurrences: Seq[TextRange], aClass: ScTemplateDefinition): PsiElement = {
val firstOccOffset = occurrences.map(_.getStartOffset).min
val anchor = statementsAndMembersInClass(aClass).find(_.getTextRange.getEndOffset >= firstOccOffset)
anchor.getOrElse {
if (PsiTreeUtil.isAncestor(aClass.extendsBlock.templateBody.orNull, commonParent(aClass.getContainingFile, occurrences), false)) null
else {
aClass.extendsBlock match {
case ScExtendsBlock.EarlyDefinitions(earlyDef) => earlyDef.lastChild.orNull
case extBl => extBl.templateParents.orNull
}
}
}
}
}
object ScalaIntroduceFieldHandlerBase {
def canBeInitializedInDeclaration(expr: ScExpression, aClass: ScTemplateDefinition): Boolean = {
val stmtsAndMmbrs = statementsAndMembersInClass(aClass)
expr.withParentsInFile
.find(stmtsAndMmbrs.contains(_))
.forall(checkForwardReferences(expr, _))
}
def canBeInitInLocalScope[T <: PsiElement](ifc: IntroduceFieldContext[T], replaceAll: Boolean): Boolean = {
val occurrences = if (replaceAll) ifc.occurrences else Seq(ifc.element.getTextRange)
val parExpr: ScExpression = findParentExpr(commonParent(ifc.file, occurrences))
val stmtsAndMmbrs = statementsAndMembersInClass(ifc.aClass)
val containerIsLocal = container(parExpr).getOrElse(ifc.file)
.withParentsInFile
.exists(stmtsAndMmbrs.contains(_))
if (!containerIsLocal) false
else {
ifc.element match {
case expr: ScExpression => checkForwardReferences(expr, parExpr)
case _ => false
}
}
}
def anchorForInitializer(occurrences: Seq[TextRange], file: PsiFile): Option[PsiElement] = {
var firstRange = occurrences.head
val parExpr = findParentExpr(commonParent(file, occurrences))
if (parExpr == null) return None
val isNotBlock = !parExpr.isInstanceOf[ScBlock]
val parent =
if (isNotBlock && needBraces(parExpr, nextParent(parExpr, file))) {
firstRange = firstRange.shiftRight(1)
parExpr.replaceExpression(createExpressionFromText(s"{${parExpr.getText}}")(file.getManager),
removeParenthesis = false)
} else container(parExpr).getOrElse(file)
if (parent == null) None
else parent.getChildren.find(_.getTextRange.contains(firstRange))
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/introduceField/ScalaIntroduceFieldHandlerBase.scala
|
Scala
|
apache-2.0
| 5,116
|
class Test2(val valueVal: Test2) extends AnyVal // error: value class cannot wrap itself
|
som-snytt/dotty
|
tests/neg/i1642.scala
|
Scala
|
apache-2.0
| 89
|
// scalac: -Ystop-after:typer -Ymacro-expand:discard -nowarn
object Test {
"" match {
case Unapply(a, b) =>
a: Int
b: String
case UnapplySeq(a, b1, b2) =>
a: Int
b1: String
b2: String
}
}
// These used to fail `too many patterns` under -Ymacro-expand:discard
|
lrytz/scala
|
test/files/pos/t8934a/Test_2.scala
|
Scala
|
apache-2.0
| 301
|
package scalan
import scala.language.implicitConversions
import special.collection._
import special.wrappers.{WrappersSpecModule, WrappersModule}
import scalan.util.{MemoizedFunc}
import scalan.ExactIntegral._
trait Library extends Scalan
with WrappersModule
with WrappersSpecModule
with CollsModule
with SizesModule
with CostsModule
with ConcreteSizesModule
with ConcreteCostsModule
with MonoidsModule
with MonoidInstancesModule
with CostedOptionsModule {
import WOption._
import WRType._
import Coll._; import CollBuilder._;
import Size._
import Costed._; import CostedBuilder._
import CostedFunc._;
import WSpecialPredef._
type RSize[Val] = Ref[Size[Val]]
type RCosted[A] = Ref[Costed[A]]
type LazyRep[T] = MutableLazy[Ref[T]]
private val _liftElemMemo = new MemoizedFunc({
case eT: Elem[t] =>
val lT = Liftables.asLiftable[Any, t](eT.liftable)
liftableRType(lT).lift(eT.sourceType.asInstanceOf[RType[Any]])
})
implicit def liftElem[T](eT: Elem[T]): Ref[WRType[T]] = {
_liftElemMemo(eT).asInstanceOf[Ref[WRType[T]]] // asRep cannot be used for AnyRef
}
private val _specialPredef: LazyRep[WSpecialPredefCompanionCtor] = MutableLazy(RWSpecialPredef.value)
def specialPredef: Ref[WSpecialPredefCompanionCtor] = _specialPredef.value
override protected def onReset(): Unit = {
_specialPredef.reset()
_liftElemMemo.reset()
super.onReset()
}
def zeroSize[V](eVal: Elem[V]): RSize[V] = asRep[Size[V]](eVal match {
case pe: PairElem[a,b] => costedBuilder.mkSizePair(zeroSize[a](pe.eFst), zeroSize[b](pe.eSnd))
case ce: CollElem[_,_] =>
implicit val eItem = ce.eItem
costedBuilder.mkSizeColl(colBuilder.fromItems(zeroSize(eItem)))
case oe: WOptionElem[_,_] => costedBuilder.mkSizeOption(specialPredef.some(zeroSize(oe.eItem)))
case _: BaseElem[_] | _: EntityElem[_] => costedBuilder.mkSizePrim(0L, eVal)
case _ => !!!(s"Cannot create zeroSize($eVal)")
})
val CM = CollMethods
private val CBM = CollBuilderMethods
private val WOptionM = WOptionMethods
private val SPCM = WSpecialPredefCompanionMethods
def colBuilder: Ref[CollBuilder]
def costedBuilder: Ref[CostedBuilder]
def intPlusMonoid: Ref[Monoid[Int]]
def longPlusMonoid: Ref[Monoid[Long]]
val intPlusMonoidValue = new special.collection.MonoidBuilderInst().intPlusMonoid
val longPlusMonoidValue = new special.collection.MonoidBuilderInst().longPlusMonoid
object IsNumericToInt {
def unapply(d: Def[_]): Nullable[Ref[A] forSome {type A}] = d match {
case ApplyUnOp(_: NumericToInt[_], x) => Nullable(x.asInstanceOf[Ref[A] forSome {type A}])
case _ => Nullable.None
}
}
object IsNumericToLong {
def unapply(d: Def[_]): Nullable[Ref[A] forSome {type A}] = d match {
case ApplyUnOp(_: NumericToLong[_], x) => Nullable(x.asInstanceOf[Ref[A] forSome {type A}])
case _ => Nullable.None
}
}
override def rewriteDef[T](d: Def[T]) = d match {
case CM.length(ys) => ys.node match {
// Rule: xs.map(f).length ==> xs.length
case CM.map(xs, _) =>
xs.length
// Rule: replicate(len, v).length => len
case CBM.replicate(_, len, _) =>
len
// Rule: Const[Coll[T]](coll).length =>
case CollConst(coll, _) =>
coll.length
// Rule: Coll(items @ Seq(x1, x2, x3)).length => items.length
case CBM.fromItems(_, items, _) =>
items.length
case _ => super.rewriteDef(d)
}
case IsNumericToLong(Def(IsNumericToInt(x))) if x.elem == LongElement => x
// Rule: replicate(l, x).zip(replicate(l, y)) ==> replicate(l, (x,y))
case CM.zip(CBM.replicate(b1, l1, v1), CBM.replicate(b2, l2, v2)) if b1 == b2 && l1 == l2 =>
b1.replicate(l1, Pair(v1, v2))
case CM.map(xs, _f) => _f.node match {
case IdentityLambda() => xs
case _ => xs.node match {
// Rule: replicate(l, v).map(f) ==> replicate(l, f(v))
case CBM.replicate(b, l, v: Ref[a]) =>
val f = asRep[a => Any](_f)
b.replicate(l, Apply(f, v, false))
// Rule: xs.map(f).map(g) ==> xs.map(x => g(f(x)))
case CM.map(_xs, f: RFunc[a, b]) =>
implicit val ea = f.elem.eDom
val xs = asRep[Coll[a]](_xs)
val g = asRep[b => Any](_f)
xs.map[Any](fun { x: Ref[a] => g(f(x)) })
case _ => super.rewriteDef(d)
}
}
case CM.sum(xs, m) => m.node match {
case _: IntPlusMonoid => xs.node match {
case CollConst(coll, lA) if lA.eW == IntElement =>
coll.asInstanceOf[SColl[Int]].sum(intPlusMonoidValue)
case CBM.replicate(_, n, x: Ref[Int] @unchecked) =>
x * n
case _ => super.rewriteDef(d)
}
case _: LongPlusMonoid => xs.node match {
case CollConst(coll, lA) if lA.eW == LongElement =>
coll.asInstanceOf[SColl[Long]].sum(longPlusMonoidValue)
case CBM.replicate(_, n, x: Ref[Long] @unchecked) =>
x * n.toLong
case _ => super.rewriteDef(d)
}
case _ => super.rewriteDef(d)
}
// Rule: opt.fold(None, x => Some(x)) ==> opt
case WOptionM.fold(opt, Def(ThunkDef(SPCM.none(_), _)), Def(Lambda(_, _, x, SPCM.some(y)))) if x == y => opt
case WOptionM.getOrElse(opt, _) => opt.node match {
// Rule: Some(x).getOrElse(_) ==> x
case SPCM.some(x) => x
case WOptionConst(Some(x), lA) => lA.lift(x)
case _ => super.rewriteDef(d)
}
case _ => super.rewriteDef(d)
}
override def invokeUnlifted(e: Elem[_], mc: MethodCall, dataEnv: DataEnv): AnyRef = e match {
case _: CollElem[_,_] => mc match {
case CollMethods.map(xs, f) =>
val newMC = mc.copy(args = mc.args :+ f.elem.eRange)(mc.resultType, mc.isAdapterCall)
super.invokeUnlifted(e, newMC, dataEnv)
case _ =>
super.invokeUnlifted(e, mc, dataEnv)
}
case _ =>
super.invokeUnlifted(e, mc, dataEnv)
}
}
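// Toy sketch (hypothetical ADT, not the Scalan/sigmastate IR) of the kind of rewrite rules
// applied in rewriteDef above: length is invariant under map, and the length of a replicated
// collection is just its replication count.
object RewriteRuleSketch {
  sealed trait Node
  final case class Replicated(len: Int, value: Int) extends Node
  final case class Mapped(xs: Node, f: Int => Int) extends Node
  final case class Len(xs: Node) extends Node
  final case class IntConst(value: Int) extends Node

  def rewrite(e: Node): Node = e match {
    case Len(Mapped(xs, _))      => rewrite(Len(xs)) // Rule: xs.map(f).length ==> xs.length
    case Len(Replicated(len, _)) => IntConst(len)    // Rule: replicate(len, v).length ==> len
    case other                   => other
  }

  def main(args: Array[String]): Unit =
    println(rewrite(Len(Mapped(Replicated(5, 1), _ + 1)))) // IntConst(5)
}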
|
ScorexFoundation/sigmastate-interpreter
|
library/src/main/scala/scalan/Library.scala
|
Scala
|
mit
| 5,966
|
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package no.uio.musit.service
import akka.stream.Materializer
import no.uio.musit.test.MusitSpecWithAppPerSuite
import play.api.mvc.Results.Ok
import play.api.test.FakeRequest
import scala.concurrent.Future
class AccessLogFilterSpec extends MusitSpecWithAppPerSuite {
implicit lazy val materializer: Materializer = app.materializer
"The AccessLogFilter" should {
"""should set a "Processing-Time" header""" in {
val filter = new AccessLogFilter()
val result = filter(request => Future.successful(Ok))(FakeRequest())
val headers = result.futureValue.header.headers
headers.get("Processing-Time") must not be None
}
}
}
|
kpmeen/musit
|
musit-service/src/test/scala/no/uio/musit/service/AccessLogFilterSpec.scala
|
Scala
|
gpl-2.0
| 1,529
|
package com.ing.baker.runtime.akka.actor.serialization
import com.ing.baker.runtime.serialization.Encryption._
import org.scalacheck.Gen._
import org.scalacheck.Prop.forAll
import org.scalacheck._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.Checkers
class EncryptionPropertiesSpec extends AnyFunSuite with Checkers {
val desEncryptionGen: Gen[DESEncryption] = for {
    keyChars <- Gen.listOfN(8, alphaChar)
} yield new DESEncryption(keyChars.mkString)
val aesEncryptionGen: Gen[AESEncryption] = for {
    keyChars <- Gen.listOfN(16, alphaChar)
} yield new AESEncryption(keyChars.mkString)
val keyAndTextGen: Gen[(JavaCryptoEncryption, String)] = for {
    algorithm <- Gen.oneOf(aesEncryptionGen, desEncryptionGen)
    text <- Gen.alphaStr
} yield (algorithm, text)
test("(AES|DES)Encryption: decrypt(encrypt(plaintext)) should be plaintext") {
val property = forAll(keyAndTextGen) {
case (encryption: JavaCryptoEncryption, plainText: String) =>
val encryptedBytes = encryption.encrypt(plainText.getBytes)
val decryptedPlainText = new String(encryption.decrypt(encryptedBytes))
plainText == decryptedPlainText
}
check(property, Test.Parameters.defaultVerbose.withMinSuccessfulTests(10))
}
}
|
ing-bank/baker
|
core/akka-runtime/src/test/scala/com/ing/baker/runtime/akka/actor/serialization/EncryptionPropertiesSpec.scala
|
Scala
|
mit
| 1,296
|
package controllers.forms
import play.api.data.{Form,Forms}
import com.overviewdocs.models.View
object ViewUpdateAttributesForm {
def apply() = Form(
Forms.mapping("title" -> Forms.nonEmptyText)
((title) => View.UpdateAttributes(title=title))
((attributes) => Some((attributes.title)))
)
}
|
overview/overview-server
|
web/app/controllers/forms/ViewUpdateAttributesForm.scala
|
Scala
|
agpl-3.0
| 309
|
package quizleague.rest.mail
import javax.servlet.http.HttpServlet
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import java.util.Properties
import javax.mail.Session
import javax.mail.internet.MimeMessage
import javax.mail.internet.InternetAddress
import quizleague.domain._
import javax.mail.Address
import java.util.logging.Logger
import java.util.logging.Level
import javax.mail.Message.RecipientType
import javax.mail.Transport
import quizleague.data.Storage._
import quizleague.conversions.RefConversions._
import quizleague.util.json.codecs.DomainCodecs._
import quizleague.data._
class MailHandler extends HttpServlet {
val LOG: Logger = Logger.getLogger(classOf[MailHandler].getName());
override def doPost(req: HttpServletRequest, resp: HttpServletResponse): Unit = {
val props: Properties = new Properties;
val session: Session = Session.getDefaultInstance(props, null);
try {
val message = new MimeMessage(session,
req.getInputStream());
val globaldata = applicationContext()
val recipientParts = req.getPathInfo().replaceFirst("/", "").split("@")
val recipientName = recipientParts(0);
globaldata.emailAliases.filter(_.alias == recipientName).foreach { alias => sendMail(message, globaldata, new InternetAddress(alias.user.email)); return }
list[Team].filter(_.emailName == recipientName).foreach { team:Team =>
val addresses: Array[Address] = team.users.map { a => new InternetAddress(a.email) }.toArray
sendMail(message, globaldata, addresses: _*)
return
}
LOG.fine("No matching addressees for any recipients");
} catch {
case e: Exception => LOG.log(Level.SEVERE, "Failure recieving mail", e);
}
}
def sendMail(message: MimeMessage, globaldata: ApplicationContext,
addresses: Address*): Unit =
{
try {
val from = message.getFrom
message.setSender(new InternetAddress(globaldata.senderEmail))
message.setReplyTo(from)
message.setRecipients(RecipientType.TO, addresses.toArray);
message.setSubject(s"Sent via ${globaldata.leagueName} : ${message.getSubject}");
LOG.fine(s"${message.getFrom()(0)} to ${message.getAllRecipients()(0).toString}");
Transport.send(message);
} catch {
case e: Exception => {
LOG.log(Level.SEVERE, "Failure sending mail", e);
val session = Session.getDefaultInstance(new Properties(), null);
val notification = new MimeMessage(session);
notification.addRecipient(RecipientType.TO, message.getFrom()(0));
notification.setSender(new InternetAddress(globaldata.senderEmail));
notification.setSubject(s"${globaldata.leagueName} : Message delivery failed");
notification.setText("Message delivery failed, probably due to an attachment.\\nThis mail service does not allow attachments. Try resending as text only.");
Transport.send(notification);
}
}
}
}
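// Hedged sketch (hypothetical types, not the real quizleague domain model) of the recipient
// resolution order implemented in doPost above: an email alias wins over a team address, and an
// unknown local part resolves to no recipients at all.
object RecipientResolutionSketch {
  final case class MailAlias(alias: String, userEmail: String)
  final case class SimpleTeam(emailName: String, memberEmails: Seq[String])

  def resolve(localPart: String, aliases: Seq[MailAlias], teams: Seq[SimpleTeam]): Seq[String] =
    aliases.find(_.alias == localPart).map(a => Seq(a.userEmail))
      .orElse(teams.find(_.emailName == localPart).map(_.memberEmails))
      .getOrElse(Seq.empty)

  def main(args: Array[String]): Unit = {
    val aliases = Seq(MailAlias("secretary", "sec@example.org"))
    val teams   = Seq(SimpleTeam("reddevils", Seq("a@example.org", "b@example.org")))
    println(resolve("secretary", aliases, teams)) // List(sec@example.org)
    println(resolve("reddevils", aliases, teams)) // List(a@example.org, b@example.org)
    println(resolve("unknown", aliases, teams))   // List()
  }
}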
|
gumdrop/quizleague
|
jvm/src/main/scala/quizleague/rest/mail/MailHandler.scala
|
Scala
|
mit
| 3,087
|
package lila.common
import lila.common.PimpedJson._
import play.api.libs.json.{ Json, OWrites }
case class LightUser(
id: String,
name: String,
title: Option[String],
isPatron: Boolean) {
def titleName = title.fold(name)(_ + " " + name)
def titleNameHtml = title.fold(name)(_ + " " + name)
}
object LightUser {
implicit val lightUserWrites = OWrites[LightUser] { u =>
Json.obj(
"id" -> u.id,
"name" -> u.name,
"title" -> u.title,
"patron" -> u.isPatron).noNull
}
type Getter = String => Option[LightUser]
}
|
clarkerubber/lila
|
modules/common/src/main/LightUser.scala
|
Scala
|
agpl-3.0
| 572
|
package com.sksamuel.avro4s.github
import com.sksamuel.avro4s._
import org.scalatest.{FunSuite, Matchers}
case class TestEntry(name: String)
sealed trait TestClass // class or trait
final case class Test(id: Int, entries: List[TestEntry]) extends TestClass // <-- list
case class ContainsTestClass(testClass: TestClass)
class GithubIssue234 extends FunSuite with Matchers {
test("RecordFormat macro for List - diverging implicit expansion for type #234") {
val format: RecordFormat[ContainsTestClass] = RecordFormat[ContainsTestClass]
}
}
|
51zero/avro4s
|
avro4s-core/src/test/scala/com/sksamuel/avro4s/github/GithubIssue234.scala
|
Scala
|
mit
| 551
|
/**
* Copyright (C) ${project.inceptionYear} TU Berlin (alexander.alexandrov@tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tu_berlin.dima.experiments.flink.hashagg.flink
import org.apache.flink.api.common.operators.base.ReduceOperatorBase.CombineHint
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem
/** Get the largest value per group based on lexicographic ordering. */
object WorkloadY {
def main(args: Array[String]) {
if (args.length != 3) {
Console.err.println("Usage: <jar> combiner-strategy inputPath outputPath")
System.exit(-1)
}
val combineHint = args(0).toLowerCase match {
case "hash" =>
CombineHint.HASH
case "sort" =>
CombineHint.SORT
case _ =>
CombineHint.OPTIMIZER_CHOOSES
}
val inputPath = args(1)
val outputPath = args(2)
val env = ExecutionEnvironment.getExecutionEnvironment
env
.readCsvFile[(Long, String)](inputPath)
.groupBy(0)
.reduce((x, y) => (x._1, if (x._2 > y._2) x._2 else y._2), combineHint)
.writeAsCsv(outputPath, writeMode = FileSystem.WriteMode.OVERWRITE)
env.execute(s"WorkloadY[${combineHint.toString.toLowerCase}]")
}
}
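// Plain-Scala sketch (no Flink dependency, not part of the original job) of the same per-group
// reduction: for every key, keep the lexicographically largest string value, which is what the
// reduce function above computes for each group.
object WorkloadYSketch {
  def maxPerGroup(rows: Seq[(Long, String)]): Map[Long, String] =
    rows.groupBy(_._1).map { case (key, group) => key -> group.map(_._2).max }

  def main(args: Array[String]): Unit =
    // contains 1 -> "pear" (since "pear" > "apple" lexicographically) and 2 -> "banana"
    println(maxPerGroup(Seq(1L -> "apple", 1L -> "pear", 2L -> "banana")))
}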
|
TU-Berlin-DIMA/flink-hashagg
|
flink-hashagg-flink-jobs/src/main/scala/de/tu_berlin/dima/experiments/flink/hashagg/flink/WorkloadY.scala
|
Scala
|
apache-2.0
| 1,761
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.IOException
import java.net.Socket
import java.util.concurrent.CopyOnWriteArrayList
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.collection.JavaConversions._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.net.NetUtils
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.util.ShutdownHookManager
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.ipc.YarnRPC
import org.apache.hadoop.yarn.util.{ConverterUtils, Records}
import org.apache.spark.{SparkConf, SparkContext, Logging}
import org.apache.spark.util.Utils
class ApplicationMaster(args: ApplicationMasterArguments, conf: Configuration,
sparkConf: SparkConf) extends Logging {
def this(args: ApplicationMasterArguments, sparkConf: SparkConf) =
this(args, new Configuration(), sparkConf)
def this(args: ApplicationMasterArguments) = this(args, new SparkConf())
private val rpc: YarnRPC = YarnRPC.create(conf)
private var resourceManager: AMRMProtocol = _
private var appAttemptId: ApplicationAttemptId = _
private var userThread: Thread = _
private val yarnConf: YarnConfiguration = new YarnConfiguration(conf)
private val fs = FileSystem.get(yarnConf)
private var yarnAllocator: YarnAllocationHandler = _
private var isFinished: Boolean = false
private var uiAddress: String = _
private val maxAppAttempts: Int = conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES,
YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES)
private var isLastAMRetry: Boolean = true
// Default to numWorkers * 2, with minimum of 3
private val maxNumWorkerFailures = sparkConf.getInt("spark.yarn.max.worker.failures",
math.max(args.numWorkers * 2, 3))
def run() {
    // Set up the directories so things go to YARN-approved directories rather
    // than user-specified ones and /tmp.
System.setProperty("spark.local.dir", getLocalDirs())
// set the web ui port to be ephemeral for yarn so we don't conflict with
// other spark processes running on the same box
System.setProperty("spark.ui.port", "0")
    // Use priority 30 as it's higher than HDFS. It's the same priority that MapReduce uses.
ShutdownHookManager.get().addShutdownHook(new AppMasterShutdownHook(this), 30)
appAttemptId = getApplicationAttemptId()
isLastAMRetry = appAttemptId.getAttemptId() >= maxAppAttempts
resourceManager = registerWithResourceManager()
// Workaround until hadoop moves to something which has
// https://issues.apache.org/jira/browse/HADOOP-8406 - fixed in (2.0.2-alpha but no 0.23 line)
// ignore result.
// This does not, unfortunately, always work reliably ... but alleviates the bug a lot of times
// Hence args.workerCores = numCore disabled above. Any better option?
// Compute number of threads for akka
//val minimumMemory = appMasterResponse.getMinimumResourceCapability().getMemory()
//if (minimumMemory > 0) {
// val mem = args.workerMemory + YarnAllocationHandler.MEMORY_OVERHEAD
// val numCore = (mem / minimumMemory) + (if (0 != (mem % minimumMemory)) 1 else 0)
// if (numCore > 0) {
// do not override - hits https://issues.apache.org/jira/browse/HADOOP-8406
// TODO: Uncomment when hadoop is on a version which has this fixed.
// args.workerCores = numCore
// }
//}
// org.apache.hadoop.io.compress.CompressionCodecFactory.getCodecClasses(conf)
ApplicationMaster.register(this)
// Start the user's JAR
userThread = startUserClass()
    // This is a bit hacky, but we need to wait until the spark.driver.port property has
    // been set by the Thread executing the user class.
waitForSparkContextInitialized()
// Do this after spark master is up and SparkContext is created so that we can register UI Url
val appMasterResponse: RegisterApplicationMasterResponse = registerApplicationMaster()
// Allocate all containers
allocateWorkers()
// Wait for the user class to Finish
userThread.join()
System.exit(0)
}
/** Get the Yarn approved local directories. */
private def getLocalDirs(): String = {
// Hadoop 0.23 and 2.x have different Environment variable names for the
    // local dirs, so let's check both. We assume one of the two is set.
// LOCAL_DIRS => 2.X, YARN_LOCAL_DIRS => 0.23.X
val localDirs = Option(System.getenv("YARN_LOCAL_DIRS"))
.orElse(Option(System.getenv("LOCAL_DIRS")))
localDirs match {
case None => throw new Exception("Yarn Local dirs can't be empty")
case Some(l) => l
}
}
private def getApplicationAttemptId(): ApplicationAttemptId = {
val envs = System.getenv()
val containerIdString = envs.get(ApplicationConstants.AM_CONTAINER_ID_ENV)
val containerId = ConverterUtils.toContainerId(containerIdString)
val appAttemptId = containerId.getApplicationAttemptId()
logInfo("ApplicationAttemptId: " + appAttemptId)
appAttemptId
}
private def registerWithResourceManager(): AMRMProtocol = {
val rmAddress = NetUtils.createSocketAddr(yarnConf.get(
YarnConfiguration.RM_SCHEDULER_ADDRESS,
YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS))
logInfo("Connecting to ResourceManager at " + rmAddress)
rpc.getProxy(classOf[AMRMProtocol], rmAddress, conf).asInstanceOf[AMRMProtocol]
}
private def registerApplicationMaster(): RegisterApplicationMasterResponse = {
logInfo("Registering the ApplicationMaster")
val appMasterRequest = Records.newRecord(classOf[RegisterApplicationMasterRequest])
.asInstanceOf[RegisterApplicationMasterRequest]
appMasterRequest.setApplicationAttemptId(appAttemptId)
// Setting this to master host,port - so that the ApplicationReport at client has some
// sensible info.
// Users can then monitor stderr/stdout on that node if required.
appMasterRequest.setHost(Utils.localHostName())
appMasterRequest.setRpcPort(0)
appMasterRequest.setTrackingUrl(uiAddress)
resourceManager.registerApplicationMaster(appMasterRequest)
}
private def startUserClass(): Thread = {
logInfo("Starting the user JAR in a separate Thread")
val mainMethod = Class.forName(
args.userClass,
false /* initialize */ ,
Thread.currentThread.getContextClassLoader).getMethod("main", classOf[Array[String]])
val t = new Thread {
override def run() {
var successed = false
try {
// Copy
var mainArgs: Array[String] = new Array[String](args.userArgs.size)
args.userArgs.copyToArray(mainArgs, 0, args.userArgs.size)
mainMethod.invoke(null, mainArgs)
// some job script has "System.exit(0)" at the end, for example SparkPi, SparkLR
// userThread will stop here unless it has uncaught exception thrown out
// It need shutdown hook to set SUCCEEDED
successed = true
} finally {
logDebug("finishing main")
isLastAMRetry = true
if (successed) {
ApplicationMaster.this.finishApplicationMaster(FinalApplicationStatus.SUCCEEDED)
} else {
ApplicationMaster.this.finishApplicationMaster(FinalApplicationStatus.FAILED)
}
}
}
}
t.start()
t
}
  // this needs to happen before allocateWorkers
private def waitForSparkContextInitialized() {
logInfo("Waiting for spark context initialization")
try {
var sparkContext: SparkContext = null
ApplicationMaster.sparkContextRef.synchronized {
var count = 0
val waitTime = 10000L
val numTries = sparkConf.getInt("spark.yarn.ApplicationMaster.waitTries", 10)
while (ApplicationMaster.sparkContextRef.get() == null && count < numTries) {
logInfo("Waiting for spark context initialization ... " + count)
count = count + 1
ApplicationMaster.sparkContextRef.wait(waitTime)
}
sparkContext = ApplicationMaster.sparkContextRef.get()
assert(sparkContext != null || count >= numTries)
if (null != sparkContext) {
uiAddress = sparkContext.ui.appUIAddress
this.yarnAllocator = YarnAllocationHandler.newAllocator(
yarnConf,
resourceManager,
appAttemptId,
args,
sparkContext.preferredNodeLocationData,
sparkContext.getConf)
} else {
logWarning("Unable to retrieve sparkContext inspite of waiting for %d, numTries = %d".
format(count * waitTime, numTries))
this.yarnAllocator = YarnAllocationHandler.newAllocator(
yarnConf,
resourceManager,
appAttemptId,
args,
sparkContext.getConf)
}
}
} finally {
      // In case of exceptions, etc. - ensure that count is at least ALLOCATOR_LOOP_WAIT_COUNT,
      // so that the loop (in ApplicationMaster.sparkContextInitialized) breaks.
ApplicationMaster.incrementAllocatorLoop(ApplicationMaster.ALLOCATOR_LOOP_WAIT_COUNT)
}
}
private def allocateWorkers() {
try {
logInfo("Allocating " + args.numWorkers + " workers.")
// Wait until all containers have finished
// TODO: This is a bit ugly. Can we make it nicer?
// TODO: Handle container failure
        // Exit the loop if the user thread exits.
while (yarnAllocator.getNumWorkersRunning < args.numWorkers && userThread.isAlive) {
if (yarnAllocator.getNumWorkersFailed >= maxNumWorkerFailures) {
finishApplicationMaster(FinalApplicationStatus.FAILED,
"max number of worker failures reached")
}
yarnAllocator.allocateContainers(
math.max(args.numWorkers - yarnAllocator.getNumWorkersRunning, 0))
ApplicationMaster.incrementAllocatorLoop(1)
Thread.sleep(100)
}
} finally {
// In case of exceptions, etc - ensure that count is at least ALLOCATOR_LOOP_WAIT_COUNT,
// so that the loop in ApplicationMaster#sparkContextInitialized() breaks.
ApplicationMaster.incrementAllocatorLoop(ApplicationMaster.ALLOCATOR_LOOP_WAIT_COUNT)
}
logInfo("All workers have launched.")
    // Launch a progress reporter thread, otherwise the app will be killed after the
    // expiry timeout (default: 10 mins).
// TODO(harvey): Verify the timeout
if (userThread.isAlive) {
// Ensure that progress is sent before YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS elapses.
val timeoutInterval = yarnConf.getInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 120000)
// we want to be reasonably responsive without causing too many requests to RM.
val schedulerInterval =
sparkConf.getLong("spark.yarn.scheduler.heartbeat.interval-ms", 5000)
// must be <= timeoutInterval / 2.
val interval = math.min(timeoutInterval / 2, schedulerInterval)
launchReporterThread(interval)
}
}
private def launchReporterThread(_sleepTime: Long): Thread = {
val sleepTime = if (_sleepTime <= 0) 0 else _sleepTime
val t = new Thread {
override def run() {
while (userThread.isAlive) {
if (yarnAllocator.getNumWorkersFailed >= maxNumWorkerFailures) {
finishApplicationMaster(FinalApplicationStatus.FAILED,
"max number of worker failures reached")
}
val missingWorkerCount = args.numWorkers - yarnAllocator.getNumWorkersRunning
if (missingWorkerCount > 0) {
logInfo("Allocating %d containers to make up for (potentially) lost containers".
format(missingWorkerCount))
yarnAllocator.allocateContainers(missingWorkerCount)
}
else sendProgress()
Thread.sleep(sleepTime)
}
}
}
// Setting to daemon status, though this is usually not a good idea.
t.setDaemon(true)
t.start()
logInfo("Started progress reporter thread - sleep time : " + sleepTime)
t
}
private def sendProgress() {
logDebug("Sending progress")
// Simulated with an allocate request with no nodes requested ...
yarnAllocator.allocateContainers(0)
}
/*
def printContainers(containers: List[Container]) = {
for (container <- containers) {
logInfo("Launching shell command on a new container."
+ ", containerId=" + container.getId()
+ ", containerNode=" + container.getNodeId().getHost()
+ ":" + container.getNodeId().getPort()
+ ", containerNodeURI=" + container.getNodeHttpAddress()
+ ", containerState" + container.getState()
+ ", containerResourceMemory"
+ container.getResource().getMemory())
}
}
*/
def finishApplicationMaster(status: FinalApplicationStatus, diagnostics: String = "") {
synchronized {
if (isFinished) {
return
}
isFinished = true
}
logInfo("finishApplicationMaster with " + status)
val finishReq = Records.newRecord(classOf[FinishApplicationMasterRequest])
.asInstanceOf[FinishApplicationMasterRequest]
finishReq.setAppAttemptId(appAttemptId)
finishReq.setFinishApplicationStatus(status)
finishReq.setDiagnostics(diagnostics)
// Set tracking url to empty since we don't have a history server.
finishReq.setTrackingUrl("")
resourceManager.finishApplicationMaster(finishReq)
}
/**
* Clean up the staging directory.
*/
private def cleanupStagingDir() {
var stagingDirPath: Path = null
try {
val preserveFiles = sparkConf.get("spark.yarn.preserve.staging.files", "false").toBoolean
if (!preserveFiles) {
stagingDirPath = new Path(System.getenv("SPARK_YARN_STAGING_DIR"))
if (stagingDirPath == null) {
logError("Staging directory is null")
return
}
logInfo("Deleting staging directory " + stagingDirPath)
fs.delete(stagingDirPath, true)
}
} catch {
case ioe: IOException =>
logError("Failed to cleanup staging dir " + stagingDirPath, ioe)
}
}
// The shutdown hook that runs when a signal is received AND during normal close of the JVM.
class AppMasterShutdownHook(appMaster: ApplicationMaster) extends Runnable {
def run() {
logInfo("AppMaster received a signal.")
// we need to clean up staging dir before HDFS is shut down
// make sure we don't delete it until this is the last AM
if (appMaster.isLastAMRetry) appMaster.cleanupStagingDir()
}
}
}
object ApplicationMaster {
// Number of times to wait for the allocator loop to complete.
// Each loop iteration waits for 100ms, so maximum of 3 seconds.
  // This is to ensure that we have a reasonable number of containers before we start
// TODO: Currently, task to container is computed once (TaskSetManager) - which need not be
// optimal as more containers are available. Might need to handle this better.
private val ALLOCATOR_LOOP_WAIT_COUNT = 30
def incrementAllocatorLoop(by: Int) {
val count = yarnAllocatorLoop.getAndAdd(by)
if (count >= ALLOCATOR_LOOP_WAIT_COUNT) {
yarnAllocatorLoop.synchronized {
// to wake threads off wait ...
yarnAllocatorLoop.notifyAll()
}
}
}
private val applicationMasters = new CopyOnWriteArrayList[ApplicationMaster]()
def register(master: ApplicationMaster) {
applicationMasters.add(master)
}
val sparkContextRef: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null /* initialValue */)
val yarnAllocatorLoop: AtomicInteger = new AtomicInteger(0)
def sparkContextInitialized(sc: SparkContext): Boolean = {
var modified = false
sparkContextRef.synchronized {
modified = sparkContextRef.compareAndSet(null, sc)
sparkContextRef.notifyAll()
}
    // Add a shutdown hook - as a best-effort measure in case users do not call sc.stop or
    // System.exit.
// Should not really have to do this, but it helps YARN to evict resources earlier.
    // Not to mention, it prevents the Client from declaring failure even though we exited properly.
// Note that this will unfortunately not properly clean up the staging files because it gets
// called too late, after the filesystem is already shutdown.
if (modified) {
Runtime.getRuntime().addShutdownHook(new Thread with Logging {
// This is not only logs, but also ensures that log system is initialized for this instance
// when we are actually 'run'-ing.
logInfo("Adding shutdown hook for context " + sc)
override def run() {
logInfo("Invoking sc stop from shutdown hook")
sc.stop()
// Best case ...
for (master <- applicationMasters) {
master.finishApplicationMaster(FinalApplicationStatus.SUCCEEDED)
}
}
})
}
    // Wait for initialization to complete and at least 'some' nodes to be allocated.
yarnAllocatorLoop.synchronized {
while (yarnAllocatorLoop.get() <= ALLOCATOR_LOOP_WAIT_COUNT) {
yarnAllocatorLoop.wait(1000L)
}
}
modified
}
def main(argStrings: Array[String]) {
val args = new ApplicationMasterArguments(argStrings)
new ApplicationMaster(args).run()
}
}
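// Hedged, self-contained sketch (not part of the original class) of the wait/notify pattern used
// by yarnAllocatorLoop above: one thread blocks on an AtomicInteger used as a monitor until
// another thread has pushed it past a threshold and called notifyAll(); the timed wait means a
// missed notification only delays the waiter, it never deadlocks it.
object AllocatorLoopWaitSketch {
  import java.util.concurrent.atomic.AtomicInteger

  private val WAIT_COUNT = 30
  private val counter = new AtomicInteger(0)

  def increment(by: Int) {
    val previous = counter.getAndAdd(by)
    if (previous >= WAIT_COUNT) {
      counter.synchronized {
        counter.notifyAll()
      }
    }
  }

  def awaitReady() {
    counter.synchronized {
      while (counter.get() <= WAIT_COUNT) {
        counter.wait(1000L)
      }
    }
  }

  def main(args: Array[String]) {
    val waiter = new Thread(new Runnable {
      override def run() {
        awaitReady()
        println("allocator loop finished")
      }
    })
    waiter.start()
    (1 to 4).foreach(_ => increment(10)) // pushes the counter from 0 past WAIT_COUNT
    waiter.join()
  }
}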
|
sryza/spark
|
yarn/alpha/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala
|
Scala
|
apache-2.0
| 18,316
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.bijection
import scala.util.Success
/**
 * A common injection on numbers: N -> (m = N mod K, (N - m) / K). The first element of the
 * result tuple is always in [0, modulus).
*/
class IntModDivInjection(val modulus: Int) extends Injection[Int, (Int, Int)] {
require(modulus > 0, "Modulus must be positive: " + modulus)
override def apply(n: Int) = {
val cmod = n % modulus
val mod = if (cmod < 0) cmod + modulus else cmod
val div = n / modulus
val toNegInf = if ((n < 0) && (mod != 0)) div - 1 else div
(mod, toNegInf)
}
private val maxDiv = Int.MaxValue / modulus
private val minDiv = (Int.MinValue / modulus) - 1
override def invert(moddiv: (Int, Int)) = {
val (mod, div) = moddiv
val res = div * modulus + mod
if (
mod >= 0 && mod < modulus && div <= maxDiv && div >= minDiv &&
// We could wrap around if we get bad input:
((res >= 0) == (div >= 0))
) Success(res)
else InversionFailure.failedAttempt(moddiv)
}
}
/**
 * A common injection on numbers: N -> (m = N mod K, (N - m) / K). The first element of the
 * result tuple is always in [0, modulus).
*/
class LongModDivInjection(val modulus: Long) extends Injection[Long, (Long, Long)] {
require(modulus > 0, "Modulus must be positive: " + modulus)
override def apply(n: Long) = {
val cmod = n % modulus
val mod = if (cmod < 0) cmod + modulus else cmod
val div = n / modulus
val toNegInf = if ((n < 0) && (mod != 0)) div - 1L else div
(mod, toNegInf)
}
private val maxDiv = Long.MaxValue / modulus
private val minDiv = (Long.MinValue / modulus) - 1L
override def invert(moddiv: (Long, Long)) = {
val (mod, div) = moddiv
val res = div * modulus + mod
if (
mod >= 0 && mod < modulus && div <= maxDiv && div >= minDiv &&
((res >= 0) == (div >= 0))
) Success(res)
else InversionFailure.failedAttempt(moddiv)
}
}
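// Worked example (`ModDivDemo` is a hypothetical object added only for illustration): with
// modulus 3, -7 maps to (-7 mod 3, floorDiv(-7, 3)) = (2, -3), and inverting recovers -7 because
// -3 * 3 + 2 = -7. A tuple whose first element is outside [0, modulus) is rejected.
object ModDivDemo {
  def main(args: Array[String]): Unit = {
    val inj = new IntModDivInjection(3)
    val pair = inj(-7)          // (2,-3): the remainder is always in [0, modulus)
    println(pair)               // (2,-3)
    println(inj.invert(pair))   // Success(-7)
    println(inj.invert((3, 0))) // Failure(...): 3 is not a valid remainder mod 3
  }
}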
|
twitter/bijection
|
bijection-core/src/main/scala/com/twitter/bijection/ModDivInjection.scala
|
Scala
|
apache-2.0
| 2,467
|
import cats.instances.future._
import cats.syntax.functor._
import com.bot4s.telegram.future.Polling
import com.bot4s.telegram.methods._
import scala.concurrent.Future
import com.bot4s.telegram.api.declarative.Commands
class MarkdownBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] {
onCommand("markdownV2") { implicit msg =>
reply(
"""
*MardownV2*
*bold \\*text*
_italic \\*text_
__underline__
~strikethrough~
*bold _italic bold ~italic bold strikethrough~ __underline italic bold___ bold*
[inline URL](http://www.bot4s.com/)
[inline mention of a user](tg://user?id=123456789)
`inline fixed-width code`
```
pre-formatted fixed-width code block
```
```python
pre-formatted fixed-width code block written in the Python programming language
```
""",
Some(ParseMode.MarkdownV2)
).void
}
onCommand("markdown") { implicit msg =>
reply(
"""
*Mardown parsing*
*bold text*
_italic text_
[inline URL](http://www.bot4s.com/)
[inline mention of a user](tg://user?id=123456789)
`inline fixed-width code`
```
pre-formatted fixed-width code block
```
```python
pre-formatted fixed-width code block written in the Python programming language
```
""",
Some(ParseMode.Markdown)
).void
}
onCommand("html") { implicit msg =>
reply(
"""
<b>HTML Parser</b>
<b>bold</b>, <strong>bold</strong>
<i>italic</i>, <em>italic</em>
<u>underline</u>, <ins>underline</ins>
<s>strikethrough</s>, <strike>strikethrough</strike>, <del>strikethrough</del>
<b>bold <i>italic bold <s>italic bold strikethrough</s> <u>underline italic bold</u></i> bold</b>
<a href="http://www.bot4s.com/">inline URL</a>
<a href="tg://user?id=123456789">inline mention of a user</a>
<code>inline fixed-width code</code>
<pre>pre-formatted fixed-width code block</pre>
<pre><code class="language-python">pre-formatted fixed-width code block written in the Python programming language</code></pre>
""",
Some(ParseMode.HTML)
).void
}
}
|
mukel/telegrambot4s
|
examples/src/MarkdownBot.scala
|
Scala
|
apache-2.0
| 2,001
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.nio.charset.StandardCharsets
import java.time.{Duration, Instant, LocalDate, LocalDateTime, Period, ZoneOffset}
import java.time.temporal.ChronoUnit
import java.util.TimeZone
import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, ScalaReflection}
import org.apache.spark.sql.catalyst.encoders.ExamplePointUDT
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
class LiteralExpressionSuite extends SparkFunSuite with ExpressionEvalHelper {
test("null") {
checkEvaluation(Literal.create(null, BooleanType), null)
checkEvaluation(Literal.create(null, ByteType), null)
checkEvaluation(Literal.create(null, ShortType), null)
checkEvaluation(Literal.create(null, IntegerType), null)
checkEvaluation(Literal.create(null, LongType), null)
checkEvaluation(Literal.create(null, FloatType), null)
checkEvaluation(Literal.create(null, DoubleType), null)
checkEvaluation(Literal.create(null, StringType), null)
checkEvaluation(Literal.create(null, BinaryType), null)
checkEvaluation(Literal.create(null, DecimalType.USER_DEFAULT), null)
checkEvaluation(Literal.create(null, DateType), null)
checkEvaluation(Literal.create(null, TimestampType), null)
checkEvaluation(Literal.create(null, CalendarIntervalType), null)
checkEvaluation(Literal.create(null, YearMonthIntervalType), null)
checkEvaluation(Literal.create(null, DayTimeIntervalType()), null)
checkEvaluation(Literal.create(null, ArrayType(ByteType, true)), null)
checkEvaluation(Literal.create(null, ArrayType(StringType, true)), null)
checkEvaluation(Literal.create(null, MapType(StringType, IntegerType)), null)
checkEvaluation(Literal.create(null, StructType(Seq.empty)), null)
}
test("default") {
checkEvaluation(Literal.default(BooleanType), false)
checkEvaluation(Literal.default(ByteType), 0.toByte)
checkEvaluation(Literal.default(ShortType), 0.toShort)
checkEvaluation(Literal.default(IntegerType), 0)
checkEvaluation(Literal.default(LongType), 0L)
checkEvaluation(Literal.default(FloatType), 0.0f)
checkEvaluation(Literal.default(DoubleType), 0.0)
checkEvaluation(Literal.default(StringType), "")
checkEvaluation(Literal.default(BinaryType), "".getBytes(StandardCharsets.UTF_8))
checkEvaluation(Literal.default(DecimalType.USER_DEFAULT), Decimal(0))
checkEvaluation(Literal.default(DecimalType.SYSTEM_DEFAULT), Decimal(0))
withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> "false") {
checkEvaluation(Literal.default(DateType), DateTimeUtils.toJavaDate(0))
checkEvaluation(Literal.default(TimestampType), DateTimeUtils.toJavaTimestamp(0L))
}
withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true") {
checkEvaluation(Literal.default(DateType), LocalDate.ofEpochDay(0))
checkEvaluation(Literal.default(TimestampType), Instant.ofEpochSecond(0))
}
checkEvaluation(Literal.default(CalendarIntervalType), new CalendarInterval(0, 0, 0L))
checkEvaluation(Literal.default(YearMonthIntervalType), 0)
checkEvaluation(Literal.default(DayTimeIntervalType()), 0L)
checkEvaluation(Literal.default(ArrayType(StringType)), Array())
checkEvaluation(Literal.default(MapType(IntegerType, StringType)), Map())
checkEvaluation(Literal.default(StructType(StructField("a", StringType) :: Nil)), Row(""))
// ExamplePointUDT.sqlType is ArrayType(DoubleType, false).
checkEvaluation(Literal.default(new ExamplePointUDT), Array())
}
test("boolean literals") {
checkEvaluation(Literal(true), true)
checkEvaluation(Literal(false), false)
checkEvaluation(Literal.create(true), true)
checkEvaluation(Literal.create(false), false)
}
test("int literals") {
List(0, 1, Int.MinValue, Int.MaxValue).foreach { d =>
checkEvaluation(Literal(d), d)
checkEvaluation(Literal(d.toLong), d.toLong)
checkEvaluation(Literal(d.toShort), d.toShort)
checkEvaluation(Literal(d.toByte), d.toByte)
checkEvaluation(Literal.create(d), d)
checkEvaluation(Literal.create(d.toLong), d.toLong)
checkEvaluation(Literal.create(d.toShort), d.toShort)
checkEvaluation(Literal.create(d.toByte), d.toByte)
}
checkEvaluation(Literal(Long.MinValue), Long.MinValue)
checkEvaluation(Literal(Long.MaxValue), Long.MaxValue)
checkEvaluation(Literal.create(Long.MinValue), Long.MinValue)
checkEvaluation(Literal.create(Long.MaxValue), Long.MaxValue)
}
test("double literals") {
List(0.0, -0.0, Double.NegativeInfinity, Double.PositiveInfinity).foreach { d =>
checkEvaluation(Literal(d), d)
checkEvaluation(Literal(d.toFloat), d.toFloat)
checkEvaluation(Literal.create(d), d)
checkEvaluation(Literal.create(d.toFloat), d.toFloat)
}
checkEvaluation(Literal(Double.MinValue), Double.MinValue)
checkEvaluation(Literal(Double.MaxValue), Double.MaxValue)
checkEvaluation(Literal(Float.MinValue), Float.MinValue)
checkEvaluation(Literal(Float.MaxValue), Float.MaxValue)
checkEvaluation(Literal.create(Double.MinValue), Double.MinValue)
checkEvaluation(Literal.create(Double.MaxValue), Double.MaxValue)
checkEvaluation(Literal.create(Float.MinValue), Float.MinValue)
checkEvaluation(Literal.create(Float.MaxValue), Float.MaxValue)
}
test("string literals") {
checkEvaluation(Literal(""), "")
checkEvaluation(Literal("test"), "test")
checkEvaluation(Literal("\\u0000"), "\\u0000")
checkEvaluation(Literal.create(""), "")
checkEvaluation(Literal.create("test"), "test")
checkEvaluation(Literal.create("\\u0000"), "\\u0000")
}
test("sum two literals") {
checkEvaluation(Add(Literal(1), Literal(1)), 2)
checkEvaluation(Add(Literal.create(1), Literal.create(1)), 2)
}
test("binary literals") {
checkEvaluation(Literal.create(new Array[Byte](0), BinaryType), new Array[Byte](0))
checkEvaluation(Literal.create(new Array[Byte](2), BinaryType), new Array[Byte](2))
checkEvaluation(Literal.create(new Array[Byte](0)), new Array[Byte](0))
checkEvaluation(Literal.create(new Array[Byte](2)), new Array[Byte](2))
}
test("decimal") {
List(-0.0001, 0.0, 0.001, 1.2, 1.1111, 5).foreach { d =>
checkEvaluation(Literal(Decimal(d)), Decimal(d))
checkEvaluation(Literal(Decimal(d.toInt)), Decimal(d.toInt))
checkEvaluation(Literal(Decimal(d.toLong)), Decimal(d.toLong))
checkEvaluation(Literal(Decimal((d * 1000L).toLong, 10, 3)),
Decimal((d * 1000L).toLong, 10, 3))
checkEvaluation(Literal(BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal(new java.math.BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal.create(Decimal(d)), Decimal(d))
checkEvaluation(Literal.create(Decimal(d.toInt)), Decimal(d.toInt))
checkEvaluation(Literal.create(Decimal(d.toLong)), Decimal(d.toLong))
checkEvaluation(Literal.create(Decimal((d * 1000L).toLong, 10, 3)),
Decimal((d * 1000L).toLong, 10, 3))
checkEvaluation(Literal.create(BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal.create(new java.math.BigDecimal(d.toString)), Decimal(d))
}
}
private def toCatalyst[T: TypeTag](value: T): Any = {
val ScalaReflection.Schema(dataType, _) = ScalaReflection.schemaFor[T]
CatalystTypeConverters.createToCatalystConverter(dataType)(value)
}
test("array") {
def checkArrayLiteral[T: TypeTag](a: Array[T]): Unit = {
checkEvaluation(Literal(a), toCatalyst(a))
checkEvaluation(Literal.create(a), toCatalyst(a))
}
checkArrayLiteral(Array(1, 2, 3))
checkArrayLiteral(Array("a", "b", "c"))
checkArrayLiteral(Array(1.0, 4.0))
checkArrayLiteral(Array(new CalendarInterval(1, 0, 0), new CalendarInterval(0, 1, 0)))
val arr = collection.mutable.WrappedArray.make(Array(1.0, 4.0))
checkEvaluation(Literal(arr), toCatalyst(arr))
}
test("seq") {
def checkSeqLiteral[T: TypeTag](a: Seq[T]): Unit = {
checkEvaluation(Literal.create(a), toCatalyst(a))
}
checkSeqLiteral(Seq(1, 2, 3))
checkSeqLiteral(Seq("a", "b", "c"))
checkSeqLiteral(Seq(1.0, 4.0))
checkSeqLiteral(Seq(new CalendarInterval(1, 0, 0), new CalendarInterval(0, 1, 0)))
checkSeqLiteral(Seq(Period.ZERO, Period.ofMonths(1)))
checkSeqLiteral(Seq(Duration.ZERO, Duration.ofDays(1)))
}
test("map") {
def checkMapLiteral[T: TypeTag](m: T): Unit = {
checkEvaluation(Literal.create(m), toCatalyst(m))
}
checkMapLiteral(Map("a" -> 1, "b" -> 2, "c" -> 3))
checkMapLiteral(Map("1" -> 1.0, "2" -> 2.0, "3" -> 3.0))
checkMapLiteral(Map(Period.ofMonths(1) -> Duration.ZERO))
assert(Literal.create(Map("a" -> 1)).toString === "map(keys: [a], values: [1])")
}
test("struct") {
def checkStructLiteral[T: TypeTag](s: T): Unit = {
checkEvaluation(Literal.create(s), toCatalyst(s))
}
checkStructLiteral((1, 3.0, "abcde"))
checkStructLiteral(("de", 1, 2.0f))
checkStructLiteral((1, ("fgh", 3.0)))
checkStructLiteral((Period.ZERO, ("abc", Duration.ofDays(1))))
}
test("unsupported types (map and struct) in Literal.apply") {
def checkUnsupportedTypeInLiteral(v: Any): Unit = {
val errMsgMap = intercept[RuntimeException] {
Literal(v)
}
assert(errMsgMap.getMessage.startsWith("Unsupported literal type"))
}
checkUnsupportedTypeInLiteral(Map("key1" -> 1, "key2" -> 2))
checkUnsupportedTypeInLiteral(("mike", 29, 1.0))
}
test("SPARK-24571: char literals") {
checkEvaluation(Literal('X'), "X")
checkEvaluation(Literal.create('0'), "0")
    checkEvaluation(Literal('\u0000'), "\u0000")
    checkEvaluation(Literal.create('\n'), "\n")
}
test("SPARK-33390: Make Literal support char array") {
checkEvaluation(Literal(Array('h', 'e', 'l', 'l', 'o')), "hello")
checkEvaluation(Literal(Array("hello".toCharArray)), Array("hello"))
// scalastyle:off
    checkEvaluation(Literal(Array('测', '试')), "测试")
    checkEvaluation(Literal(Array('a', '测', 'b', '试', 'c')), "a测b试c")
// scalastyle:on
}
test("construct literals from java.time.LocalDate") {
Seq(
LocalDate.of(1, 1, 1),
LocalDate.of(1582, 10, 1),
LocalDate.of(1600, 7, 30),
LocalDate.of(1969, 12, 31),
LocalDate.of(1970, 1, 1),
LocalDate.of(2019, 3, 20),
LocalDate.of(2100, 5, 17)).foreach { localDate =>
checkEvaluation(Literal(localDate), localDate)
}
}
test("construct literals from arrays of java.time.LocalDate") {
withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true") {
val localDate0 = LocalDate.of(2019, 3, 20)
checkEvaluation(Literal(Array(localDate0)), Array(localDate0))
val localDate1 = LocalDate.of(2100, 4, 22)
checkEvaluation(Literal(Array(localDate0, localDate1)), Array(localDate0, localDate1))
}
}
test("construct literals from java.time.Instant") {
Seq(
Instant.parse("0001-01-01T00:00:00Z"),
Instant.parse("1582-10-01T01:02:03Z"),
Instant.parse("1970-02-28T11:12:13Z"),
Instant.ofEpochMilli(0),
Instant.parse("2019-03-20T10:15:30Z"),
Instant.parse("2100-12-31T22:17:31Z")).foreach { instant =>
checkEvaluation(Literal(instant), instant)
}
}
test("construct literals from arrays of java.time.Instant") {
withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true") {
val instant0 = Instant.ofEpochMilli(0)
checkEvaluation(Literal(Array(instant0)), Array(instant0))
val instant1 = Instant.parse("2019-03-20T10:15:30Z")
checkEvaluation(Literal(Array(instant0, instant1)), Array(instant0, instant1))
}
}
private def withTimeZones(
sessionTimeZone: String,
systemTimeZone: String)(f: => Unit): Unit = {
withSQLConf(
SQLConf.SESSION_LOCAL_TIMEZONE.key -> sessionTimeZone,
SQLConf.DATETIME_JAVA8API_ENABLED.key -> "true") {
val originTimeZone = TimeZone.getDefault
try {
TimeZone.setDefault(TimeZone.getTimeZone(systemTimeZone))
f
} finally {
TimeZone.setDefault(originTimeZone)
}
}
}
test("format timestamp literal using spark.sql.session.timeZone") {
withTimeZones(sessionTimeZone = "GMT+01:00", systemTimeZone = "GMT-08:00") {
val timestamp = LocalDateTime.of(2019, 3, 21, 0, 2, 3, 456000000)
.atZone(ZoneOffset.UTC)
.toInstant
val expected = "TIMESTAMP '2019-03-21 01:02:03.456'"
val literalStr = Literal.create(timestamp).sql
assert(literalStr === expected)
}
}
test("format date literal independently from time zone") {
withTimeZones(sessionTimeZone = "GMT-11:00", systemTimeZone = "GMT-10:00") {
val date = LocalDate.of(2019, 3, 21)
val expected = "DATE '2019-03-21'"
val literalStr = Literal.create(date).sql
assert(literalStr === expected)
}
}
test("SPARK-33860: Make CatalystTypeConverters.convertToCatalyst match special Array value") {
assert(Literal(Array(1, 2, 3)) == Literal.create(Array(1, 2, 3), ArrayType(IntegerType)))
assert(Literal(Array(1L, 2L, 3L)) == Literal.create(Array(1L, 2L, 3L), ArrayType(LongType)))
assert(Literal(Array(1D, 2D, 3D)) == Literal.create(Array(1D, 2D, 3D), ArrayType(DoubleType)))
assert(Literal("123") == Literal.create(Array('1', '2', '3'), StringType))
assert(Literal(Array(1.toByte, 2.toByte, 3.toByte)) ==
Literal.create(Array(1.toByte, 2.toByte, 3.toByte), BinaryType))
assert(Literal(Array("1", "2", "3")) ==
Literal.create(Array("1", "2", "3"), ArrayType(StringType)))
assert(Literal(Array(Period.ofMonths(1))) ==
Literal.create(Array(Period.ofMonths(1)), ArrayType(YearMonthIntervalType)))
}
test("SPARK-34342: Date/Timestamp toString") {
assert(Literal.default(DateType).toString === "1970-01-01")
assert(Literal.default(TimestampType).toString === "1969-12-31 16:00:00")
withTimeZones(sessionTimeZone = "GMT+01:00", systemTimeZone = "GMT-08:00") {
val timestamp = LocalDateTime.of(2021, 2, 3, 16, 50, 3, 456000000)
.atZone(ZoneOffset.UTC)
.toInstant
val literalStr = Literal.create(timestamp).toString
assert(literalStr === "2021-02-03 17:50:03.456")
}
}
test("SPARK-35664: construct literals from java.time.LocalDateTime") {
Seq(
LocalDateTime.of(1, 1, 1, 0, 0, 0, 0),
LocalDateTime.of(2021, 5, 31, 23, 59, 59, 100),
LocalDateTime.of(2020, 2, 29, 23, 50, 57, 9999),
LocalDateTime.parse("9999-12-31T23:59:59.999999")
).foreach { dateTime =>
checkEvaluation(Literal(dateTime), dateTime)
}
}
test("SPARK-34605: construct literals from java.time.Duration") {
Seq(
Duration.ofNanos(0),
Duration.ofSeconds(-1),
Duration.ofNanos(123456000),
Duration.ofDays(106751991),
Duration.ofDays(-106751991)).foreach { duration =>
checkEvaluation(Literal(duration), duration)
}
}
test("SPARK-34605: construct literals from arrays of java.time.Duration") {
val duration0 = Duration.ofDays(2).plusHours(3).plusMinutes(4)
checkEvaluation(Literal(Array(duration0)), Array(duration0))
val duration1 = Duration.ofHours(-1024)
checkEvaluation(Literal(Array(duration0, duration1)), Array(duration0, duration1))
}
test("SPARK-34615: construct literals from java.time.Period") {
Seq(
Period.ofYears(0),
Period.of(-1, 11, 0),
Period.of(1, -11, 0),
Period.ofMonths(Int.MaxValue),
Period.ofMonths(Int.MinValue)).foreach { period =>
checkEvaluation(Literal(period), period)
}
}
test("SPARK-34615: construct literals from arrays of java.time.Period") {
val period0 = Period.ofYears(123).withMonths(456)
checkEvaluation(Literal(Array(period0)), Array(period0))
val period1 = Period.ofMonths(-1024)
checkEvaluation(Literal(Array(period0, period1)), Array(period0, period1))
}
test("SPARK-35099: convert a literal of day-time interval to SQL string") {
Seq(
Duration.ofDays(-1) -> "-1 00:00:00",
Duration.of(10, ChronoUnit.MICROS) -> "0 00:00:00.00001",
Duration.of(MICROS_PER_DAY - 1, ChronoUnit.MICROS) -> "0 23:59:59.999999"
).foreach { case (duration, intervalPayload) =>
val literal = Literal.apply(duration)
val expected = s"INTERVAL '$intervalPayload' DAY TO SECOND"
assert(literal.sql === expected)
assert(literal.toString === expected)
}
}
test("SPARK-35099: convert a literal of year-month interval to SQL string") {
Seq(
Period.ofYears(-1) -> "-1-0",
Period.of(9999, 11, 0) -> "9999-11",
Period.ofMonths(-11) -> "-0-11"
).foreach { case (period, intervalPayload) =>
val literal = Literal.apply(period)
val expected = s"INTERVAL '$intervalPayload' YEAR TO MONTH"
assert(literal.sql === expected)
assert(literal.toString === expected)
}
}
}
|
cloud-fan/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/LiteralExpressionSuite.scala
|
Scala
|
apache-2.0
| 18,107
|
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller
import akka.actor.Actor
import akka.actor.ActorSystem
import akka.japi.Creator
import spray.routing.Directive.pimpApply
import whisk.common.TransactionId
import whisk.core.loadBalancer.LoadBalancerService
import whisk.core.WhiskConfig
import whisk.http.BasicHttpService
import whisk.http.BasicRasService
import spray.routing.Route
import akka.actor.ActorContext
import akka.event.Logging.InfoLevel
import akka.event.Logging.LogLevel
import whisk.core.entitlement.EntitlementProvider
/**
* The Controller is the service that provides the REST API for OpenWhisk.
*
* It extends the BasicRasService so it includes a ping endpoint for monitoring.
*
* Spray sends messages to akka Actors -- the Controller is an Actor, ready to receive messages.
*
* @Idioglossia uses the spray-routing DSL
* http://spray.io/documentation/1.1.3/spray-routing/advanced-topics/understanding-dsl-structure/
*
* @param config A set of properties needed to run an instance of the controller service
* @param instance if running in scale-out, a unique identifier for this instance in the group
* @param verbosity logging verbosity
* @param executionContext Scala runtime support for concurrent operations
*/
class Controller(
config: WhiskConfig,
instance: Int,
loglevel: LogLevel)
extends BasicRasService
with Actor {
// each akka Actor has an implicit context
override def actorRefFactory: ActorContext = context
/**
* A Route in spray is technically a function taking a RequestContext as a parameter.
*
* @Idioglossia The ~ spray DSL operator composes two independent Routes, building a routing
* tree structure.
* @see http://spray.io/documentation/1.2.3/spray-routing/key-concepts/routes/#composing-routes
*/
override def routes(implicit transid: TransactionId): Route = {
// handleRejections wraps the inner Route with a logical error-handler for
// unmatched paths
handleRejections(customRejectionHandler) {
super.routes ~ apiv1.routes
}
}
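    // A minimal sketch of route composition with the spray `~` operator (illustrative only;
    // the paths and responses below are hypothetical and are not endpoints of this controller):
    //
    //   val composed: Route =
    //     path("ping") { complete("pong") } ~
    //     path("invokers" / "health") { complete("ok") }
    //
    // A request is first offered to the left route; if that route rejects it, the right route is tried.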
setVerbosity(loglevel)
info(this, s"starting controller instance ${instance}")
/** The REST APIs. */
private val apiv1 = new RestAPIVersion_v1(config, loglevel, context.system)
}
/**
 * A singleton object that provides a factory to create and start an instance of the Controller service.
*/
object Controller {
// requiredProperties is a Map whose keys define properties that must be bound to
// a value, and whose values are default values. A null value in the Map means there is
// no default value specified, so it must appear in the properties file
def requiredProperties = Map(WhiskConfig.servicePort -> 8080.toString) ++
RestAPIVersion_v1.requiredProperties ++
LoadBalancerService.requiredProperties ++
EntitlementProvider.requiredProperties
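    // For example, the convention above means a map like the following (hypothetical keys,
    // shown only to illustrate the null/default distinction; not the actual property set):
    //
    //   Map(
    //     WhiskConfig.servicePort -> 8080.toString, // has a default, may be omitted from the properties file
    //     "db.host"               -> null           // no default, must be supplied in the properties file
    //   )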
def optionalProperties = EntitlementProvider.optionalProperties
// akka-style factory to create a Controller object
private class ServiceBuilder(config: WhiskConfig, instance: Int) extends Creator[Controller] {
def create = new Controller(config, instance, InfoLevel)
}
def main(args: Array[String]): Unit = {
implicit val system = ActorSystem("controller-actor-system")
// extract configuration data from the environment
val config = new WhiskConfig(requiredProperties, optionalProperties)
        // if deploying multiple instances (scale out), the instance number must be passed as
        // the second argument (TODO: this argument handling seems fragile)
        val instance = if (args.length > 1) args(1).toInt else 0
if (config.isValid) {
val port = config.servicePort.toInt
BasicHttpService.startService(system, "controller", "0.0.0.0", port, new ServiceBuilder(config, instance))
}
}
}
|
nwspeete-ibm/openwhisk
|
core/controller/src/main/scala/whisk/core/controller/Controller.scala
|
Scala
|
apache-2.0
| 4,462
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast}
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.sources.{BaseRelation, HadoopFsRelation, InsertableRelation}
import org.apache.spark.sql.{AnalysisException, SQLContext, SaveMode}
/**
 * Tries to replace [[UnresolvedRelation]]s with [[ResolvedDataSource]]s.
*/
private[sql] class ResolveDataSource(sqlContext: SQLContext) extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case u: UnresolvedRelation if u.tableIdentifier.database.isDefined =>
try {
val resolved = ResolvedDataSource(
sqlContext,
userSpecifiedSchema = None,
partitionColumns = Array(),
provider = u.tableIdentifier.database.get,
options = Map("path" -> u.tableIdentifier.table))
val plan = LogicalRelation(resolved.relation)
u.alias.map(a => Subquery(u.alias.get, plan)).getOrElse(plan)
} catch {
case e: ClassNotFoundException => u
case e: Exception =>
// the provider is valid, but failed to create a logical plan
u.failAnalysis(e.getMessage)
}
}
}
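// A concrete illustration of the rule above (a sketch, not an exhaustive description): for a
// query such as
//
//   SELECT * FROM parquet.`/data/events`
//
// the "database" part of the identifier ("parquet") is taken as the data source provider and
// the "table" part ("/data/events") as its "path" option, so the unresolved relation is
// replaced with a LogicalRelation over that resolved source.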
/**
* A rule to do pre-insert data type casting and field renaming. Before we insert into
* an [[InsertableRelation]], we will use this rule to make sure that
* the columns to be inserted have the correct data type and fields have the correct names.
*/
private[sql] object PreInsertCastAndRename extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Wait until children are resolved.
case p: LogicalPlan if !p.childrenResolved => p
// We are inserting into an InsertableRelation or HadoopFsRelation.
case i @ InsertIntoTable(
l @ LogicalRelation(_: InsertableRelation | _: HadoopFsRelation, _), _, child, _, _) => {
      // First, make sure the data to be inserted has the same number of fields as the
      // schema of the relation.
if (l.output.size != child.output.size) {
sys.error(
s"$l requires that the query in the SELECT clause of the INSERT INTO/OVERWRITE " +
s"statement generates the same number of columns as its schema.")
}
castAndRenameChildOutput(i, l.output, child)
}
}
/** If necessary, cast data types and rename fields to the expected types and names. */
def castAndRenameChildOutput(
insertInto: InsertIntoTable,
expectedOutput: Seq[Attribute],
child: LogicalPlan): InsertIntoTable = {
val newChildOutput = expectedOutput.zip(child.output).map {
case (expected, actual) =>
val needCast = !expected.dataType.sameType(actual.dataType)
        // We want to make sure the field names in the data to be inserted exactly match
        // names in the schema.
val needRename = expected.name != actual.name
(needCast, needRename) match {
case (true, _) => Alias(Cast(actual, expected.dataType), expected.name)()
case (false, true) => Alias(actual, expected.name)()
case (_, _) => actual
}
}
if (newChildOutput == child.output) {
insertInto
} else {
insertInto.copy(child = Project(newChildOutput, child))
}
}
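  // A sketch of the result (hypothetical column names): inserting a query with output
  // (x: INT, y: STRING) into a relation expecting (a: BIGINT, b: STRING) wraps the child in
  //
  //   Project(Seq(Alias(Cast(x, LongType), "a")(), Alias(y, "b")()), child)
  //
  // i.e. a cast-plus-rename for x and a rename-only alias for y.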
}
/**
* A rule to do various checks before inserting into or writing to a data source table.
*/
private[sql] case class PreWriteCheck(catalog: Catalog) extends (LogicalPlan => Unit) {
def failAnalysis(msg: String): Unit = { throw new AnalysisException(msg) }
def apply(plan: LogicalPlan): Unit = {
plan.foreach {
case i @ logical.InsertIntoTable(
l @ LogicalRelation(t: InsertableRelation, _), partition, query, overwrite, ifNotExists) =>
// Right now, we do not support insert into a data source table with partition specs.
if (partition.nonEmpty) {
failAnalysis(s"Insert into a partition is not allowed because $l is not partitioned.")
} else {
// Get all input data source relations of the query.
val srcRelations = query.collect {
case LogicalRelation(src: BaseRelation, _) => src
}
if (srcRelations.contains(t)) {
failAnalysis(
"Cannot insert overwrite into table that is also being read from.")
} else {
// OK
}
}
case logical.InsertIntoTable(
LogicalRelation(r: HadoopFsRelation, _), part, query, overwrite, _) =>
// We need to make sure the partition columns specified by users do match partition
// columns of the relation.
val existingPartitionColumns = r.partitionColumns.fieldNames.toSet
val specifiedPartitionColumns = part.keySet
if (existingPartitionColumns != specifiedPartitionColumns) {
failAnalysis(s"Specified partition columns " +
s"(${specifiedPartitionColumns.mkString(", ")}) " +
s"do not match the partition columns of the table. Please use " +
s"(${existingPartitionColumns.mkString(", ")}) as the partition columns.")
} else {
// OK
}
PartitioningUtils.validatePartitionColumnDataTypes(
r.schema, part.keySet.toArray, catalog.conf.caseSensitiveAnalysis)
// Get all input data source relations of the query.
val srcRelations = query.collect {
case LogicalRelation(src: BaseRelation, _) => src
}
if (srcRelations.contains(r)) {
failAnalysis(
"Cannot insert overwrite into table that is also being read from.")
} else {
// OK
}
case logical.InsertIntoTable(l: LogicalRelation, _, _, _, _) =>
// The relation in l is not an InsertableRelation.
failAnalysis(s"$l does not allow insertion.")
case logical.InsertIntoTable(t, _, _, _, _) =>
if (!t.isInstanceOf[LeafNode] || t == OneRowRelation || t.isInstanceOf[LocalRelation]) {
failAnalysis(s"Inserting into an RDD-based table is not allowed.")
} else {
// OK
}
case CreateTableUsingAsSelect(tableIdent, _, _, partitionColumns, mode, _, query) =>
// When the SaveMode is Overwrite, we need to check if the table is an input table of
// the query. If so, we will throw an AnalysisException to let users know it is not allowed.
if (mode == SaveMode.Overwrite && catalog.tableExists(tableIdent)) {
// Need to remove SubQuery operator.
EliminateSubQueries(catalog.lookupRelation(tableIdent)) match {
// Only do the check if the table is a data source table
// (the relation is a BaseRelation).
case l @ LogicalRelation(dest: BaseRelation, _) =>
// Get all input data source relations of the query.
val srcRelations = query.collect {
case LogicalRelation(src: BaseRelation, _) => src
}
if (srcRelations.contains(dest)) {
failAnalysis(
s"Cannot overwrite table $tableIdent that is also being read from.")
} else {
// OK
}
case _ => // OK
}
} else {
// OK
}
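          // Illustrative example of the check above (hypothetical table name): something like
          //   sqlContext.table("t").write.mode(SaveMode.Overwrite).saveAsTable("t")
          // reads from the same data source table it overwrites, so analysis fails here.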
PartitioningUtils.validatePartitionColumnDataTypes(
query.schema, partitionColumns, catalog.conf.caseSensitiveAnalysis)
case _ => // OK
}
}
}
|
chenc10/Spark-PAF
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
|
Scala
|
apache-2.0
| 8,478
|
package ml.tests
import scala.collection.mutable.ArrayBuffer
import ml.linalg.Matrix
import ml.linalg.Ones
import ml.linalg.Identity
import ml.linalg.Zeros
import ml.models.KMeansModel
import ml.metrics.distance._
import ml.cluster.Kmeans
import ml.cluster.KMedoids
import ml.linalg.Table._
import ml.stats.statsOps._
import ml.util.VectorOps._
object test {
def main(args: Array[String]){
val m3 = Matrix(Array(1.0,2,3,5,9,0,1,2,3,5,9,0, 12,23,2, 80, 70, 50),6,3 );
val m4 = Matrix(Array(1,2,3,4,4,6,7,8,9,4,5,6), 4,3);
println(m3.takeRow(Array(5)));
//val kmobj = Kmeans(m3, 2, distMethod="Minkowski", p=2);
//println(kmobj);
val kmedobj = KMedoids(m3, 2, distMethod="Minkowski", p=2);
println(m3);
println(kmedobj);
println(findIndices(m3(1), 9))
}
}
|
jccarrasco/maleta
|
src/ml/tests/test.scala
|
Scala
|
gpl-2.0
| 798
|
import akka.actor.Status.{Success}
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import com.dazito.scala.dakkabase.{DakkabaseDb, ScalaPongActor}
import com.dazito.scala.dakkabase.messages.{ListSetRequest, SetRequest}
import org.scalatest.{FunSpecLike, Matchers}
import scala.concurrent.duration._
import scala.concurrent.{Promise, Await, Future}
/**
* Created by daz on 21/02/2016.
*/
class ScalaAskExampleTest extends FunSpecLike with Matchers {
val system = ActorSystem()
implicit val timeout = Timeout(5 seconds);
val pongActor = system.actorOf(Props(classOf[ScalaPongActor]))
val dakkabaseActor = system.actorOf(Props(classOf[DakkabaseDb]))
describe("Pong actor") {
it("should responde with Pong") {
val future = pongActor ? "Ping" // Uses the implicit timeout
      // The actor is untyped, so it returns a Future[AnyRef]; we use mapTo[String] to "cast" it to a String
val result = Await.result(future.mapTo[String], 1 second)
assert(result == "Pong")
}
it("should fail on unknown message") {
val future = pongActor ? "unknown"
intercept[Exception] {
Await.result(future.mapTo[String], 1 second)
}
}
}
describe("FutureExamples") {
import scala.concurrent.ExecutionContext.Implicits.global
it("Should print to console") {
(pongActor ? "Ping").onSuccess({
case x: String => println("Replied with: " + x)
})
Thread.sleep(200)
}
it("should cause and log a failure"){
askPong("causeError").onFailure{
case e: Exception => println("Got Exception")
}
}
it("should fail and recover") {
val f = askPong("causeError").recover( {
case t: Exception => "default" // In case of error, return "default"
})
f should equal("default")
}
it("should recover from failure asynchronously") {
askPong("causeError").recoverWith({
case t: Exception => askPong("ping")
})
}
it("composing futures") {
val future = askPong("ping")
.flatMap(message => askPong("Ping" + message))
.recover{
case _: Exception => "There was an error"
}
val result = Await.result(future, 1 second)
result should equal("There was an error")
}
it("combining futures") {
val future1 = Future{4}
val future2 = Future{5}
val futureAddition: Future[Int] = {
for (res1 <- future1; res2 <- future2) yield res1 + res2
}
val additionResult = Await.result(futureAddition, 1 second)
assert(additionResult == 9)
}
it("dealing with lists of futures") {
val listOfFutures: List[Future[String]] = List("Pong", "Pong", "failed").map(pong => askPong(pong))
val futureOfList: Future[List[String]] = Future.sequence(listOfFutures)
Future.sequence(listOfFutures.map(future => future.recover{case _: Exception => ""}))
}
it("dealing with batch insert") {
val setRequestList = ListSetRequest(List(new SetRequest("batchInsert1", "value")))
val future = dakkabaseActor ? setRequestList
val additionResult = Await.result(future, 1 second)
assert(additionResult.equals(Success))
}
}
def askPong(message: String): Future[String] = (pongActor ? message).mapTo[String]
}
|
dazito/LearningAkkaScalaServer
|
akkademy-db/src/test/scala/ScalaAskExampleTest.scala
|
Scala
|
mit
| 3,738
|
/* Copyright (c) 2016 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.easysync
import scala.annotation.tailrec
package object model {
def identity[T](n: Int): Changeset[T] =
if (n > 0)
Changeset(n, Seq(Range(0, n)))
else
Changeset(0, Seq())
def follow[T](a: Changeset[T], b: Changeset[T]): Changeset[T] = {
if (a.from != b.from) {
throw new SyncException(f"Both changesets must apply to the same document length but here apply to ${a.from} and ${b.from}")
}
@tailrec
def loop(aidx: Int, a: Seq[Characters[T]], b: Seq[Characters[T]], acc: Seq[Characters[T]]): Seq[Characters[T]] =
(a, b) match {
case (Seq(Sequence(elemsa), resta @ _*), Seq(sb @ Sequence(_), restb @ _*)) =>
// new elements in both, retain size in first and add elements from second
loop(aidx + elemsa.size, resta, restb, acc :+ Range(aidx, aidx + elemsa.size) :+ sb)
case (Seq(Sequence(elemsa), resta @ _*), Seq(Range(_, _), _*)) =>
// retain elements from first
loop(aidx + elemsa.size, resta, b, acc :+ Range(aidx, aidx + elemsa.size))
case (Seq(Range(_, _), _*), Seq(Sequence(elemsb), restb @ _*)) =>
// insert elements from second
loop(aidx, a, restb, acc :+ Sequence(elemsb))
case (Seq(ra @ Range(starta, enda), resta @ _*), Seq(rb @ Range(startb, endb), restb @ _*)) =>
// retain elements retained in both
if (ra.overlap(rb)) {
val retain = ra.intersect(rb)
val resta1 =
if (retain.end < enda)
Range(retain.end, enda) +: resta
else
resta
val restb1 =
if (retain.end < endb)
Range(retain.end, endb) +: restb
else
restb
loop(aidx + retain.end - starta, resta1, restb1, acc :+ retain.transpose(aidx))
} else if (enda <= startb) {
// retain in a is strictly before retain in b, skip a
loop(aidx + ra.size, resta, b, acc)
} else {
// retain in a is strictly after retain in b, skip b
loop(aidx, a, restb, acc)
}
case (Seq(Sequence(s), rest @ _*), Seq()) =>
// retain the inserted characters
loop(aidx + s.size, rest, Seq(), acc :+ Range(aidx, aidx + s.size))
case (Seq(r @ Range(_, _), rest @ _*), Seq()) =>
// forget about it
loop(aidx + r.size, rest, Seq(), acc)
case (Seq(), Seq(Sequence(s), rest @ _*)) =>
// insert characters
loop(aidx, Seq(), rest, acc :+ Sequence(s))
case (Seq(), Seq(Range(_, _), rest @ _*)) =>
// forget about it
loop(aidx, Seq(), rest, acc)
case (Seq(), Seq()) =>
// it is over
acc
}
Changeset(a.to, loop(0, a.chars, b.chars, Nil)).compact
}
}
|
satabin/akka-easysync
|
src/main/scala/gnieh/easysync/model/package.scala
|
Scala
|
apache-2.0
| 3,456
|
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.onlinetechvision.spark.hazelcast.connector.rdd.implicits
import java.util.Properties
import com.hazelcast.core.{Hazelcast, IList}
import com.onlinetechvision.spark.hazelcast.connector.{User, DistributedObjectType}
import com.onlinetechvision.spark.hazelcast.connector.config.SparkHazelcastConfig._
import org.apache.spark.{SparkConf, SparkContext}
object WriteRDDToDistributedList {
def main(args: Array[String]) {
// Spark Context is created...
val sc = new SparkContext(new SparkConf().setAppName("spark-hazelcast").setMaster("local"))
// RDD content is created...
val data = Array(User(1, "name1", "surname1"),
User(2, "name2", "surname2"),
User(3, "name3", "surname3"),
User(4, "name4", "surname4"),
User(5, "name5", "surname5"))
// RDD is created...
val userRDD = sc.parallelize[User](data)
// Number of elements in RDD is printed as 5...
println(s"userRDD has got ${userRDD.count} elements.")
// Spark Hazelcast properties are created...
val sparkHazelcastProperties = new Properties()
sparkHazelcastProperties.put(HazelcastXMLConfigFileName, "hazelcast_config.xml")
sparkHazelcastProperties.put(HazelcastDistributedObjectName, "test_distributed_list")
sparkHazelcastProperties.put(HazelcastDistributedObjectType, DistributedObjectType.IList)
// userRDD is written to Hazelcast as a Distributed List...
import com.onlinetechvision.spark.hazelcast.connector.rdd.implicits._
userRDD.writeItemToHazelcast(sparkHazelcastProperties)
// Gets 'test_distributed_list' Hazelcast Distributed List instance...
val hzInstance = Hazelcast.getHazelcastInstanceByName("test_hazelcast_instance")
val hzDistributedList: IList[User] = hzInstance.getList("test_distributed_list")
// Prints items of 'test_distributed_list'
import scala.collection.JavaConversions.asScalaIterator
for (item <- hzDistributedList.iterator()) {
println(item)
}
}
}
|
erenavsarogullari/spark-hazelcast-connector
|
examples/src/main/scala/com/onlinetechvision/spark/hazelcast/connector/rdd/implicits/WriteRDDToDistributedList.scala
|
Scala
|
apache-2.0
| 2,664
|
package com.googlecode.warikan.domain.roles
import com.googlecode.warikan.domain.shared.math._
import com.googlecode.warikan.domain.models._
import com.googlecode.warikan.domain.repositories._
/**
* PieMaker.
*
* @author yukei
*/
trait PieMaker {
/**
* Create Pie to be used in allot calculation.
*
* @param slopeId id of Slope entity
* @return Pie(name of Participant -> Fraction)
*/
def createPie(slopeId:String):Map[UserName, Fraction]
}
|
digitalsoul0124/warikan
|
src/main/scala/com/googlecode/warikan/domain/roles/PieMaker.scala
|
Scala
|
mit
| 483
|
package is.hail.types.physical.stypes.interfaces
import is.hail.annotations.Region
import is.hail.asm4s._
import is.hail.expr.ir.EmitCodeBuilder
import is.hail.linalg.{BLAS, LAPACK}
import is.hail.types.physical.stypes.concrete.{SNDArraySlice, SNDArraySliceValue}
import is.hail.types.physical.stypes.{EmitType, SSettable, SType, SValue}
import is.hail.types.physical.{PCanonicalNDArray, PNDArray, PType}
import is.hail.types.{RNDArray, TypeWithRequiredness}
import is.hail.utils.{FastIndexedSeq, toRichIterable, valueToRichCodeRegion}
import scala.collection.mutable
object SNDArray {
def numElements(shape: IndexedSeq[Value[Long]]): Code[Long] = {
shape.foldLeft(1L: Code[Long])(_ * _)
}
// Column major order
def forEachIndexColMajor(cb: EmitCodeBuilder, shape: IndexedSeq[Value[Long]], context: String)
(f: (EmitCodeBuilder, IndexedSeq[Value[Long]]) => Unit): Unit = {
forEachIndexWithInitAndIncColMajor(cb, shape, shape.map(_ => (cb: EmitCodeBuilder) => ()), shape.map(_ => (cb: EmitCodeBuilder) => ()), context)(f)
}
def coiterate(cb: EmitCodeBuilder, arrays: (SNDArrayValue, String)*)(body: IndexedSeq[SValue] => Unit): Unit = {
if (arrays.isEmpty) return
val indexVars = Array.tabulate(arrays(0)._1.st.nDims)(i => s"i$i").toFastIndexedSeq
val indices = Array.range(0, arrays(0)._1.st.nDims).toFastIndexedSeq
coiterate(cb, indexVars, arrays.map { case (array, name) => (array, indices, name) }: _*)(body)
}
// Note: to iterate through an array in column major order, make sure the indices are in ascending order. E.g.
// A.coiterate(cb, region, IndexedSeq("i", "j"), IndexedSeq((A, IndexedSeq(0, 1), "A"), (B, IndexedSeq(0, 1), "B")), {
// SCode.add(cb, a, b)
// })
// computes A += B.
def coiterate(
cb: EmitCodeBuilder,
indexVars: IndexedSeq[String],
arrays: (SNDArrayValue, IndexedSeq[Int], String)*
)(body: IndexedSeq[SValue] => Unit
): Unit = {
_coiterate(cb, indexVars, arrays: _*) { ptrs =>
val codes = ptrs.zip(arrays).map { case (ptr, (array, _, _)) =>
val pt = array.st.pType.elementType
pt.loadCheapSCode(cb, pt.loadFromNested(ptr))
}
body(codes)
}
}
def _coiterate(
cb: EmitCodeBuilder,
indexVars: IndexedSeq[String],
arrays: (SNDArrayValue, IndexedSeq[Int], String)*
)(body: IndexedSeq[Value[Long]] => Unit
): Unit = {
val indexSizes = new Array[Settable[Int]](indexVars.length)
val indexCoords = Array.tabulate(indexVars.length) { i => cb.newLocal[Int](indexVars(i)) }
case class ArrayInfo(
array: SNDArrayValue,
strides: IndexedSeq[Value[Long]],
pos: IndexedSeq[Settable[Long]],
indexToDim: Map[Int, Int],
name: String)
val info = arrays.toIndexedSeq.map { case (array, indices, name) =>
for (idx <- indices) assert(idx < indexVars.length && idx >= 0)
// FIXME: relax this assumption to handle transposing, non-column major
for (i <- 0 until indices.length - 1) assert(indices(i) < indices(i+1))
assert(indices.length == array.st.nDims)
val shape = array.shapes
for (i <- indices.indices) {
val idx = indices(i)
if (indexSizes(idx) == null) {
indexSizes(idx) = cb.newLocal[Int](s"${indexVars(idx)}_max")
cb.assign(indexSizes(idx), shape(i).toI)
} else {
cb.ifx(indexSizes(idx).cne(shape(i).toI), s"${indexVars(idx)} indexes incompatible dimensions")
}
}
val strides = array.strides
val pos = Array.tabulate(array.st.nDims + 1) { i => cb.newLocal[Long](s"$name$i") }
val indexToDim = indices.zipWithIndex.toMap
ArrayInfo(array, strides, pos, indexToDim, name)
}
def recurLoopBuilder(idx: Int): Unit = {
if (idx < 0) {
// FIXME: to handle non-column major, need to use `pos` of smallest index var
body(info.map(_.pos(0)))
} else {
val coord = indexCoords(idx)
def init(): Unit = {
cb.assign(coord, 0)
for (n <- arrays.indices) {
if (info(n).indexToDim.contains(idx)) {
val i = info(n).indexToDim(idx)
// FIXME: assumes array's indices in ascending order
cb.assign(info(n).pos(i), info(n).pos(i+1))
}
}
}
def increment(): Unit = {
cb.assign(coord, coord + 1)
for (n <- arrays.indices) {
if (info(n).indexToDim.contains(idx)) {
val i = info(n).indexToDim(idx)
cb.assign(info(n).pos(i), info(n).pos(i) + info(n).strides(i))
}
}
}
cb.forLoop(init(), coord < indexSizes(idx), increment(), recurLoopBuilder(idx - 1))
}
}
for (n <- arrays.indices) {
cb.assign(info(n).pos(info(n).array.st.nDims), info(n).array.firstDataAddress)
}
recurLoopBuilder(indexVars.length - 1)
}
// Column major order
def forEachIndexWithInitAndIncColMajor(cb: EmitCodeBuilder, shape: IndexedSeq[Value[Long]], inits: IndexedSeq[EmitCodeBuilder => Unit],
incrementers: IndexedSeq[EmitCodeBuilder => Unit], context: String)
(f: (EmitCodeBuilder, IndexedSeq[Value[Long]]) => Unit): Unit = {
val indices = Array.tabulate(shape.length) { dimIdx => cb.newLocal[Long](s"${ context }_foreach_dim_$dimIdx", 0L) }
def recurLoopBuilder(dimIdx: Int, innerLambda: () => Unit): Unit = {
if (dimIdx == shape.length) {
innerLambda()
}
else {
val dimVar = indices(dimIdx)
recurLoopBuilder(dimIdx + 1,
() => {
cb.forLoop({
inits(dimIdx)(cb)
cb.assign(dimVar, 0L)
}, dimVar < shape(dimIdx), {
incrementers(dimIdx)(cb)
cb.assign(dimVar, dimVar + 1L)
},
innerLambda()
)
}
)
}
}
val body = () => f(cb, indices)
recurLoopBuilder(0, body)
}
// Row major order
def forEachIndexRowMajor(cb: EmitCodeBuilder, shape: IndexedSeq[Value[Long]], context: String)
(f: (EmitCodeBuilder, IndexedSeq[Value[Long]]) => Unit): Unit = {
forEachIndexWithInitAndIncRowMajor(cb, shape, shape.map(_ => (cb: EmitCodeBuilder) => ()), shape.map(_ => (cb: EmitCodeBuilder) => ()), context)(f)
}
// Row major order
def forEachIndexWithInitAndIncRowMajor(cb: EmitCodeBuilder, shape: IndexedSeq[Value[Long]], inits: IndexedSeq[EmitCodeBuilder => Unit],
incrementers: IndexedSeq[EmitCodeBuilder => Unit], context: String)
(f: (EmitCodeBuilder, IndexedSeq[Value[Long]]) => Unit): Unit = {
val indices = Array.tabulate(shape.length) { dimIdx => cb.newLocal[Long](s"${ context }_foreach_dim_$dimIdx", 0L) }
def recurLoopBuilder(dimIdx: Int, innerLambda: () => Unit): Unit = {
if (dimIdx == -1) {
innerLambda()
}
else {
val dimVar = indices(dimIdx)
recurLoopBuilder(dimIdx - 1,
() => {
cb.forLoop({
inits(dimIdx)(cb)
cb.assign(dimVar, 0L)
}, dimVar < shape(dimIdx), {
incrementers(dimIdx)(cb)
cb.assign(dimVar, dimVar + 1L)
},
innerLambda()
)
}
)
}
}
val body = () => f(cb, indices)
recurLoopBuilder(shape.length - 1, body)
}
// Column major order
def unstagedForEachIndex(shape: IndexedSeq[Long])
(f: IndexedSeq[Long] => Unit): Unit = {
val indices = Array.tabulate(shape.length) {dimIdx => 0L}
def recurLoopBuilder(dimIdx: Int, innerLambda: () => Unit): Unit = {
if (dimIdx == shape.length) {
innerLambda()
}
else {
recurLoopBuilder(dimIdx + 1,
() => {
(0 until shape(dimIdx).toInt).foreach(_ => {
innerLambda()
indices(dimIdx) += 1
})
}
)
}
}
val body = () => f(indices)
recurLoopBuilder(0, body)
}
def assertMatrix(nds: SNDArrayValue*): Unit = {
for (nd <- nds) assert(nd.st.nDims == 2)
}
def assertVector(nds: SNDArrayValue*): Unit = {
for (nd <- nds) assert(nd.st.nDims == 1)
}
def assertColMajor(cb: EmitCodeBuilder, nds: SNDArrayValue*): Unit = {
for (nd <- nds) {
cb.ifx(nd.strides(0).cne(nd.st.pType.elementType.byteSize),
cb._fatal("Require column major: found row stride ", nd.strides(0).toS, ", expected ", nd.st.pType.elementType.byteSize.toString))
}
}
def copyVector(cb: EmitCodeBuilder, X: SNDArrayValue, Y: SNDArrayValue): Unit = {
val Seq(n) = X.shapes
Y.assertHasShape(cb, FastIndexedSeq(n), "copy: vectors have different sizes: ", Y.shapes(0).toS, ", ", n.toS)
val ldX = X.eltStride(0).max(1)
val ldY = Y.eltStride(0).max(1)
cb += Code.invokeScalaObject5[Int, Long, Int, Long, Int, Unit](BLAS.getClass, "dcopy",
n.toI,
X.firstDataAddress, ldX,
Y.firstDataAddress, ldY)
}
def scale(cb: EmitCodeBuilder, alpha: SValue, X: SNDArrayValue): Unit =
scale(cb, alpha.asFloat64.value, X)
def scale(cb: EmitCodeBuilder, alpha: Value[Double], X: SNDArrayValue): Unit = {
val Seq(n) = X.shapes
val ldX = X.eltStride(0).max(1)
cb += Code.invokeScalaObject4[Int, Double, Long, Int, Unit](BLAS.getClass, "dscal",
n.toI, alpha, X.firstDataAddress, ldX)
}
def gemv(cb: EmitCodeBuilder, trans: String, A: SNDArrayValue, X: SNDArrayValue, Y: SNDArrayValue): Unit = {
gemv(cb, trans, 1.0, A, X, 1.0, Y)
}
def gemv(cb: EmitCodeBuilder, trans: String, alpha: Value[Double], A: SNDArrayValue, X: SNDArrayValue, beta: Value[Double], Y: SNDArrayValue): Unit = {
assertMatrix(A)
val Seq(m, n) = A.shapes
val errMsg = "gemv: incompatible dimensions"
if (trans == "N") {
X.assertHasShape(cb, FastIndexedSeq(n), errMsg)
Y.assertHasShape(cb, FastIndexedSeq(m), errMsg)
} else {
X.assertHasShape(cb, FastIndexedSeq(m), errMsg)
Y.assertHasShape(cb, FastIndexedSeq(n), errMsg)
}
assertColMajor(cb, A)
val ldA = A.eltStride(1).max(1)
val ldX = X.eltStride(0).max(1)
val ldY = Y.eltStride(0).max(1)
cb += Code.invokeScalaObject11[String, Int, Int, Double, Long, Int, Long, Int, Double, Long, Int, Unit](BLAS.getClass, "dgemv",
trans, m.toI, n.toI,
alpha,
A.firstDataAddress, ldA,
X.firstDataAddress, ldX,
beta,
Y.firstDataAddress, ldY)
}
def gemm(cb: EmitCodeBuilder, tA: String, tB: String, A: SNDArrayValue, B: SNDArrayValue, C: SNDArrayValue): Unit =
gemm(cb, tA, tB, 1.0, A, B, 1.0, C)
def gemm(cb: EmitCodeBuilder, tA: String, tB: String, alpha: Value[Double], A: SNDArrayValue, B: SNDArrayValue, beta: Value[Double], C: SNDArrayValue): Unit = {
assertMatrix(A, B, C)
val Seq(m, n) = C.shapes
val k = if (tA == "N") A.shapes(1) else A.shapes(0)
val errMsg = "gemm: incompatible matrix dimensions"
if (tA == "N")
A.assertHasShape(cb, FastIndexedSeq(m, k), errMsg)
else
A.assertHasShape(cb, FastIndexedSeq(k, m), errMsg)
if (tB == "N")
B.assertHasShape(cb, FastIndexedSeq(k, n), errMsg)
else
B.assertHasShape(cb, FastIndexedSeq(n, k), errMsg)
assertColMajor(cb, A, B, C)
val ldA = A.eltStride(1).max(1)
val ldB = B.eltStride(1).max(1)
val ldC = C.eltStride(1).max(1)
cb += Code.invokeScalaObject13[String, String, Int, Int, Int, Double, Long, Int, Long, Int, Double, Long, Int, Unit](BLAS.getClass, "dgemm",
tA, tB, m.toI, n.toI, k.toI,
alpha,
A.firstDataAddress, ldA,
B.firstDataAddress, ldB,
beta,
C.firstDataAddress, ldC)
}
def trmm(cb: EmitCodeBuilder, side: String, uplo: String, transA: String, diag: String,
alpha: Value[Double], A: SNDArrayValue, B: SNDArrayValue): Unit = {
assertMatrix(A, B)
assertColMajor(cb, A, B)
val Seq(m, n) = B.shapes
val Seq(a0, a1) = A.shapes
cb.ifx(a1.cne(if (side == "left") m else n), cb._fatal("trmm: incompatible matrix dimensions"))
// Elide check in the common case that we statically know A is square
if (a0 != a1) cb.ifx(a0 < a1, cb._fatal("trmm: A has fewer rows than cols: ", a0.toS, ", ", a1.toS))
val ldA = A.eltStride(1).max(1)
val ldB = B.eltStride(1).max(1)
cb += Code.invokeScalaObject11[String, String, String, String, Int, Int, Double, Long, Int, Long, Int, Unit](BLAS.getClass, "dtrmm",
side, uplo, transA, diag,
m.toI, n.toI,
alpha,
A.firstDataAddress, ldA,
B.firstDataAddress, ldB)
}
def geqrt(A: SNDArrayValue, T: SNDArrayValue, work: SNDArrayValue, blocksize: Value[Long], cb: EmitCodeBuilder): Unit = {
assertMatrix(A)
assertColMajor(cb, A)
assertVector(work, T)
val Seq(m, n) = A.shapes
val nb = blocksize
cb.ifx(nb > m.min(n) || nb < 1, cb._fatal("invalid block size"))
cb.ifx(T.shapes(0) < nb*(m.min(n)), cb._fatal("T too small"))
cb.ifx(work.shapes(0) < nb * n, cb._fatal("work array too small"))
val error = cb.mb.newLocal[Int]()
val ldA = A.eltStride(1).max(1)
cb.assign(error, Code.invokeScalaObject8[Int, Int, Int, Long, Int, Long, Int, Long, Int](LAPACK.getClass, "dgeqrt",
m.toI, n.toI, nb.toI,
A.firstDataAddress, ldA,
T.firstDataAddress, nb.toI.max(1),
work.firstDataAddress))
cb.ifx(error.cne(0), cb._fatal("LAPACK error dtpqrt. Error code = ", error.toS))
}
def gemqrt(side: String, trans: String, V: SNDArrayValue, T: SNDArrayValue, C: SNDArrayValue, work: SNDArrayValue, blocksize: Value[Long], cb: EmitCodeBuilder): Unit = {
assertMatrix(C, V)
assertColMajor(cb, C, V)
assertVector(work, T)
assert(side == "L" || side == "R")
assert(trans == "T" || trans == "N")
val Seq(l, k) = V.shapes
val Seq(m, n) = C.shapes
val nb = blocksize
cb.ifx(nb > k || nb < 1, cb._fatal("invalid block size"))
cb.ifx(T.shapes(0) < nb*k, cb._fatal("invalid T size"))
if (side == "L") {
cb.ifx(l.cne(m), cb._fatal("invalid dimensions"))
cb.ifx(work.shapes(0) < nb * n, cb._fatal("work array too small"))
} else {
cb.ifx(l.cne(n), cb._fatal("invalid dimensions"))
cb.ifx(work.shapes(0) < nb * m, cb._fatal("work array too small"))
}
val error = cb.mb.newLocal[Int]()
val ldV = V.eltStride(1).max(1)
val ldC = C.eltStride(1).max(1)
cb.assign(error, Code.invokeScalaObject13[String, String, Int, Int, Int, Int, Long, Int, Long, Int, Long, Int, Long, Int](LAPACK.getClass, "dgemqrt",
side, trans, m.toI, n.toI, k.toI, nb.toI,
V.firstDataAddress, ldV,
T.firstDataAddress, nb.toI.max(1),
C.firstDataAddress, ldC,
work.firstDataAddress))
cb.ifx(error.cne(0), cb._fatal("LAPACK error dtpqrt. Error code = ", error.toS))
}
def tpqrt(A: SNDArrayValue, B: SNDArrayValue, T: SNDArrayValue, work: SNDArrayValue, blocksize: Value[Long], cb: EmitCodeBuilder): Unit = {
assertMatrix(A, B)
assertColMajor(cb, A, B)
assertVector(work, T)
val Seq(m, n) = B.shapes
val nb = blocksize
cb.ifx(nb > n || nb < 1, cb._fatal("invalid block size"))
cb.ifx(T.shapes(0) < nb*n, cb._fatal("T too small"))
A.assertHasShape(cb, FastIndexedSeq(n, n))
cb.ifx(work.shapes(0) < nb * n, cb._fatal("work array too small"))
val error = cb.mb.newLocal[Int]()
val ldA = A.eltStride(1).max(1)
val ldB = B.eltStride(1).max(1)
cb.assign(error, Code.invokeScalaObject11[Int, Int, Int, Int, Long, Int, Long, Int, Long, Int, Long, Int](LAPACK.getClass, "dtpqrt",
m.toI, n.toI, 0, nb.toI,
A.firstDataAddress, ldA,
B.firstDataAddress, ldB,
T.firstDataAddress, nb.toI.max(1),
work.firstDataAddress))
cb.ifx(error.cne(0), cb._fatal("LAPACK error dtpqrt. Error code = ", error.toS))
}
def tpmqrt(side: String, trans: String, V: SNDArrayValue, T: SNDArrayValue, A: SNDArrayValue, B: SNDArrayValue, work: SNDArrayValue, blocksize: Value[Long], cb: EmitCodeBuilder): Unit = {
assertMatrix(A, B, V)
assertColMajor(cb, A, B, V)
assertVector(work, T)
assert(side == "L" || side == "R")
assert(trans == "T" || trans == "N")
val Seq(l, k) = V.shapes
val Seq(m, n) = B.shapes
val nb = blocksize
cb.ifx(nb > k || nb < 1, cb._fatal("invalid block size"))
cb.ifx(T.shapes(0) < nb*k, cb._fatal("T too small"))
if (side == "L") {
cb.ifx(l.cne(m), cb._fatal("invalid dimensions"))
cb.ifx(work.shapes(0) < nb * n, cb._fatal("work array too small"))
A.assertHasShape(cb, FastIndexedSeq(k, n))
} else {
cb.ifx(l.cne(n), cb._fatal("invalid dimensions"))
cb.ifx(work.shapes(0) < nb * m, cb._fatal("work array too small"))
A.assertHasShape(cb, FastIndexedSeq(m, k))
}
val error = cb.mb.newLocal[Int]()
val ldV = V.eltStride(1).max(1)
val ldA = A.eltStride(1).max(1)
val ldB = B.eltStride(1).max(1)
cb.assign(error, Code.invokeScalaObject16[String, String, Int, Int, Int, Int, Int, Long, Int, Long, Int, Long, Int, Long, Int, Long, Int](LAPACK.getClass, "dtpmqrt",
side, trans, m.toI, n.toI, k.toI, 0, nb.toI,
V.firstDataAddress, ldV,
T.firstDataAddress, nb.toI.max(1),
A.firstDataAddress, ldA,
B.firstDataAddress, ldB,
work.firstDataAddress))
cb.ifx(error.cne(0), cb._fatal("LAPACK error dtpqrt. Error code = ", error.toS))
}
def geqrf_query(cb: EmitCodeBuilder, m: Value[Int], n: Value[Int], region: Value[Region]): Value[Int] = {
val LWorkAddress = cb.newLocal[Long]("dgeqrf_lwork_address")
val LWork = cb.newLocal[Int]("dgeqrf_lwork")
val info = cb.newLocal[Int]("dgeqrf_info")
cb.assign(LWorkAddress, region.allocate(8L, 8L))
cb.assign(info, Code.invokeScalaObject7[Int, Int, Long, Int, Long, Long, Int, Int](LAPACK.getClass, "dgeqrf",
m.toI, n.toI,
0, m.toI,
0,
LWorkAddress, -1))
cb.ifx(info.cne(0), cb._fatal(s"LAPACK error DGEQRF. Failed size query. Error code = ", info.toS))
cb.assign(LWork, Region.loadDouble(LWorkAddress).toI)
cb.memoize((LWork > 0).mux(LWork, 1))
}
def geqrf(cb: EmitCodeBuilder, A: SNDArrayValue, T: SNDArrayValue, work: SNDArrayValue): Unit = {
assertMatrix(A)
assertColMajor(cb, A)
assertVector(T, work)
val Seq(m, n) = A.shapes
cb.ifx(T.shapes(0).cne(m.min(n)), cb._fatal("geqrf: T has wrong size"))
val lwork = work.shapes(0)
cb.ifx(lwork < n.max(1L), cb._fatal("geqrf: work has wrong size"))
val ldA = A.eltStride(1).max(1)
val info = cb.newLocal[Int]("dgeqrf_info")
cb.assign(info, Code.invokeScalaObject7[Int, Int, Long, Int, Long, Long, Int, Int](LAPACK.getClass, "dgeqrf",
m.toI, n.toI,
A.firstDataAddress, ldA,
T.firstDataAddress,
work.firstDataAddress, lwork.toI))
cb.ifx(info.cne(0), cb._fatal(s"LAPACK error DGEQRF. Error code = ", info.toS))
}
def orgqr(cb: EmitCodeBuilder, k: Value[Int], A: SNDArrayValue, T: SNDArrayValue, work: SNDArrayValue): Unit = {
assertMatrix(A)
assertColMajor(cb, A)
assertVector(T, work)
val Seq(m, n) = A.shapes
cb.ifx(k < 0 || k > n.toI, cb._fatal("orgqr: invalid k"))
cb.ifx(T.shapes(0).cne(m.min(n)), cb._fatal("orgqr: T has wrong size"))
val lwork = work.shapes(0)
cb.ifx(lwork < n.max(1L), cb._fatal("orgqr: work has wrong size"))
val ldA = A.eltStride(1).max(1)
val info = cb.newLocal[Int]("dgeqrf_info")
cb.assign(info, Code.invokeScalaObject8[Int, Int, Int, Long, Int, Long, Long, Int, Int](LAPACK.getClass, "dorgqr",
m.toI, n.toI, k.toI,
A.firstDataAddress, ldA,
T.firstDataAddress,
work.firstDataAddress, lwork.toI))
cb.ifx(info.cne(0), cb._fatal(s"LAPACK error DGEQRF. Error code = ", info.toS))
}
}
trait SNDArray extends SType {
def pType: PNDArray
def nDims: Int
def elementType: SType
def elementPType: PType
def elementEmitType: EmitType = EmitType(elementType, pType.elementType.required)
def elementByteSize: Long
override def _typeWithRequiredness: TypeWithRequiredness = RNDArray(elementType.typeWithRequiredness.setRequired(true).r)
}
sealed abstract class NDArrayIndex
case class ScalarIndex(i: Value[Long]) extends NDArrayIndex
case class SliceIndex(begin: Option[Value[Long]], end: Option[Value[Long]]) extends NDArrayIndex
case class SliceSize(begin: Option[Value[Long]], size: SizeValue) extends NDArrayIndex
case object ColonIndex extends NDArrayIndex
// Used to preserve static information about dimension sizes.
// If `l == r`, then we know statically that the sizes are equal, even if
// the size itself is dynamic (e.g. they share the same storage location)
// `l.ceq(r)` compares the sizes dynamically, but takes advantage of static
// knowledge to elide the comparison when possible.
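// For example (a sketch; assume `n` is some Value[Long] computed at runtime):
//   val a = SizeValueDyn(n); val b = SizeValueDyn(n)
//   a.ceq(b)                                   // same underlying Value => const(true), no runtime check emitted
//   SizeValueStatic(3).ceq(SizeValueStatic(4)) // statically known => const(false)
//   SizeValueDyn(n).ceq(SizeValueStatic(3))    // falls back to comparing the loaded values at runtime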
sealed abstract class SizeValue extends Value[Long] {
def ceq(other: SizeValue): Code[Boolean] = (this, other) match {
case (SizeValueStatic(l), SizeValueStatic(r)) => const(l == r)
case (l, r) => if (l == r) const(true) else l.get.ceq(r.get)
}
def cne(other: SizeValue): Code[Boolean] = (this, other) match {
case (SizeValueStatic(l), SizeValueStatic(r)) => const(l != r)
case (l, r) => if (l == r) const(false) else l.get.cne(r.get)
}
}
object SizeValueDyn {
def apply(v: Value[Long]): SizeValueDyn = new SizeValueDyn(v)
def unapply(size: SizeValueDyn): Some[Value[Long]] = Some(size.v)
}
object SizeValueStatic {
def apply(v: Long): SizeValueStatic = {
assert(v >= 0)
new SizeValueStatic(v)
}
def unapply(size: SizeValueStatic): Some[Long] = Some(size.v)
}
final class SizeValueDyn(val v: Value[Long]) extends SizeValue {
def get: Code[Long] = v.get
override def equals(other: Any): Boolean = other match {
case SizeValueDyn(v2) => v eq v2
case _ => false
}
}
final class SizeValueStatic(val v: Long) extends SizeValue {
def get: Code[Long] = const(v)
override def equals(other: Any): Boolean = other match {
case SizeValueStatic(v2) => v == v2
case _ => false
}
}
trait SNDArrayValue extends SValue {
def st: SNDArray
def loadElement(indices: IndexedSeq[Value[Long]], cb: EmitCodeBuilder): SValue
def loadElementAddress(indices: IndexedSeq[Value[Long]], cb: EmitCodeBuilder): Code[Long]
def shapes: IndexedSeq[SizeValue]
def shapeStruct(cb: EmitCodeBuilder): SBaseStructValue
def strides: IndexedSeq[Value[Long]]
def eltStride(i: Int): Code[Int] = st.elementByteSize match {
case 4 => strides(i).toI >> 2
case 8 => strides(i).toI >> 3
case eltSize => strides(i).toI / eltSize.toInt
}
def firstDataAddress: Value[Long]
def outOfBounds(indices: IndexedSeq[Value[Long]], cb: EmitCodeBuilder): Code[Boolean] = {
val shape = this.shapes
val outOfBounds = cb.newLocal[Boolean]("sndarray_out_of_bounds", false)
(0 until st.nDims).foreach { dimIndex =>
cb.assign(outOfBounds, outOfBounds || (indices(dimIndex) >= shape(dimIndex)))
}
outOfBounds
}
def assertInBounds(indices: IndexedSeq[Value[Long]], cb: EmitCodeBuilder, errorId: Int): Unit = {
val shape = this.shapes
for (dimIndex <- 0 until st.nDims) {
cb.ifx(indices(dimIndex) >= shape(dimIndex), {
cb._fatalWithError(errorId,
"Index ", indices(dimIndex).toS,
s" is out of bounds for axis $dimIndex with size ",
shape(dimIndex).toS)
})
}
}
def sameShape(cb: EmitCodeBuilder, other: SNDArrayValue): Code[Boolean] =
hasShape(cb, other.shapes)
def hasShape(cb: EmitCodeBuilder, otherShape: IndexedSeq[SizeValue]): Code[Boolean] = {
var b: Code[Boolean] = const(true)
val shape = this.shapes
assert(shape.length == otherShape.length)
(shape, otherShape).zipped.foreach { (s1, s2) =>
      b = b && s1.ceq(s2)
}
b
}
def assertHasShape(cb: EmitCodeBuilder, otherShape: IndexedSeq[SizeValue], msg: Code[String]*) =
if (!hasShapeStatic(otherShape))
cb.ifx(!hasShape(cb, otherShape), cb._fatal(msg: _*))
// True IFF shape can be proven equal to otherShape statically
def hasShapeStatic(otherShape: IndexedSeq[SizeValue]): Boolean =
shapes == otherShape
// ensure coerceToShape(cb, otherShape).hasShapeStatic(otherShape)
// Inserts any necessary dynamic assertions
def coerceToShape(cb: EmitCodeBuilder, otherShape: IndexedSeq[SizeValue]): SNDArrayValue
def coiterateMutate(cb: EmitCodeBuilder, region: Value[Region], arrays: (SNDArrayValue, String)*)(body: IndexedSeq[SValue] => SValue): Unit =
coiterateMutate(cb, region, false, arrays: _*)(body)
def coiterateMutate(cb: EmitCodeBuilder, region: Value[Region], deepCopy: Boolean, arrays: (SNDArrayValue, String)*)(body: IndexedSeq[SValue] => SValue): Unit = {
if (arrays.isEmpty) return
val indexVars = Array.tabulate(arrays(0)._1.st.nDims)(i => s"i$i").toFastIndexedSeq
val indices = Array.range(0, arrays(0)._1.st.nDims).toFastIndexedSeq
coiterateMutate(cb, region, deepCopy, indexVars, indices, arrays.map { case (array, name) => (array, indices, name) }: _*)(body)
}
def coiterateMutate(cb: EmitCodeBuilder, region: Value[Region], indexVars: IndexedSeq[String], destIndices: IndexedSeq[Int], arrays: (SNDArrayValue, IndexedSeq[Int], String)*)(body: IndexedSeq[SValue] => SValue): Unit =
coiterateMutate(cb, region, false, indexVars, destIndices, arrays: _*)(body)
// Note: to iterate through an array in column major order, make sure the indices are in ascending order. E.g.
// A.coiterate(cb, region, IndexedSeq("i", "j"), IndexedSeq((A, IndexedSeq(0, 1), "A"), (B, IndexedSeq(0, 1), "B")), {
// SCode.add(cb, a, b)
// })
// computes A += B.
def coiterateMutate(
cb: EmitCodeBuilder,
region: Value[Region],
deepCopy: Boolean,
indexVars: IndexedSeq[String],
destIndices: IndexedSeq[Int],
arrays: (SNDArrayValue, IndexedSeq[Int], String)*
)(body: IndexedSeq[SValue] => SValue
): Unit
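  // Hedged sketch (not in the original source) of the same "A += B" use case, spelled out
  // against the full signature just declared; `cb`, `region`, `A` and `B` are assumed to be
  // in scope, exactly as in the short example in the comment above.
  //
  //   A.coiterateMutate(cb, region, false,
  //     IndexedSeq("i", "j"), IndexedSeq(0, 1),
  //     (A, IndexedSeq(0, 1), "A"), (B, IndexedSeq(0, 1), "B")
  //   ) { case IndexedSeq(a, b) => SCode.add(cb, a, b) }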
def slice(cb: EmitCodeBuilder, indices: IndexedSeq[NDArrayIndex]): SNDArraySliceValue = {
val shapeX = shapes
val stridesX = strides
val shapeBuilder = mutable.ArrayBuilder.make[SizeValue]
val stridesBuilder = mutable.ArrayBuilder.make[Value[Long]]
for (i <- indices.indices) indices(i) match {
case ScalarIndex(j) =>
cb.ifx(j < 0 || j >= shapeX(i), cb._fatal("Index out of bounds"))
case SliceIndex(Some(begin), Some(end)) =>
cb.ifx(begin < 0 || end > shapeX(i) || begin > end, cb._fatal("Index out of bounds"))
val s = cb.newLocal[Long]("slice_size", end - begin)
shapeBuilder += SizeValueDyn(s)
stridesBuilder += stridesX(i)
case SliceIndex(None, Some(end)) =>
cb.ifx(end >= shapeX(i) || end < 0, cb._fatal("Index out of bounds"))
shapeBuilder += SizeValueDyn(end)
stridesBuilder += stridesX(i)
case SliceIndex(Some(begin), None) =>
val end = shapeX(i)
cb.ifx(begin < 0 || begin > end, cb._fatal("Index out of bounds"))
val s = cb.newLocal[Long]("slice_size", end - begin)
shapeBuilder += SizeValueDyn(s)
stridesBuilder += stridesX(i)
case SliceIndex(None, None) =>
shapeBuilder += shapeX(i)
stridesBuilder += stridesX(i)
case SliceSize(None, size) =>
cb.ifx(size >= shapeX(i), cb._fatal("Index out of bounds") )
shapeBuilder += size
stridesBuilder += stridesX(i)
case SliceSize(Some(begin), size) =>
cb.ifx(begin < 0 || begin + size > shapeX(i), cb._fatal("Index out of bounds") )
shapeBuilder += size
stridesBuilder += stridesX(i)
case ColonIndex =>
shapeBuilder += shapeX(i)
stridesBuilder += stridesX(i)
}
val newShape = shapeBuilder.result()
val newStrides = stridesBuilder.result()
val firstElementIndices = indices.map {
  case ScalarIndex(j) => j
  case SliceIndex(Some(begin), _) => begin
  case SliceIndex(None, _) => const(0L)
  case SliceSize(Some(begin), _) => begin
  case SliceSize(None, _) => const(0L)
  case ColonIndex => const(0L)
}
val newFirstDataAddress = cb.newLocal[Long]("slice_ptr", loadElementAddress(firstElementIndices, cb))
val newSType = SNDArraySlice(PCanonicalNDArray(st.pType.elementType, newShape.size, st.pType.required))
new SNDArraySliceValue(newSType, newShape, newStrides, newFirstDataAddress)
}
}
trait SNDArraySettable extends SNDArrayValue with SSettable
|
hail-is/hail
|
hail/src/main/scala/is/hail/types/physical/stypes/interfaces/SNDArray.scala
|
Scala
|
mit
| 28,560
|
package com.github.mmolimar.ksql
import java.sql.ResultSet
import scala.language.implicitConversions
package object jdbc {
object implicits {
implicit class ResultSetStream(resultSet: ResultSet) {
def toStream: Stream[ResultSet] = new Iterator[ResultSet] {
def hasNext(): Boolean = resultSet.next
def next(): ResultSet = resultSet
}.toStream
}
implicit def toIndexedMap(headers: List[HeaderField]): Map[Int, HeaderField] = {
headers.zipWithIndex.map { case (header, index) =>
HeaderField(header.name, header.label, header.jdbcType, header.length, index + 1)
}.map(h => h.index -> h).toMap
}
}
}
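// Hedged usage sketch (not part of the original file): assuming an open java.sql.Connection
// `conn` obtained from the KSQL JDBC driver, the implicit ResultSetStream wrapper above lets a
// query result be consumed lazily as a Stream. The wrapped ResultSet is single-pass, because
// hasNext advances the underlying cursor; the "users" stream and "name" column are made-up.
object ResultSetStreamUsageSketch {
  import java.sql.Connection
  import com.github.mmolimar.ksql.jdbc.implicits._

  def firstTenNames(conn: Connection): Seq[String] = {
    val rs = conn.createStatement().executeQuery("SELECT name FROM users")
    rs.toStream.map(_.getString("name")).take(10).toList
  }
}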
|
mmolimar/ksql-jdbc-driver
|
src/main/scala/com/github/mmolimar/ksql/jdbc/package.scala
|
Scala
|
apache-2.0
| 674
|
package com.twitter.util
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
import java.util.Locale
import java.util.concurrent.TimeUnit
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import com.twitter.util.TimeConversions._
trait TimeLikeSpec[T <: TimeLike[T]] extends WordSpec with GeneratorDrivenPropertyChecks {
val ops: TimeLikeOps[T]
import ops._
"Top, Bottom, Undefined, Nanoseconds(_), Finite(_)" should {
val easyVs = Seq(Zero, Top, Bottom, Undefined,
fromNanoseconds(1), fromNanoseconds(-1))
val vs = easyVs ++ Seq(
fromNanoseconds(Long.MaxValue-1),
fromNanoseconds(Long.MinValue+1))
"behave like boxed doubles" in {
assert((Top compare Undefined) < 0)
assert((Bottom compare Top) < 0)
assert((Undefined compare Undefined) === 0)
assert((Top compare Top) === 0)
assert((Bottom compare Bottom) === 0)
assert(Top + Duration.Top === Top)
assert(Bottom - Duration.Bottom === Undefined)
assert(Top - Duration.Top === Undefined)
assert(Bottom + Duration.Bottom === Bottom)
}
"complementary diff" in {
// Note that this doesn't always hold because of two's
// complement arithmetic.
for (a <- easyVs; b <- easyVs)
assert((a diff b) === -(b diff a))
}
"complementary compare" in {
for (a <- vs; b <- vs) {
val x = a compare b
val y = b compare a
assert(((x == 0 && y == 0) || (x < 0 != y < 0)) === true)
}
}
"commutative max" in {
for (a <- vs; b <- vs)
assert((a max b) === (b max a))
}
"commutative min" in {
for (a <- vs; b <- vs)
assert((a min b) === (b min a))
}
"handle underflows" in {
assert(fromNanoseconds(Long.MinValue) - 1.nanosecond === Bottom)
assert(fromMicroseconds(Long.MinValue) - 1.nanosecond === Bottom)
}
"handle overflows" in {
assert(fromNanoseconds(Long.MaxValue) + 1.nanosecond === Top)
assert(fromMicroseconds(Long.MaxValue) + 1.nanosecond === Top)
}
"Nanoseconds(_) extracts only finite values, in nanoseconds" in {
for (t <- Seq(Top, Bottom, Undefined))
assert(t match {
case Nanoseconds(_) => false
case _ => true
})
for (ns <- Seq(Long.MinValue, -1, 0, 1, Long.MaxValue); t = fromNanoseconds(ns))
assert(t match {
case Nanoseconds(`ns`) => true
case _ => false
})
}
"Finite(_) extracts only finite values" in {
for (t <- Seq(Top, Bottom, Undefined))
assert(t match {
case Finite(_) => false
case _ => true
})
for (ns <- Seq(Long.MinValue, -1, 0, 1, Long.MaxValue); t = fromNanoseconds(ns))
assert(t match {
case Finite(`t`) => true
case _ => false
})
}
"roundtrip through serialization" in {
for (v <- vs) {
val bytes = new ByteArrayOutputStream
val out = new ObjectOutputStream(bytes)
out.writeObject(v)
val in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray))
assert(in.readObject() === v)
}
}
}
"Top" should {
"be impermeable to finite arithmetic" in {
assert(Top - 0.seconds === Top)
assert(Top - 100.seconds === Top)
assert(Top - Duration.fromNanoseconds(Long.MaxValue) === Top)
}
"become undefined when subtracted from itself, or added to bottom" in {
assert(Top - Duration.Top === Undefined)
assert(Top + Duration.Bottom === Undefined)
}
"not be equal to the maximum value" in {
assert(fromNanoseconds(Long.MaxValue) != Top)
}
"always be max" in {
assert((Top max fromSeconds(1)) === Top)
assert((Top max fromFractionalSeconds(1.0)) === Top)
assert((Top max fromNanoseconds(Long.MaxValue)) === Top)
assert((Top max Bottom) === Top)
}
"greater than everything else" in {
assert(fromSeconds(0) < Top)
assert(fromFractionalSeconds(Double.MaxValue) < Top)
assert(fromNanoseconds(Long.MaxValue) < Top)
}
"equal to itself" in {
assert(Top === Top)
}
"more or less equals only to itself" in {
assert(Top.moreOrLessEquals(Top, Duration.Top) === true)
assert(Top.moreOrLessEquals(Top, Duration.Zero) === true)
assert(Top.moreOrLessEquals(Bottom, Duration.Top) === true)
assert(Top.moreOrLessEquals(Bottom, Duration.Zero) === false)
assert(Top.moreOrLessEquals(fromSeconds(0), Duration.Top) === true)
assert(Top.moreOrLessEquals(fromSeconds(0), Duration.Bottom) === false)
}
"Undefined diff to Top" in {
assert((Top diff Top) === Duration.Undefined)
}
}
"Bottom" should {
"be impermeable to finite arithmetic" in {
assert(Bottom + 0.seconds === Bottom)
assert(Bottom + 100.seconds === Bottom)
assert(Bottom + Duration.fromNanoseconds(Long.MaxValue) === Bottom)
}
"become undefined when added with Top or subtracted by bottom" in {
assert(Bottom + Duration.Top === Undefined)
assert(Bottom - Duration.Bottom === Undefined)
}
"always be min" in {
assert((Bottom min Top) === Bottom)
assert((Bottom min fromNanoseconds(0)) === Bottom)
}
"less than everything else" in {
assert(Bottom < fromSeconds(0))
assert(Bottom < fromNanoseconds(Long.MaxValue))
assert(Bottom < fromNanoseconds(Long.MinValue))
}
"less than Top" in {
assert(Bottom < Top)
}
"equal to itself" in {
assert(Bottom === Bottom)
}
"more or less equals only to itself" in {
assert(Bottom.moreOrLessEquals(Bottom, Duration.Top) === true)
assert(Bottom.moreOrLessEquals(Bottom, Duration.Zero) === true)
assert(Bottom.moreOrLessEquals(Top, Duration.Bottom) === false)
assert(Bottom.moreOrLessEquals(Top, Duration.Zero) === false)
assert(Bottom.moreOrLessEquals(fromSeconds(0), Duration.Top) === true)
assert(Bottom.moreOrLessEquals(fromSeconds(0), Duration.Bottom) === false)
}
"Undefined diff to Bottom" in {
assert((Bottom diff Bottom) === Duration.Undefined)
}
}
"Undefined" should {
"be impermeable to any arithmetic" in {
assert(Undefined + 0.seconds === Undefined)
assert(Undefined + 100.seconds === Undefined)
assert(Undefined + Duration.fromNanoseconds(Long.MaxValue) === Undefined)
}
"become undefined when added with Top or subtracted by bottom" in {
assert(Undefined + Duration.Top === Undefined)
assert(Undefined - Duration.Undefined === Undefined)
}
"always be max" in {
assert((Undefined max Top) === Undefined)
assert((Undefined max fromNanoseconds(0)) === Undefined)
}
"greater than everything else" in {
assert(fromSeconds(0) < Undefined)
assert(Top < Undefined)
assert(fromNanoseconds(Long.MaxValue) < Undefined)
}
"equal to itself" in {
assert(Undefined === Undefined)
}
"not more or less equal to anything" in {
assert(Undefined.moreOrLessEquals(Undefined, Duration.Top) === false)
assert(Undefined.moreOrLessEquals(Undefined, Duration.Zero) === false)
assert(Undefined.moreOrLessEquals(Top, Duration.Undefined) === true)
assert(Undefined.moreOrLessEquals(Top, Duration.Zero) === false)
assert(Undefined.moreOrLessEquals(fromSeconds(0), Duration.Top) === false)
assert(Undefined.moreOrLessEquals(fromSeconds(0), Duration.Undefined) === true)
}
"Undefined on diff" in {
assert((Undefined diff Top) === Duration.Undefined)
assert((Undefined diff Bottom) === Duration.Undefined)
assert((Undefined diff fromNanoseconds(123)) === Duration.Undefined)
}
}
"values" should {
"reflect their underlying value" in {
val nss = Seq(
2592000000000000000L, // 30000.days
1040403005001003L, // 12.days+1.hour+3.seconds+5.milliseconds+1.microsecond+3.nanoseconds
123000000000L, // 123.seconds
1L
)
for (ns <- nss) {
val t = fromNanoseconds(ns)
assert(t.inNanoseconds === ns)
assert(t.inMicroseconds === ns/1000L)
assert(t.inMilliseconds === ns/1000000L)
assert(t.inLongSeconds === ns/1000000000L)
assert(t.inMinutes === ns/60000000000L)
assert(t.inHours === ns/3600000000000L)
assert(t.inDays === ns/86400000000000L)
}
}
}
"inSeconds" should {
"equal inLongSeconds when in 32-bit range" in {
val nss = Seq(
315370851000000000L, // 3650.days+3.hours+51.seconds
1040403005001003L, // 12.days+1.hour+3.seconds+5.milliseconds+1.microsecond+3.nanoseconds
1L
)
for (ns <- nss) {
val t = fromNanoseconds(ns)
assert(t.inLongSeconds === t.inSeconds)
}
}
"clamp value to Int.MinValue or MaxValue when out of range" in {
val longNs = 2160000000000000000L // 25000.days
assert(fromNanoseconds(longNs).inSeconds === Int.MaxValue)
assert(fromNanoseconds(-longNs).inSeconds === Int.MinValue)
}
}
"rounding" should {
"maintain top and bottom" in {
assert(Top.floor(1.hour) == Top)
assert(Bottom.floor(1.hour) == Bottom)
}
"divide by zero" in {
assert(Zero.floor(Duration.Zero) == Undefined)
assert(fromSeconds(1).floor(Duration.Zero) == Top)
assert(fromSeconds(-1).floor(Duration.Zero) == Bottom)
}
"deal with undefineds" in {
assert(Undefined.floor(0.seconds) == Undefined)
assert(Undefined.floor(Duration.Top) == Undefined)
assert(Undefined.floor(Duration.Bottom) == Undefined)
assert(Undefined.floor(Duration.Undefined) == Undefined)
}
"round to itself" in {
for (s <- Seq(Long.MinValue, -1, 1, Long.MaxValue); t = s.nanoseconds)
assert(t.floor(t.inNanoseconds.nanoseconds) == t)
}
}
"floor" should {
"round down" in {
assert(60.seconds.floor(1.minute) == 60.seconds)
assert(100.seconds.floor(1.minute) == 60.seconds)
assert(119.seconds.floor(1.minute) == 60.seconds)
assert(120.seconds.floor(1.minute) == 120.seconds)
}
}
"ceiling" should {
"round up" in {
assert(60.seconds.ceil(1.minute) == 60.seconds)
assert(100.seconds.ceil(1.minute) == 120.seconds)
assert(119.seconds.ceil(1.minute) == 120.seconds)
assert(120.seconds.ceil(1.minute) == 120.seconds)
}
}
"from*" should {
"never over/under flow nanos" in {
for (v <- Seq(Long.MinValue, Long.MaxValue)) {
fromNanoseconds(v) match {
case Nanoseconds(ns) => assert(ns == v)
}
}
}
"overflow millis" in {
val millis = TimeUnit.NANOSECONDS.toMillis(Long.MaxValue)
fromMilliseconds(millis) match {
case Nanoseconds(ns) => assert(ns == millis*1e6)
}
assert(fromMilliseconds(millis+1) === Top)
}
"underflow millis" in {
val millis = TimeUnit.NANOSECONDS.toMillis(Long.MinValue)
fromMilliseconds(millis) match {
case Nanoseconds(ns) => assert(ns == millis*1e6)
}
assert(fromMilliseconds(millis-1) === Bottom)
}
}
}
@RunWith(classOf[JUnitRunner])
class TimeFormatTest extends WordSpec {
"TimeFormat" should {
"format correctly with non US locale" in {
val locale = Locale.GERMAN
val format = "EEEE"
val timeFormat = new TimeFormat(format, Some(locale))
val day = "Donnerstag"
assert(timeFormat.parse(day).format(format, locale) === day)
}
}
}
@RunWith(classOf[JUnitRunner])
class TimeTest extends { val ops = Time }
with TimeLikeSpec[Time]
with Eventually
with IntegrationPatience {
"Time" should {
"work in collections" in {
val t0 = Time.fromSeconds(100)
val t1 = Time.fromSeconds(100)
assert(t0 === t1)
assert(t0.hashCode === t1.hashCode)
val pairs = List((t0, "foo"), (t1, "bar"))
assert(pairs.groupBy { case (time: Time, value: String) => time } === Map(t0 -> pairs))
}
"now should be now" in {
assert((Time.now.inMillis - System.currentTimeMillis).abs < 20L)
}
"withTimeAt" in {
val t0 = new Time(123456789L)
Time.withTimeAt(t0) { _ =>
assert(Time.now === t0)
Thread.sleep(50)
assert(Time.now === t0)
}
assert((Time.now.inMillis - System.currentTimeMillis).abs < 20L)
}
"withTimeAt nested" in {
val t0 = new Time(123456789L)
val t1 = t0 + 10.minutes
Time.withTimeAt(t0) { _ =>
assert(Time.now === t0)
Time.withTimeAt(t1) { _ =>
assert(Time.now === t1)
}
assert(Time.now === t0)
}
assert((Time.now.inMillis - System.currentTimeMillis).abs < 20L)
}
"withTimeAt threaded" in {
val t0 = new Time(314159L)
val t1 = new Time(314160L)
Time.withTimeAt(t0) { tc =>
assert(Time.now === t0)
Thread.sleep(50)
assert(Time.now === t0)
tc.advance(Duration.fromNanoseconds(1))
assert(Time.now === t1)
tc.set(t0)
assert(Time.now === t0)
@volatile var threadTime: Option[Time] = None
val thread = new Thread {
override def run() {
threadTime = Some(Time.now)
}
}
thread.start()
thread.join()
assert(threadTime.get != t0)
}
assert((Time.now.inMillis - System.currentTimeMillis).abs < 20L)
}
"withTimeFunction" in {
val t0 = Time.now
var t = t0
Time.withTimeFunction(t) { _ =>
assert(Time.now === t0)
Thread.sleep(50)
assert(Time.now === t0)
val delta = 100.milliseconds
t += delta
assert(Time.now === t0 + delta)
}
}
"withCurrentTimeFrozen" in {
val t0 = new Time(123456789L)
Time.withCurrentTimeFrozen { _ =>
val t0 = Time.now
Thread.sleep(50)
assert(Time.now === t0)
}
assert((Time.now.inMillis - System.currentTimeMillis).abs < 20L)
}
"advance" in {
val t0 = new Time(123456789L)
val delta = 5.seconds
Time.withTimeAt(t0) { tc =>
assert(Time.now === t0)
tc.advance(delta)
assert(Time.now === (t0 + delta))
}
assert((Time.now.inMillis - System.currentTimeMillis).abs < 20L)
}
"sleep" in {
Time.withCurrentTimeFrozen { ctl =>
val ctx = Local.save()
val r = new Runnable {
def run() {
Local.restore(ctx)
Time.sleep(5.seconds)
}
}
@volatile var x = 0
val t = new Thread(r)
t.start()
assert(t.isAlive == true)
eventually { assert(t.getState === Thread.State.TIMED_WAITING) }
ctl.advance(5.seconds)
t.join()
assert(t.isAlive === false)
}
}
"compare" in {
assert(10.seconds.afterEpoch < 11.seconds.afterEpoch)
assert(10.seconds.afterEpoch === 10.seconds.afterEpoch)
assert(11.seconds.afterEpoch > 10.seconds.afterEpoch)
assert(Time.fromMilliseconds(Long.MaxValue) > Time.now)
}
"+ delta" in {
assert(10.seconds.afterEpoch + 5.seconds === 15.seconds.afterEpoch)
}
"- delta" in {
assert(10.seconds.afterEpoch - 5.seconds === 5.seconds.afterEpoch)
}
"- time" in {
assert(10.seconds.afterEpoch - 5.seconds.afterEpoch === 5.seconds)
}
"max" in {
assert((10.seconds.afterEpoch max 5.seconds.afterEpoch) === 10.seconds.afterEpoch)
assert((5.seconds.afterEpoch max 10.seconds.afterEpoch) === 10.seconds.afterEpoch)
}
"min" in {
assert((10.seconds.afterEpoch min 5.seconds.afterEpoch) === 5.seconds.afterEpoch)
assert((5.seconds.afterEpoch min 10.seconds.afterEpoch) === 5.seconds.afterEpoch)
}
"moreOrLessEquals" in {
val now = Time.now
assert(now.moreOrLessEquals(now + 1.second, 1.second) === true)
assert(now.moreOrLessEquals(now - 1.seconds, 1.second) === true)
assert(now.moreOrLessEquals(now + 2.seconds, 1.second) === false)
assert(now.moreOrLessEquals(now - 2.seconds, 1.second) === false)
}
"floor" in {
val format = new TimeFormat("yyyy-MM-dd HH:mm:ss.SSS")
val t0 = format.parse("2010-12-24 11:04:07.567")
assert(t0.floor(1.millisecond) === t0)
assert(t0.floor(10.milliseconds) === format.parse("2010-12-24 11:04:07.560"))
assert(t0.floor(1.second) === format.parse("2010-12-24 11:04:07.000"))
assert(t0.floor(5.second) === format.parse("2010-12-24 11:04:05.000"))
assert(t0.floor(1.minute) === format.parse("2010-12-24 11:04:00.000"))
assert(t0.floor(1.hour) === format.parse("2010-12-24 11:00:00.000"))
}
"since" in {
val t0 = Time.now
val t1 = t0 + 10.seconds
assert(t1.since(t0) === 10.seconds)
assert(t0.since(t1) === (-10).seconds)
}
"sinceEpoch" in {
val t0 = Time.epoch + 100.hours
assert(t0.sinceEpoch === 100.hours)
}
"sinceNow" in {
Time.withCurrentTimeFrozen { _ =>
val t0 = Time.now + 100.hours
assert(t0.sinceNow === 100.hours)
}
}
"fromSeconds(Double)" in {
val tolerance = 2.microseconds // we permit 1us slop
forAll { i: Int =>
assert(Time.fromSeconds(i).moreOrLessEquals(Time.fromFractionalSeconds(i.toDouble), tolerance))
}
forAll { d: Double =>
val magic = 9223372036854775L // cribbed from Time.fromMicroseconds
val microseconds = d * 1.second.inMicroseconds
whenever (microseconds > -magic && microseconds < magic) {
assert(Time.fromMicroseconds(microseconds.toLong).moreOrLessEquals(Time.fromFractionalSeconds(d), tolerance))
}
}
forAll { l: Long =>
val seconds: Double = l.toDouble / 1.second.inNanoseconds
assert(Time.fromFractionalSeconds(seconds).moreOrLessEquals(Time.fromNanoseconds(l), tolerance))
}
}
"fromMicroseconds" in {
assert(Time.fromMicroseconds(0).inNanoseconds === 0L)
assert(Time.fromMicroseconds(-1).inNanoseconds === -1L * 1000L)
assert(Time.fromMicroseconds(Long.MaxValue).inNanoseconds === Long.MaxValue)
assert(Time.fromMicroseconds(Long.MaxValue-1) === Time.Top)
assert(Time.fromMicroseconds(Long.MinValue) === Time.Bottom)
assert(Time.fromMicroseconds(Long.MinValue+1) === Time.Bottom)
val currentTimeMicros = System.currentTimeMillis()*1000
assert(Time.fromMicroseconds(currentTimeMicros).inNanoseconds === currentTimeMicros.microseconds.inNanoseconds)
}
"fromMillis" in {
assert(Time.fromMilliseconds(0).inNanoseconds === 0L)
assert(Time.fromMilliseconds(-1).inNanoseconds === -1L * 1000000L)
assert(Time.fromMilliseconds(Long.MaxValue).inNanoseconds === Long.MaxValue)
assert(Time.fromMilliseconds(Long.MaxValue-1) === Time.Top)
assert(Time.fromMilliseconds(Long.MinValue) === Time.Bottom)
assert(Time.fromMilliseconds(Long.MinValue+1) === Time.Bottom)
val currentTimeMs = System.currentTimeMillis
assert(Time.fromMilliseconds(currentTimeMs).inNanoseconds === currentTimeMs * 1000000L)
}
"until" in {
val t0 = Time.now
val t1 = t0 + 10.seconds
assert(t0.until(t1) === 10.seconds)
assert(t1.until(t0) === (-10).seconds)
}
"untilEpoch" in {
val t0 = Time.epoch - 100.hours
assert(t0.untilEpoch === 100.hours)
}
"untilNow" in {
Time.withCurrentTimeFrozen { _ =>
val t0 = Time.now - 100.hours
assert(t0.untilNow === 100.hours)
}
}
}
}
|
slackhappy/util
|
util-core/src/test/scala/com/twitter/util/TimeTest.scala
|
Scala
|
apache-2.0
| 19,957
|
package lambdacalculus.logging
import ch.qos.logback.core.LayoutBase
import ch.qos.logback.classic.spi.ILoggingEvent
import java.util.Date
import java.text.SimpleDateFormat
class LogbackDefaultLayout extends LayoutBase[ILoggingEvent] {
override def doLayout(event:ILoggingEvent): String = {
val elapsedTimeMs = event.getTimeStamp - event.getLoggerContextVO.getBirthTime
val date = new Date(elapsedTimeMs)
val formatter = new SimpleDateFormat("ss:SSS")
val elapsedTime = formatter.format(date)
val level = event.getLevel
val loggerName = event.getLoggerName.split("\\.").last
val msg = event.getFormattedMessage
val msgStr = s"($elapsedTime) [$level] $loggerName: \n$msg"
val longestLineOfMsg = msgStr.split("\n").reduceLeft( (s1, s2) =>
if(s1.size > s2.size) s1 else s2
).size
val msgSep = "-" * (if(longestLineOfMsg <80) longestLineOfMsg else 80)
s"$msgStr\n$msgSep\n"
}
}
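// Hedged illustration (not part of the original file) of what doLayout produces for a single
// event logged roughly 1.234 seconds after the logger context started; the logger name, level
// and message text are made-up examples:
//
//   (01:234) [INFO] MyService:
//   something happened
//   with a second, longer detail line
//   ---------------------------------
//
// The dashed separator is as long as the longest line of the rendered message, capped at 80.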
|
cn-uofbasel/nfn-scala
|
lambdacalc/src/main/scala/lambdacalculus/logging/LogbackDefaultLayout.scala
|
Scala
|
isc
| 943
|
package org.jetbrains.plugins.scala.testingSupport.scalatest.staticStringTest
import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTestTestCase
/**
* @author Roman.Shein
* @since 26.06.2015.
*/
trait FreeSpecStaticStringTest extends ScalaTestTestCase {
val freeSpecClassName = "FreeSpecStringTest"
val freeSpecFileName = freeSpecClassName + ".scala"
addSourceFile(freeSpecFileName,
s"""
|import org.scalatest._
|
|class $freeSpecClassName extends FreeSpec {
| val constName = " consts"
| val otherConstName = "Const name"
| val innerConst = "InnerConst"
|
| "A" + " FreeSpecTest" - {
| "should work with sum" in {
| }
|
| "should work with sum of" + constName in {
| }
| }
|
| otherConstName - {
| "innerNonConst" in {
| }
|
| innerConst in {
| }
| }
|
| "base " + foo() - {
| "unreachable" in {
| }
| }
|}
""".stripMargin.trim()
)
def testFreeSpecSum() = {
assert(checkConfigAndSettings(createTestFromLocation(8, 7, freeSpecFileName), freeSpecClassName,
"A FreeSpecTest should work with sum"))
}
def testFreeSpecVal() = {
assert(checkConfigAndSettings(createTestFromLocation(16, 7, freeSpecFileName), freeSpecClassName,
"Const name innerNonConst"))
assert(checkConfigAndSettings(createTestFromLocation(19, 7, freeSpecFileName), freeSpecClassName,
"Const name InnerConst"))
}
def testFreeSpecValSum() = {
assert(checkConfigAndSettings(createTestFromLocation(11, 7, freeSpecFileName), freeSpecClassName,
"A FreeSpecTest should work with sum of consts"))
}
def testFreeSpecNonConst() = {
assert(checkConfigAndSettings(createTestFromLocation(24, 7, freeSpecFileName), freeSpecClassName))
}
}
|
ilinum/intellij-scala
|
test/org/jetbrains/plugins/scala/testingSupport/scalatest/staticStringTest/FreespecStaticStringTest.scala
|
Scala
|
apache-2.0
| 1,897
|
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator.schema.v1_1
import uk.gov.nationalarchives.csv.validator._
import uk.gov.nationalarchives.csv.validator.schema.v1_0.{SchemaValidator => SchemaValidator1_0}
import uk.gov.nationalarchives.csv.validator.schema.{ColumnDefinition, GlobalDirective, Rule}
/**
* Set of rules to validate the schema (compatible with the CSV Schema 1.1)
* Overrides the 1.0 rules for backward compatibility and updates only the rules that need to change.
* @author Valy Dia
*/
class SchemaValidator extends SchemaValidator1_0 {
override protected def rangeValid(columnDefinitions: List[ColumnDefinition]): Option[String] = {
def rangeCheck(rule: Rule): Option[String] = rule match {
case RangeRule(None,None) => Some(s"""Invalid range in 'range(*,*)' at least one value needs to be defined""")
case RangeRule(Some(min),Some(max)) => if (min > max) Some(s"""Invalid range, minimum greater than maximum in: 'range($min,$max)' at line: ${rule.pos.line}, column: ${rule.pos.column}""") else None
case _ => None
}
val v = for {
cd <- columnDefinitions
rule <- cd.rules
message = rangeCheck(rule)
if (message.isDefined)
} yield {
val errormessage = rule match {
case range:RangeRule => message.getOrElse("")
case _ => s"""Column: ${cd.id}: Invalid range, minimum greater than maximum: at line: ${rule.pos.line}, column: ${rule.pos.column}"""
}
s"""Column: ${cd.id}: """ + errormessage
}
if (v.isEmpty) None else Some(v.mkString(EOL))
}
}
object SchemaValidator {
def apply(g: List[GlobalDirective], c: List[ColumnDefinition]): String = {
val parser = new SchemaValidator()
parser.validate(g,c)
}
}
|
valydia/csv-validator
|
csv-validator-core/src/main/scala/uk/gov/nationalarchives/csv/validator/schema/v1_1/SchemaValidator.scala
|
Scala
|
mpl-2.0
| 2,081
|
object ScalaStuff {
// val run: Runnable = () => { /*do it*/ }
val run: Runnable = new Runnable() {
override def run() {
//do it
}
}
import scala.language.implicitConversions
implicit def functionToRunnable(f: () => Unit): Runnable = new Runnable() {
override def run() = f()
}
val run2: Runnable = () => { /*do it*/ }
def doIt(): Unit = { /* do it*/ }
val run3: Runnable = doIt _
val doIt2: () => Unit = () => { /*do it*/ }
def run4: Runnable = doIt2
val doIt3 = () => { /*do it*/ }
def run5: Runnable = doIt3
val doIt4 = new Function0[Unit] { override def apply(): Unit = { /*do it*/ } }
def run6: Runnable = doIt4
}
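// Hedged usage sketch (not part of the original file): any of the Runnable values defined in
// ScalaStuff can be handed to a plain java.lang.Thread; run2 is used here only as an example.
object ScalaStuffUsageSketch {
  def main(args: Array[String]): Unit = {
    val t = new Thread(ScalaStuff.run2)
    t.start()
    t.join()
  }
}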
|
zcox/iascala-functions-concurrency
|
src/main/scala/stuff.scala
|
Scala
|
mit
| 675
|
package com.github.aselab.activerecord.squeryl
import com.github.aselab.activerecord._
import com.github.aselab.activerecord.dsl._
import org.squeryl.dsl._
import java.util.{Date, UUID}
import java.sql.Timestamp
import reflections._
class ExpressionConversion(field: FieldInfo) {
import ReflectionUtil._
def toExpression(value: Any): dsl.TypedExpression[_, _] = field match {
case f if f.is[String] => value.toOption[String]
case f if f.is[Boolean] => value.toOption[Boolean]
case f if f.is[Int] => value.toOption[Int]
case f if f.is[Long] => value.toOption[Long]
case f if f.is[Float] => value.toOption[Float]
case f if f.is[Double] => value.toOption[Double]
case f if f.is[BigDecimal] => value.toOption[BigDecimal]
case f if f.is[Timestamp] => value.toOption[Timestamp]
case f if f.is[Date] => value.toOption[Date]
case f if f.is[UUID] => value.toOption[UUID]
case _ => throw ActiveRecordException.unsupportedType(field.name)
}
def toEqualityExpression(v1: => Any, v2: => Any): ast.EqualityExpression =
new ast.EqualityExpression(toExpression(v1), toExpression(v2))
def toOrderByExpression(v1: => Any, order: String): ExpressionNode = {
val arg = new OrderByArg(toExpression(v1))
order.toLowerCase match {
case "asc" => arg.asc
case "desc" => arg.desc
case _ => throw new IllegalArgumentException("order must be 'asc' or 'desc'")
}
}
def toInExpression(v1: Any, v2: List[Any]): ast.InclusionOperator = try {
new ast.InclusionOperator(toExpression(v1), new ast.RightHandSideOfIn(
new ast.ConstantExpressionNodeList(v2, field match {
case f if f.is[String] => optionStringTEF.createOutMapper
case f if f.is[Boolean] => optionBooleanTEF.createOutMapper
case f if f.is[Int] => optionIntTEF.createOutMapper
case f if f.is[Long] => optionLongTEF.createOutMapper
case f if f.is[Float] => optionFloatTEF.createOutMapper
case f if f.is[Double] => optionDoubleTEF.createOutMapper
case f if f.is[BigDecimal] => optionBigDecimalTEF.createOutMapper
case f if f.is[Timestamp] => optionTimestampTEF.createOutMapper
case f if f.is[Date] => optionDateTEF.createOutMapper
case f if f.is[UUID] => optionUUIDTEF.createOutMapper
case _ => throw ActiveRecordException.unsupportedType(field.name)
})
))
} catch {
case e: RuntimeException => throw ActiveRecordException.unsupportedType(field.name)
}
}
|
aselab/scala-activerecord
|
activerecord/src/main/scala/squeryl/ExpressionConversion.scala
|
Scala
|
mit
| 2,494
|
package orz.mongo.tochka.test
import scala.util.Random
import com.mongodb.casbah.Imports._
import orz.mongo.tochka._
import orz.mongo.tochka.test.util.Mongo
class NumberFieldTest extends TestSuiteBase[Numbers] {
val testee = Seq(
Numbers(10, 100L, 0.01d),
Numbers(20, 200L, 0.02d),
Numbers(30, 300L, 0.03d),
Numbers(40, 400L, 0.04d),
Numbers(50, 500L, 0.05d)
)
val randomIndex = new Random().nextInt(testee.size)
test("insert") {
Mongo.drive(conf) { implicit db =>
val testee = Numbers(Int.MaxValue, Long.MaxValue, Double.MaxValue)
init(Seq(testee))
info(s"Numbers.findOne(_id $$eq ${testee._id}) @casbah")
val casbah = db("Numbers").findOne("_id" $eq testee._id)
casbah match {
case Some(casb) =>
info(s"-> $casb")
casb.get("_id") shouldEqual testee._id
casb.get("int") shouldEqual testee.int
casb.get("long") shouldEqual testee.long
casb.get("double") shouldEqual testee.double
case None =>
fail("find by casbah result is None")
}
}
}
test("find int eql ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee(randomIndex)
info(s"Numbers.where(int == ${cond.int}).find")
val result = Numbers.where(_.int == cond.int).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.int == cond.int).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_._id) shouldEqual expect
info(s"Numbers.find(int $$eq ${cond.int}) @casbah")
val casbah = db("Numbers").find("int" $eq cond.int).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find int neq ?)") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee(randomIndex)
info(s"Numbers.where(int != ${cond.int}).find")
val result = Numbers.where(_.int != cond.int).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.int != cond.int).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_._id) shouldEqual expect
info(s"Numbers.find(int $$ne ${cond.int}) @casbah")
val casbah = db("Numbers").find("int" $ne cond.int).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find int lt ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.int).apply(testee.size / 2)
info(s"Numbers.where(int < ${cond.int}).find")
val result = Numbers.where(_.int < cond.int).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.int < cond.int).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.int) shouldEqual expect
info(s"Numbers.find(int $$lt ${cond.int}) @casbah")
val casbah = db("Numbers").find("int" $lt cond.int).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find int lte ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.int).apply(testee.size / 2)
info(s"Numbers.where(int <= ${cond.int}).find")
val result = Numbers.where(_.int <= cond.int).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.int <= cond.int).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.int) shouldEqual expect
info(s"Numbers.find(int $$lte ${cond.int}) @casbah")
val casbah = db("Numbers").find("int" $lte cond.int).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find int gt ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.int).apply(testee.size / 2)
info(s"Numbers.where(int > ${cond.int}).find")
val result = Numbers.where(_.int > cond.int).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.int > cond.int).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.int) shouldEqual expect
info(s"Numbers.find(int $$gt ${cond.int}) @casbah")
val casbah = db("Numbers").find("int" $gt cond.int).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find int gte ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.int).apply(testee.size / 2)
info(s"Numbers.where(int >= ${cond.int}).find")
val result = Numbers.where(_.int >= cond.int).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.int >= cond.int).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.int) shouldEqual expect
info(s"Numbers.find(int $$gte ${cond.int}) @casbah")
val casbah = db("Numbers").find("int" $gte cond.int).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find long eql ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee(randomIndex)
info(s"Numbers.where(long == ${cond.long}).find")
val result = Numbers.where(_.long == cond.long).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.long == cond.long).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_._id) shouldEqual expect
info(s"Numbers.find(long $$eq ${cond.long}) @casbah")
val casbah = db("Numbers").find("long" $eq cond.long).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find long neq ?)") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee(randomIndex)
info(s"Numbers.where(long != ${cond.long}).find")
val result = Numbers.where(_.long != cond.long).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.long != cond.long).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_._id) shouldEqual expect
info(s"Numbers.find(long $$ne ${cond.long}) @casbah")
val casbah = db("Numbers").find("long" $ne cond.long).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find long lt ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.long).apply(testee.size / 2)
info(s"Numbers.where(long < ${cond.long}).find")
val result = Numbers.where(_.long < cond.long).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.long < cond.long).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.long) shouldEqual expect
info(s"Numbers.find(long $$lt ${cond.long}) @casbah")
val casbah = db("Numbers").find("long" $lt cond.long).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find long lte ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.long).apply(testee.size / 2)
info(s"Numbers.where(long <= ${cond.long}).find")
val result = Numbers.where(_.long <= cond.long).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.long <= cond.long).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.long) shouldEqual expect
info(s"Numbers.find(long $$lte ${cond.long}) @casbah")
val casbah = db("Numbers").find("long" $lte cond.long).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find long gt ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.long).apply(testee.size / 2)
info(s"Numbers.where(long > ${cond.long}).find")
val result = Numbers.where(_.long > cond.long).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.long > cond.long).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.long) shouldEqual expect
info(s"Numbers.find(long $$gt ${cond.long}) @casbah")
val casbah = db("Numbers").find("long" $gt cond.long).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find long gte ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.long).apply(testee.size / 2)
info(s"Numbers.where(long >= ${cond.long}).find")
val result = Numbers.where(_.long >= cond.long).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.long >= cond.long).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.long) shouldEqual expect
info(s"Numbers.find(long $$gte ${cond.long}) @casbah")
val casbah = db("Numbers").find("long" $gte cond.long).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find double eql ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee(randomIndex)
info(s"Numbers.where(double == ${cond.double}).find")
val result = Numbers.where(_.double == cond.double).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.double == cond.double).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_._id) shouldEqual expect
info(s"Numbers.find(double $$eq ${cond.double}) @casbah")
val casbah = db("Numbers").find("double" $eq cond.double).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find double neq ?)") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee(randomIndex)
info(s"Numbers.where(double != ${cond.double}).find")
val result = Numbers.where(_.double != cond.double).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.double != cond.double).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_._id) shouldEqual expect
info(s"Numbers.find(double $$ne ${cond.double}) @casbah")
val casbah = db("Numbers").find("double" $ne cond.double).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find double lt ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.double).apply(testee.size / 2)
info(s"Numbers.where(double < ${cond.double}).find")
val result = Numbers.where(_.double < cond.double).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.double < cond.double).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.double) shouldEqual expect
info(s"Numbers.find(double $$lt ${cond.double}) @casbah")
val casbah = db("Numbers").find("double" $lt cond.double).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find double lte ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.double).apply(testee.size / 2)
info(s"Numbers.where(double <= ${cond.double}).find")
val result = Numbers.where(_.double <= cond.double).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.double <= cond.double).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.double) shouldEqual expect
info(s"Numbers.find(double $$lte ${cond.double}) @casbah")
val casbah = db("Numbers").find("double" $lte cond.double).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find double gt ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.double).apply(testee.size / 2)
info(s"Numbers.where(double > ${cond.double}).find")
val result = Numbers.where(_.double > cond.double).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.double > cond.double).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.double) shouldEqual expect
info(s"Numbers.find(double $$gt ${cond.double}) @casbah")
val casbah = db("Numbers").find("double" $gt cond.double).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
test("find double gte ?") {
Mongo.drive(conf) { implicit db =>
init()
val cond = testee.sortBy(_.double).apply(testee.size / 2)
info(s"Numbers.where(double >= ${cond.double}).find")
val result = Numbers.where(_.double >= cond.double).find
result.foreach(it => info(s"-> $it"))
val expect = testee.filter(_.double >= cond.double).sortBy(_._id)
result.size shouldEqual expect.size
result.sortBy(_.double) shouldEqual expect
info(s"Numbers.find(double $$gte ${cond.double}) @casbah")
val casbah = db("Numbers").find("double" $gte cond.double).toList
casbah.foreach(it => info(s"-> $it"))
assertEquals2casbah(result, casbah)
}
}
}
case class Numbers(int: Int, long: Long, double: Double, _id: ObjectId = new ObjectId) {
override def toString = s"Numbers(int=${int}, long=${long}, double=${double}, _id=${_id})"
}
object Numbers extends Schema[Numbers] {
case object int extends IntField
case object long extends LongField
case object double extends DoubleField
case object _id extends IdField
}
|
fkmt-disk/tochka
|
src/test/scala/orz/mongo/tochka/test/NumberFieldTest.scala
|
Scala
|
mit
| 14,446
|
package com.rajiv.server
import java.nio.ByteBuffer
object Protocol {
val MAX_LENGTH = 128
}
class Protocol(var length: Int, var payload: ByteBuffer) {
def reset() {
length = -1
payload = null
}
def isInitialized(): Boolean = {
(length > 0 && payload != null)
}
def reset(length: Int, payload: ByteBuffer) {
this.length = length
this.payload = payload
}
def isComplete(): Boolean = {
payload.position() == length
}
}
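// Hedged usage sketch (not part of the original file): a Protocol instance tracks how many of
// the expected `length` payload bytes have been buffered so far, and reports completion once
// the buffer's position reaches that length.
object ProtocolUsageSketch {
  import java.nio.ByteBuffer

  def main(args: Array[String]): Unit = {
    val payload = ByteBuffer.allocate(Protocol.MAX_LENGTH)
    val message = new Protocol(4, payload)
    payload.put(Array[Byte](1, 2, 3))
    println(message.isComplete()) // false: only 3 of 4 bytes buffered
    payload.put(4.toByte)
    println(message.isComplete()) // true: all 4 bytes buffered
  }
}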
|
RajivKurian/Java-NIO-example
|
src/main/scala/com/rajiv/server/Protocol.scala
|
Scala
|
apache-2.0
| 467
|
import java.io.File
import sbt._
import sys.process._
/**
* @author Emmanouil Antonios Platanios
*/
object BuildTool {
sealed trait BuildTool {
/** Name of this build tool. */
val name: String
/** Detects whether or not this build tool is configured in the provided directory.
*
* For example, for the Make build tool, this would return `true` if a Makefile is present in the directory.
*
* @param baseDirectory Directory to check.
* @return Boolean value indicating whether or not this build tool is configured in the provided directory.
*/
def detect(baseDirectory: File): Boolean
/** Represents an instance (i.e., a build configuration) of this build tool, containing the actual tasks that
  * can be performed on a specific configuration (such as those configured in your Makefile). */
trait Instance {
/** Invokes the native build tool's clean task */
def clean(): Unit
/** Invokes the native build tool's main task, resulting in one or more shared library files.
  *
  * @param targetDirectory The directory into which the shared libraries are copied.
  * @return The shared library files.
  */
def libraries(targetDirectory: File): Seq[File]
}
/** Gets an instance (i.e., a build configuration) of this tool for the specified directory. */
def getInstance(baseDirectory: File, buildDirectory: File, logger: Logger): Instance
}
/** Trait that defines an API for native build tools that use a standard `configure && make && make install` process,
* where the configure step is left abstract. */
sealed trait ConfigureMakeInstall { self: BuildTool =>
trait Instance extends self.Instance {
val log : Logger
val baseDirectory : File
val buildDirectory: File
def clean(): Unit = if (buildDirectory.list().contains("Makefile")) Process("make clean", buildDirectory) ! log
def configure(targetDirectory: File): ProcessBuilder
def make(): ProcessBuilder = Process("make VERBOSE=1", buildDirectory)
def install(): ProcessBuilder = Process("make install", buildDirectory)
def libraries(targetDirectory: File): Seq[File] = {
val exitCode: Int = (configure(targetDirectory) #&& make() #&& install()) ! log
if (exitCode != 0)
sys.error(s"Failed to build the native library. Exit code: $exitCode.")
val products: List[File] = (targetDirectory ** ("*.so" | "*.dylib" | "*.dll")).get.filter(_.isFile).toList
if (products == Nil)
sys.error(s"No files were created during compilation, something went wrong with the $name configuration.")
products
}
}
}
/** Make build tool. */
object Make extends BuildTool with ConfigureMakeInstall {
override val name: String = "Make"
override def detect(baseDirectory: File): Boolean = baseDirectory.list().contains("Makefile")
override def getInstance(baseDir: File, buildDir: File, logger: Logger) = new Instance {
override val log : Logger = logger
override val baseDirectory : File = baseDir
override val buildDirectory: File = buildDir
override def configure(target: File): ProcessBuilder = Process(
s"cp ${baseDirectory.getAbsolutePath}/Makefile $buildDirectory/Makefile", buildDirectory)
}
}
/** Autotools build tool. */
object Autotools extends BuildTool with ConfigureMakeInstall {
val name: String = "Autotools"
def detect(baseDirectory: File): Boolean = baseDirectory.list().contains("configure")
override def getInstance(baseDir: File, buildDir: File, logger: Logger) = new Instance {
override val log : Logger = logger
override val baseDirectory : File = baseDir
override val buildDirectory: File = buildDir
override def configure(target: File): ProcessBuilder = Process(
// Disable producing versioned library files since that is not needed for fat JAR files.
s"${baseDirectory.getAbsolutePath}/configure " +
s"--prefix=${target.getAbsolutePath} " +
s"--libdir=${target.getAbsolutePath} " +
"--disable-versioned-lib",
buildDirectory)
}
}
/** CMake build tool. */
object CMake extends BuildTool with ConfigureMakeInstall {
override val name: String = "CMake"
override def detect(baseDirectory: File): Boolean = baseDirectory.list().contains("CMakeLists.txt")
override def getInstance(baseDir: File, buildDir: File, logger: Logger) = new Instance {
override val log : Logger = logger
override val baseDirectory : File = baseDir
override val buildDirectory: File = buildDir
override def configure(target: File): ProcessBuilder = Process(
// Disable producing versioned library files since that is not needed for fat JAR files.
"cmake" ::
s"-DLD_LIBRARY_PATH=${System.getProperty("java.library.path")}:$$LD_LIBRARY_PATH" ::
s"-DCMAKE_INSTALL_PREFIX:PATH=${target.getAbsolutePath}" ::
"-DCMAKE_BUILD_TYPE=Release" ::
baseDirectory.getAbsolutePath :: Nil,
buildDirectory)
}
}
}
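// Hedged usage sketch (not part of the original file): pick whichever build tool's
// configuration files are present in `baseDir` and build native libraries into `targetDir`.
// The directory and logger arguments are assumptions supplied by the caller, e.g. from an
// sbt task in the same build.
object BuildToolUsageSketch {
  import java.io.File
  import sbt.Logger

  def buildNative(baseDir: File, buildDir: File, targetDir: File, log: Logger): Seq[File] = {
    val candidates = Seq(BuildTool.CMake, BuildTool.Autotools, BuildTool.Make)
    val tool = candidates.find(_.detect(baseDir))
      .getOrElse(sys.error("No supported native build tool (CMake, Autotools, Make) detected."))
    val instance = tool.getInstance(baseDir, buildDir, log)
    instance.clean()
    instance.libraries(targetDir)
  }
}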
|
eaplatanios/tensorflow
|
tensorflow/scala/project/BuildTool.scala
|
Scala
|
apache-2.0
| 5,229
|
import java.util.Properties
import com.typesafe.sbt.SbtNativePackager.autoImport.{maintainer, packageDescription, packageSummary}
import sbt.Keys._
import sbt._
import sbtrelease.ReleasePlugin.autoImport._
import sbtrelease.ReleasePlugin.autoImport.{ReleaseStep, ReleaseTransformations}
import sbtrelease.Version.Bump.Next
import sbtbuildinfo.BuildInfoPlugin.autoImport.{BuildInfoKey, buildInfoKeys, buildInfoPackage}
import com.typesafe.sbt.packager.universal.UniversalPlugin.autoImport._
import com.typesafe.sbt.packager.docker.DockerPlugin.autoImport.{dockerBaseImage, dockerUpdateLatest}
object Settings {
lazy val kamiRepoUrl = "http://192.168.1.101/"
lazy val commonSettings = scalaSettings ++ lintingSettings
lazy val settingsGatling = scalaSettings ++ buildSettings
lazy val publishSettings = packagerSettings ++ dockerSettings
lazy val doNotPublishSettings = Seq(publish := {}, publishLocal := {})
lazy val scalaSettings = Seq(
scalacOptions ++= Seq(
"-deprecation",
"-unchecked",
"-target:jvm-1.8",
"-feature",
"-language:_",
"-Yno-adapted-args",
"-Ywarn-numeric-widen",
"-Ywarn-value-discard",
"-Xfuture",
"-Ywarn-unused",
"-Ypartial-unification")
)
lazy val lintingSettings = Seq(
scalacOptions ++= Seq("-Xlint"),
scalacOptions in IntegrationTest ~= (_ filterNot (_ == "-Xlint"))
)
import ReleaseTransformations._
lazy val buildSettings = Seq(
organization := "kami",
organizationName := "Kami",
buildInfoPackage := "dataLoader",
maintainer := "Ievgen Liashchenko <ievgenen@gmail.com>",
packageDescription := "Personal Active Tutor",
packageSummary := "Personal Active Tutor",
buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion, sbtVersion),
publishMavenStyle := true,
showSuccess := true,
ivyLoggingLevel := UpdateLogging.Full,
releaseVersionBump := Next,
releaseIgnoreUntrackedFiles := true,
releaseVersionFile := file(".") / "version.sbt",
publishTo := {
val properties = new Properties()
IO.load(properties, new File("repository.properties"))
val repo = properties.getProperty("url")
val path = isSnapshot.value match {
case true => properties.getProperty("snapshot")
case false => properties.getProperty("release")
}
Some("Nexus Realm" at repo + path)
},
credentials in ThisBuild ++= Seq(
Credentials(Path.userHome / ".ivy2" / ".credentials")
),
publishArtifact in(Compile, packageSrc) := false,
resolvers ++= Seq(
"kamiReleasesRepo" at s"$kamiRepoUrl/nexus/content/repositories/kami/",
Resolver.typesafeRepo("releases").copy("typesafe-releases-custom"),
Resolver.sonatypeRepo("snapshots"),
Resolver.sonatypeRepo("releases")
)) ++
Seq(releaseProcess := releaseSteps)
lazy val packagerSettings = Seq(
mappings in Universal += {
val conf = (resourceDirectory in Compile).value / "remote" / "application.conf"
conf -> "conf/application.conf"
},
mappings in Universal += {
val logback = (resourceDirectory in Compile).value / "remote" / "logback.xml"
logback -> "conf/logback.xml"
},
javaOptions in Universal ++= Seq(
"-Dconfig.file=conf/application.conf",
"-Dlogback.configurationFile=conf/logback.xml"
)
)
lazy val dockerSettings = Seq(
dockerBaseImage := "kami/openjdk8:141",
dockerUpdateLatest := true
)
val releaseSteps = Seq[ReleaseStep](
inquireVersions,
runClean,
runTest,
setReleaseVersion,
commitReleaseVersion,
tagRelease,
releaseStepCommand("universal:publish"),
setNextVersion,
commitNextVersion,
pushChanges)
}
|
ievgenen/workingstats
|
project/Settings.scala
|
Scala
|
mit
| 3,735
|
package io.circe.tests
import algebra.Eq
import cats.std.AllInstances
import io.circe.{ Decoder, Encoder, Json }
import org.scalacheck.{ Arbitrary, Gen }
package object examples extends AllInstances with ArbitraryInstances with MissingInstances {
val glossary: Json = Json.obj(
"glossary" -> Json.obj(
"title" -> Json.string("example glossary"),
"GlossDiv" -> Json.obj(
"title" -> Json.string("S"),
"GlossList" -> Json.obj(
"GlossEntry" -> Json.obj(
"ID" -> Json.string("SGML"),
"SortAs" -> Json.string("SGML"),
"GlossTerm" -> Json.string("Standard Generalized Markup Language"),
"Acronym" -> Json.string("SGML"),
"Abbrev" -> Json.string("ISO 8879:1986"),
"GlossDef" -> Json.obj(
"para" -> Json.string(
"A meta-markup language, used to create markup languages such as DocBook."
),
"GlossSeeAlso" -> Json.array(Json.string("GML"), Json.string("XML"))
),
"GlossSee" -> Json.string("markup")
)
)
)
)
)
}
package examples {
case class Qux[A](i: Int, a: A)
object Qux {
implicit def eqQux[A: Eq]: Eq[Qux[A]] = Eq.by(_.a)
implicit def arbitraryQux[A](implicit A: Arbitrary[A]): Arbitrary[Qux[A]] =
Arbitrary(
for {
i <- Arbitrary.arbitrary[Int]
a <- A.arbitrary
} yield Qux(i, a)
)
}
case class Wub(x: Long)
object Wub {
implicit val eqWub: Eq[Wub] = Eq.by(_.x)
implicit val arbitraryWub: Arbitrary[Wub] =
Arbitrary(Arbitrary.arbitrary[Long].map(Wub(_)))
}
sealed trait Foo
case class Bar(i: Int, s: String) extends Foo
case class Baz(xs: List[String]) extends Foo
case class Bam(w: Wub, d: Double) extends Foo
object Baz {
implicit val decodeBaz: Decoder[Baz] = Decoder[List[String]].map(Baz(_))
implicit val encodeBaz: Encoder[Baz] = Encoder.instance {
case Baz(xs) => Json.fromValues(xs.map(Json.string))
}
}
object Foo {
implicit val eqFoo: Eq[Foo] = Eq.fromUniversalEquals
implicit val arbitraryFoo: Arbitrary[Foo] = Arbitrary(
Gen.oneOf(
for {
i <- Arbitrary.arbitrary[Int]
s <- Arbitrary.arbitrary[String]
} yield Bar(i, s),
Gen.listOf(Arbitrary.arbitrary[String]).map(Baz.apply),
for {
w <- Arbitrary.arbitrary[Wub]
d <- Arbitrary.arbitrary[Double]
} yield Bam(w, d)
)
)
}
}
|
beni55/circe
|
tests/shared/src/main/scala/io/circe/tests/examples/package.scala
|
Scala
|
apache-2.0
| 2,537
|
package us.feliscat.time
import us.feliscat.m17n.MultiLingual
/**
* <pre>
* Created on 2017/02/08.
* </pre>
*
* @author K.Sakamoto
*/
trait MultiLingualTimeExtractorForWorldHistory extends TimeExtractor with MultiLingual
|
ktr-skmt/FelisCatusZero-multilingual
|
libraries/src/main/scala/us/feliscat/time/MultiLingualTimeExtractorForWorldHistory.scala
|
Scala
|
apache-2.0
| 235
|
package pl.iterators.kebs.scalacheck
import enumeratum.ScalacheckInstances
import org.scalacheck.{Arbitrary, Gen, ScalacheckShapeless}
import pl.iterators.kebs.macros.CaseClass1Rep
import java.net.{URI, URL}
import java.time.temporal.ChronoUnit
import java.time._
import java.util.concurrent.TimeUnit
import scala.reflect.ClassTag
import scala.util.Random
trait CommonArbitrarySupport extends ScalacheckShapeless with ScalacheckInstances {
implicit def caseClass1RepArbitraryPredef[T, A](
implicit rep: CaseClass1Rep[T, A],
arbitrary: Arbitrary[A]
): Arbitrary[T] =
Arbitrary(arbitrary.arbitrary.map(rep.apply(_)))
}
trait MinimalArbitrarySupport {
implicit def emptyOption[T: Arbitrary]: Arbitrary[Option[T]] =
Arbitrary(Gen.const(Option.empty[T]))
implicit def emptySeq[T: Arbitrary]: Arbitrary[Seq[T]] =
Arbitrary(Gen.const(Seq.empty[T]))
implicit def emptyArray[T: Arbitrary: ClassTag]: Arbitrary[Array[T]] =
Arbitrary(Gen.const(Array.empty[T]))
implicit def emptySet[T: Arbitrary]: Arbitrary[Set[T]] =
Arbitrary(Gen.const(Set.empty[T]))
implicit def emptyVector[T: Arbitrary]: Arbitrary[Vector[T]] =
Arbitrary(Gen.const(Vector.empty[T]))
implicit def emptyList[T: Arbitrary]: Arbitrary[List[T]] =
Arbitrary(Gen.const(List.empty[T]))
implicit def emptyMap[T: Arbitrary, U: Arbitrary]: Arbitrary[Map[T, U]] =
Arbitrary(Gen.const(Map.empty[T, U]))
}
trait MaximalArbitrarySupport {
implicit def someOption[T: Arbitrary]: Arbitrary[Option[T]] =
Arbitrary(Gen.some(Arbitrary.arbitrary[T]))
implicit def nonEmptySeq[T: Arbitrary]: Arbitrary[Seq[T]] =
Arbitrary(Gen.listOfN(1 + Random.nextInt(3), Arbitrary.arbitrary[T]))
implicit def nonEmptyArray[T: Arbitrary: ClassTag]: Arbitrary[Array[T]] =
Arbitrary(Gen.listOfN(1 + Random.nextInt(3), Arbitrary.arbitrary[T]).map(_.toArray))
implicit def nonEmptySet[T: Arbitrary]: Arbitrary[Set[T]] =
Arbitrary(Gen.listOfN(1 + Random.nextInt(3), Arbitrary.arbitrary[T]).map(_.toSet))
implicit def nonEmptyVector[T: Arbitrary]: Arbitrary[Vector[T]] =
Arbitrary(Gen.listOfN(1 + Random.nextInt(3), Arbitrary.arbitrary[T]).map(_.toVector))
implicit def nonEmptyList[T: Arbitrary]: Arbitrary[List[T]] =
Arbitrary(Gen.listOfN(1 + Random.nextInt(3), Arbitrary.arbitrary[T]))
implicit def nonEmptyMap[T: Arbitrary, U: Arbitrary]: Arbitrary[Map[T, U]] =
Arbitrary(Gen.mapOfN(1 + Random.nextInt(3), Arbitrary.arbitrary[(T, U)]))
}
trait KebsArbitraryPredefs {
implicit val arbAlphaString: Arbitrary[String] =
Arbitrary(Gen.alphaNumStr)
implicit val arbInstant: Arbitrary[Instant] =
Arbitrary(Gen.calendar.map(_.toInstant))
implicit val arbLocalTime: Arbitrary[LocalTime] =
Arbitrary(Gen.calendar.map(_.toInstant.atZone(ZoneId.systemDefault()).toLocalTime))
implicit val arbLocalDate: Arbitrary[LocalDate] =
Arbitrary(Gen.calendar.map(_.toInstant.atZone(ZoneId.systemDefault()).toLocalDate))
implicit val arbLocalDateTime: Arbitrary[LocalDateTime] =
Arbitrary(Gen.calendar.map(_.toInstant.atZone(ZoneId.systemDefault()).toLocalDateTime))
implicit val arbZonedDataTime: Arbitrary[ZonedDateTime] =
Arbitrary(Gen.calendar.map(_.toInstant.atZone(ZoneId.systemDefault())))
implicit val arbDuration: Arbitrary[Duration] = Arbitrary(Gen.duration.map { duration =>
if (!duration.isFinite) ChronoUnit.FOREVER.getDuration
else if (duration.length == 0) Duration.ZERO
else
duration.unit match {
case TimeUnit.NANOSECONDS => Duration.ofNanos(duration.length)
case TimeUnit.MICROSECONDS => Duration.of(duration.length, ChronoUnit.MICROS)
case TimeUnit.MILLISECONDS => Duration.ofMillis(duration.length)
case TimeUnit.SECONDS => Duration.ofSeconds(duration.length)
case TimeUnit.MINUTES => Duration.ofMinutes(duration.length)
case TimeUnit.HOURS => Duration.ofHours(duration.length)
case TimeUnit.DAYS => Duration.ofDays(duration.length)
}
})
implicit val arbUrl: Arbitrary[URL] = Arbitrary {
for {
protocol <- Gen.oneOf("http", "https", "ftp", "file")
domain <- Gen.alphaNumStr
subdomain <- Gen.alphaNumStr
path <- Gen.alphaNumStr
} yield new URL(s"$protocol://$subdomain.$domain.test/$path")
}
implicit val arbUri: Arbitrary[URI] = Arbitrary(arbUrl.arbitrary.map(_.toURI))
}
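// Hedged usage sketch (not part of the original file): mixing KebsArbitraryPredefs into a small
// object and sampling one of the generators it provides (a random URL here).
object KebsArbitraryPredefsSketch extends KebsArbitraryPredefs {
  import java.net.URL
  import org.scalacheck.Arbitrary

  def main(args: Array[String]): Unit =
    println(Arbitrary.arbitrary[URL].sample)
}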
|
theiterators/kebs
|
scalacheck/src/main/scala/pl/iterators/kebs/scalacheck/ArbitrarySupport.scala
|
Scala
|
mit
| 4,414
|
/**
 * Copyright (c) 2016 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.trustedanalytics.sparktk.dicom.internal.constructors
import java.awt.image.Raster
import java.io._
import java.util.Iterator
import javax.imageio.stream.ImageInputStream
import javax.imageio.{ ImageIO, ImageReader }
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.StringUtils
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.DenseMatrix
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.trustedanalytics.sparktk.dicom.Dicom
import org.trustedanalytics.sparktk.frame.Frame
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
import org.dcm4che3.imageio.plugins.dcm.{ DicomImageReadParam, DicomImageReader }
import org.dcm4che3.io.DicomInputStream
import org.dcm4che3.io.DicomInputStream.IncludeBulkData
import org.dcm4che3.tool.dcm2xml.org.trustedanalytics.sparktk.Dcm2Xml
object Import extends Serializable {
/**
* Get Pixel Data from Dicom Input Stream represented as Array of Bytes
*
* @param byteArray Dicom Input Stream represented as Array of Bytes
* @return DenseMatrix Pixel Data
*/
def getPixeldata(byteArray: Array[Byte]): DenseMatrix = {
val pixeldataInputStream = new DataInputStream(new ByteArrayInputStream(byteArray))
val pixeldicomInputStream = new DicomInputStream(pixeldataInputStream)
//create matrix
val iter: Iterator[ImageReader] = ImageIO.getImageReadersByFormatName("DICOM")
val readers: DicomImageReader = iter.next.asInstanceOf[DicomImageReader]
val param: DicomImageReadParam = readers.getDefaultReadParam.asInstanceOf[DicomImageReadParam]
val iis: ImageInputStream = ImageIO.createImageInputStream(pixeldicomInputStream)
readers.setInput(iis, true)
//pixels data raster
val raster: Raster = readers.readRaster(0, param)
val cols = raster.getWidth
val rows = raster.getHeight
val data = Array.ofDim[Double](cols, rows)
// Read the pixel samples by (x, y) so that data(x) holds one column of the image,
// because MLlib's DenseMatrix expects its values in column-major order.
for (x <- 0 until cols) {
for (y <- 0 until rows) {
data(x)(y) = raster.getSample(x, y, 0)
}
}
new DenseMatrix(rows, cols, data.flatten)
}
/**
* Get Metadata Xml from Dicom Input Stream represented as byte array
*
* @param byteArray Dicom Input Stream represented as byte array
* @return String Xml Metadata
*/
def getMetadataXml(byteArray: Array[Byte]): String = {
val metadataInputStream = new DataInputStream(new ByteArrayInputStream(byteArray))
val metadataDicomInputStream = new DicomInputStream(metadataInputStream)
val dcm2xml = new Dcm2Xml()
val myOutputStream = new ByteArrayOutputStream()
dcm2xml.convert(metadataDicomInputStream, myOutputStream)
myOutputStream.toString()
}
/**
* Creates a dicom object with metadata and pixeldata frames
* |---> DataInputStream --> DicomInputStream --> Dcm2Xml --> Metadata XML (String)
* |
* Spark foreach DCM Image (FilePath, PortableDataStream) ---> ByteArray --->
* |
* |---> DataInputStream --> DicomInputStream --> ImageInputStream --> Raster --> Pixel Data (Dense Matrix)
*
* @param path Full path to the DICOM files directory
* @return Dicom object with MetadataFrame and PixeldataFrame
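* Illustrative call (the directory path below is hypothetical):
* {{{
* val dicom = Import.importDcm(sc, "hdfs:///data/dicom_images")
* }}}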
*/
def importDcm(sc: SparkContext, path: String, minPartitions: Int = 2): Dicom = {
val dicomFilesRdd = sc.binaryFiles(path, minPartitions)
val dcmMetadataPixelArrayRDD = dicomFilesRdd.map {
case (filePath, fileData) => {
// Open PortableDataStream to retrieve the bytes
val fileInputStream = fileData.open()
val byteArray = IOUtils.toByteArray(fileInputStream)
//Create the metadata xml
val metadata = getMetadataXml(byteArray)
//Create a dense matrix for pixel array
val pixeldata = getPixeldata(byteArray)
//Close the PortableDataStream
fileInputStream.close()
(metadata, pixeldata)
}
}.zipWithIndex()
dcmMetadataPixelArrayRDD.cache()
val sqlCtx = new SQLContext(sc)
import sqlCtx.implicits._
//create metadata pairrdd
val metaDataPairRDD: RDD[(Long, String)] = dcmMetadataPixelArrayRDD.map {
case (metadataPixeldata, id) => (id, metadataPixeldata._1)
}
val metadataDF = metaDataPairRDD.toDF("id", "metadata")
val metadataFrameRdd = FrameRdd.toFrameRdd(metadataDF)
val metadataFrame = new Frame(metadataFrameRdd, metadataFrameRdd.frameSchema)
//create image matrix pair rdd
val imageMatrixPairRDD: RDD[(Long, DenseMatrix)] = dcmMetadataPixelArrayRDD.map {
case (metadataPixeldata, id) => (id, metadataPixeldata._2)
}
val imageDF = imageMatrixPairRDD.toDF("id", "imagematrix")
val pixeldataFrameRdd = FrameRdd.toFrameRdd(imageDF)
val pixeldataFrame = new Frame(pixeldataFrameRdd, pixeldataFrameRdd.frameSchema)
new Dicom(metadataFrame, pixeldataFrame)
}
}
|
trustedanalytics/spark-tk
|
sparktk-core/src/main/scala/org/trustedanalytics/sparktk/dicom/internal/constructors/Import.scala
|
Scala
|
apache-2.0
| 5,997
|
package org.ucf.scala
/**
* @author
*/
import org.junit.Test
import org.junit.Assert._
class ScalaTestAPP {
@Test def testAdd() {
println("Hello World From Scala")
assertTrue(true)
}
}
|
bingrao/Scala-Learning
|
Underscore/src/test/scala/org/ucf/scala/ScalaTestApp.scala
|
Scala
|
mit
| 213
|
object line1 {
trait MyTrait
}
object line2 {
import line2._
class BugTest {def isTheBugHere(in: MyTrait.this.type#SomeData) = false}
}
|
scala/scala
|
test/files/neg/t8534.scala
|
Scala
|
apache-2.0
| 142
|
package org.scalaide.debug.internal.command
import java.util.concurrent.atomic.AtomicBoolean
import scala.concurrent.Future
import org.eclipse.debug.core.DebugEvent
import org.scalaide.debug.internal.JdiEventReceiver
import org.scalaide.debug.internal.model.JdiRequestFactory
import org.scalaide.debug.internal.model.ScalaDebugTarget
import org.scalaide.debug.internal.model.ScalaStackFrame
import org.scalaide.debug.internal.model.ScalaThread
import com.sun.jdi.event.StepEvent
import com.sun.jdi.request.StepRequest
object ScalaStepReturn {
def apply(scalaStackFrame: ScalaStackFrame): ScalaStep = {
val stepReturnRequest = JdiRequestFactory.createStepRequest(StepRequest.STEP_LINE, StepRequest.STEP_OUT, scalaStackFrame.thread)
val subordinate = new ScalaStepReturnSubordinate(scalaStackFrame.getDebugTarget, scalaStackFrame.thread, stepReturnRequest)
subordinate.scalaStep
}
}
/**
* Class used to manage a Scala step return. It keeps track of the request needed to perform this step.
* This class is thread safe. Instances are not to be created outside of the ScalaStepReturn object.
*/
private[command] class ScalaStepReturnSubordinate(debugTarget: ScalaDebugTarget, thread: ScalaThread, stepReturnRequest: StepRequest)
extends ScalaStep with JdiEventReceiver {
import scala.concurrent.ExecutionContext.Implicits.global
private val enabled = new AtomicBoolean
protected[command] def scalaStep: ScalaStep = this
override protected def innerHandle = {
// JDI event triggered when a step has been performed
case stepEvent: StepEvent =>
if (!debugTarget.cache.isTransparentLocation(stepEvent.location)) {
disable()
thread.suspendedFromScala(DebugEvent.STEP_RETURN)
true
} else false
}
override def step(): Unit = Future {
enable()
thread.resumeFromScala(scalaStep, DebugEvent.STEP_RETURN)
}
override def stop(): Unit = Future {
disable()
}
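// enable/disable use the AtomicBoolean so the JDI request is never registered or
// torn down twice: enable() registers this receiver and arms the request, while
// disable() disarms it, unregisters it and deletes it from the request manager.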
private def enable(): Unit = {
if (!enabled.getAndSet(true)) {
debugTarget.eventDispatcher.register(this, stepReturnRequest)
stepReturnRequest.enable()
}
}
private def disable(): Unit = {
if (enabled.getAndSet(false)) {
stepReturnRequest.disable()
debugTarget.eventDispatcher.unregister(stepReturnRequest)
debugTarget.virtualMachine.eventRequestManager.deleteEventRequest(stepReturnRequest)
}
}
}
|
dragos/scala-ide
|
org.scala-ide.sdt.debug/src/org/scalaide/debug/internal/command/ScalaStepReturn.scala
|
Scala
|
bsd-3-clause
| 2,401
|
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.rice.cli
import java.util.logging.Level._
import org.apache.spark.Logging
import org.bdgenomics.utils.cli._
import scala.collection.mutable.ListBuffer
object RiceMain extends Logging {
case class CommandGroup(name: String, commands: List[BDGCommandCompanion])
private val commandGroups =
List(
CommandGroup(
"QUANTIFICATION",
List(
Index,
Quantify)))
private def printCommands() {
println("\n")
println("\nChoose one of the following commands:")
commandGroups.foreach { grp =>
println("\n%s".format(grp.name))
grp.commands.foreach(cmd =>
println("%20s : %s".format(cmd.commandName, cmd.commandDescription)))
}
println("\n")
}
def main(args: Array[String]) {
log.info("rice invoked with args: %s".format(argsToString(args)))
if (args.size < 1) {
printCommands()
} else {
val commands =
for {
grp <- commandGroups
cmd <- grp.commands
} yield cmd
commands.find(_.commandName == args(0)) match {
case None => printCommands()
case Some(cmd) => cmd.apply(args drop 1).run()
}
}
}
// Attempts to format the `args` array into a string in a way
// suitable for copying and pasting back into the shell.
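// For example, Array("index", "my reads.fq") is rendered as: "index" "my reads.fq"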
private def argsToString(args: Array[String]): String = {
def escapeArg(s: String) = "\"" + s.replaceAll("\\\"", "\\\\\"") + "\""
args.map(escapeArg).mkString(" ")
}
}
|
allenday/rice
|
rice-cli/src/main/scala/org/bdgenomics/rice/cli/RiceMain.scala
|
Scala
|
apache-2.0
| 2,298
|
package org.vitrivr.adampro.storage
import org.vitrivr.adampro.AdamTestBase
import org.vitrivr.adampro.communication.api.{EntityOp, RandomDataOp}
import org.vitrivr.adampro.data.datatypes.AttributeTypes
import org.vitrivr.adampro.data.entity.{AttributeDefinition, Entity}
import scala.util.Random
/**
* ADAMpro
*
* Ivan Giangreco
* July 2016
*/
class SolrHandlerTestSuite extends AdamTestBase {
val handlerName = "solr"
def ntuples() = Random.nextInt(500)
assert(ac.storageManager.contains(handlerName))
scenario("create an entity") {
val tuplesInsert = ntuples()
withEntityName { entityname =>
val attributetypes = Seq(AttributeTypes.INTTYPE, AttributeTypes.LONGTYPE, AttributeTypes.FLOATTYPE, AttributeTypes.DOUBLETYPE, AttributeTypes.STRINGTYPE, AttributeTypes.TEXTTYPE, AttributeTypes.BOOLEANTYPE)
val attributes = attributetypes.map(field => AttributeDefinition(field.name + "field", field, storagehandlername = handlerName)) ++ Seq(AttributeDefinition("tid", AttributeTypes.LONGTYPE, storagehandlername = handlerName))
EntityOp.create(entityname, attributes)
RandomDataOp.apply(entityname, tuplesInsert, Map("fv-dimensions" -> "10"))
val data = Entity.load(entityname).get.getData().get.collect()
data.foreach {
datum => // getAs should throw if the stored value cannot be read as the expected type
val intfield = datum.getAs[Int]("integerfield")
val longfield = datum.getAs[Long]("longfield")
val floatfield = datum.getAs[Float]("floatfield")
val doublefield = datum.getAs[Double]("doublefield")
val stringfield = datum.getAs[String]("stringfield")
val textfield = datum.getAs[String]("textfield")
val booleanfield = datum.getAs[Boolean]("booleanfield")
}
assert(data.size == tuplesInsert)
}
}
}
|
dbisUnibas/ADAMpro
|
src/test/scala/org/vitrivr/adampro/storage/SolrHandlerTestSuite.scala
|
Scala
|
mit
| 1,840
|
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import Incomplete.{Error, Value => IValue}
/** Describes why a task did not complete.
*
* @param node the task that did not complete that is described by this Incomplete instance
* @param tpe whether the task was incomplete because of an error or because it was skipped. Only Error is actually used and Skipped may be removed in the future.
* @param message an optional error message describing this incompletion
* @param causes a list of incompletions that prevented `node` from completing
* @param directCause the exception that caused `node` to not complete */
final case class Incomplete(node: Option[AnyRef], tpe: IValue = Error, message: Option[String] = None, causes: Seq[Incomplete] = Nil, directCause: Option[Throwable] = None)
extends Exception(message.orNull, directCause.orNull) {
override def toString = "Incomplete(node=" + node + ", tpe=" + tpe + ", msg=" + message + ", causes=" + causes + ", directCause=" + directCause +")"
}
object Incomplete extends Enumeration {
val Skipped, Error = Value
def transformTD(i: Incomplete)(f: Incomplete => Incomplete): Incomplete = transform(i, true)(f)
def transformBU(i: Incomplete)(f: Incomplete => Incomplete): Incomplete = transform(i, false)(f)
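// Rewrites an Incomplete tree with `f`. Visited instances are memoized by reference
// identity (IdentityHashMap), so causes shared between branches are transformed once.
// `topDown` decides whether `f` is applied before or after the causes are rewritten.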
def transform(i: Incomplete, topDown: Boolean)(f: Incomplete => Incomplete): Incomplete =
{
import collection.JavaConversions._
val visited: collection.mutable.Map[Incomplete,Incomplete] = new java.util.IdentityHashMap[Incomplete, Incomplete]
def visit(inc: Incomplete): Incomplete =
visited.getOrElseUpdate(inc, if(topDown) visitCauses(f(inc)) else f(visitCauses(inc)))
def visitCauses(inc: Incomplete): Incomplete =
inc.copy(causes = inc.causes.map(visit) )
visit(i)
}
def visitAll(i: Incomplete)(f: Incomplete => Unit)
{
val visited = IDSet.create[Incomplete]
def visit(inc: Incomplete): Unit =
visited.process(inc)( () ) {
f(inc)
inc.causes.foreach(visit)
}
visit(i)
}
def linearize(i: Incomplete): Seq[Incomplete] =
{
var ordered = List[Incomplete]()
visitAll(i) { ordered ::= _ }
ordered
}
def allExceptions(is: Seq[Incomplete]): Iterable[Throwable] =
allExceptions(new Incomplete(None, causes = is))
def allExceptions(i: Incomplete): Iterable[Throwable] =
{
val exceptions = IDSet.create[Throwable]
visitAll(i) { exceptions ++= _.directCause.toList }
exceptions.all
}
def show(tpe: Value) = tpe match { case Skipped => "skipped"; case Error => "error" }
}
|
kuochaoyi/xsbt
|
tasks/Incomplete.scala
|
Scala
|
bsd-3-clause
| 2,517
|
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.index.z3
import org.geotools.factory.Hints
import org.locationtech.geomesa.index.api.{FilterStrategy, WrappedFeature}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.index.index.BaseFeatureIndex
import org.locationtech.geomesa.index.strategies.SpatioTemporalFilterStrategy
import org.opengis.feature.simple.SimpleFeatureType
trait XZ3Index[DS <: GeoMesaDataStore[DS, F, W], F <: WrappedFeature, W, R, C]
extends BaseFeatureIndex[DS, F, W, R, C, XZ3IndexValues] with SpatioTemporalFilterStrategy[DS, F, W] {
override val name: String = "xz3"
override protected val keySpace: XZ3IndexKeySpace = XZ3IndexKeySpace
// always apply the full filter to xz queries
override protected def useFullFilter(sft: SimpleFeatureType,
ds: DS,
filter: FilterStrategy[DS, F, W],
indexValues: Option[XZ3IndexValues],
hints: Hints): Boolean = true
}
|
ronq/geomesa
|
geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/z3/XZ3Index.scala
|
Scala
|
apache-2.0
| 1,554
|
package sharry.backend.job
import sharry.common._
case class CleanupConfig(enabled: Boolean, interval: Duration, invalidAge: Duration) {}
|
eikek/sharry
|
modules/backend/src/main/scala/sharry/backend/job/CleanupConfig.scala
|
Scala
|
gpl-3.0
| 140
|
/*******************************************************************************
Copyright (c) 2012-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import org.w3c.dom.Node
import org.w3c.dom.Element
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr, FunctionId}
import kr.ac.kaist.jsaf.analysis.typing.{Semantics, ControlPoint, Helper, PreHelper}
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.Shell
object HTMLSelectElement extends DOM {
private val name = "HTMLSelectElement"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
val loc_ins = newSystemRecentLoc(name + "Ins")
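// Note: the three boolean flags on the ObjectValue entries below appear to follow the
// ECMAScript property-attribute order (writable, enumerable, configurable); T and F are
// the abstract true/false values imported above.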
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValueNullTop)),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
/* instance */
private val prop_ins: List[(String, AbsProperty)] =
HTMLElement.getInsList2() ++ List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(loc_proto, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// DOM Level 1
("type", AbsConstValue(PropValue(ObjectValue(StrTop, F, T, T)))),
("selectedIndex", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("value", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("form", AbsConstValue(PropValue(ObjectValue(Value(HTMLFormElement.loc_ins), F, T, T)))),
("disabled", AbsConstValue(PropValue(ObjectValue(BoolTop, T, T, T)))),
("multiple", AbsConstValue(PropValue(ObjectValue(BoolTop, T, T, T)))),
("name", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("size", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("tabIndex", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("length", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("options", AbsConstValue(PropValue(ObjectValue(Value(HTMLOptionsCollection.loc_ins), F, T, T))))
)
/* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(HTMLElement.loc_proto), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("add", AbsBuiltinFunc("HTMLSelectElement.add", 2)),
("remove", AbsBuiltinFunc("HTMLSelectElement.remove", 1)),
("blur", AbsBuiltinFunc("HTMLSelectElement.blur", 0)),
("focus", AbsBuiltinFunc("HTMLSelectElement.focus", 0))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = if(Shell.params.opt_Dommodel2) List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global), (loc_ins, prop_ins)
) else List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global) )
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
//TODO: not yet implemented
//case "HTMLSelectElement.add" => ((h,ctx),(he,ctxe))
//case "HTMLSelectElement.remove" => ((h,ctx),(he,ctxe))
("HTMLSelectElement.blur" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
})),
("HTMLSelectElement.focus" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}))
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
//TODO: not yet implemented
//case "HTMLSelectElement.add" => ((h,ctx),(he,ctxe))
//case "HTMLSelectElement.remove" => ((h,ctx),(he,ctxe))
("HTMLSelectElement.blur" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val PureLocalLoc = cfg.getPureLocal(cp)
((PreHelper.ReturnStore(h, PureLocalLoc, Value(UndefTop)), ctx), (he, ctxe))
})),
("HTMLSelectElement.focus" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val PureLocalLoc = cfg.getPureLocal(cp)
((PreHelper.ReturnStore(h, PureLocalLoc, Value(UndefTop)), ctx), (he, ctxe))
}))
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
//TODO: not yet implemented
//case "HTMLSelectElement.add" => ((h,ctx),(he,ctxe))
//case "HTMLSelectElement.remove" => ((h,ctx),(he,ctxe))
("HTMLSelectElement.blur" -> (
(h: Heap, ctx: Context, cfg: CFG, fun: String, args: CFGExpr, fid: FunctionId) => {
LPSet((SinglePureLocalLoc, "@return"))
})),
("HTMLSelectElement.focus" -> (
(h: Heap, ctx: Context, cfg: CFG, fun: String, args: CFGExpr, fid: FunctionId) => {
LPSet((SinglePureLocalLoc, "@return"))
}))
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
//TODO: not yet implemented
//case "HTMLSelectElement.add" => ((h,ctx),(he,ctxe))
//case "HTMLSelectElement.remove" => ((h,ctx),(he,ctxe))
("HTMLSelectElement.blur" -> (
(h: Heap, ctx: Context, cfg: CFG, fun: String, args: CFGExpr, fid: FunctionId) => {
LPSet((SinglePureLocalLoc, "@return"))
})),
("HTMLSelectElement.focus" -> (
(h: Heap, ctx: Context, cfg: CFG, fun: String, args: CFGExpr, fid: FunctionId) => {
LPSet((SinglePureLocalLoc, "@return"))
}))
)
}
/* instance */
override def getInstance(cfg: CFG): Option[Loc] = Some(newRecentLoc())
/* list of properties in the instance object */
override def getInsList(node: Node): List[(String, PropValue)] = node match {
case e: Element =>
// This object has all properties of the HTMLElement object
HTMLElement.getInsList(node) ++ List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("type", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("type")), F, T, T))),
("selectedIndex", PropValue(ObjectValue(Value(NumTop), T, T, T))),
("value", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("value")), T, T, T))),
("disabled", PropValue(ObjectValue((if(e.getAttribute("disabled")=="true") T else F), T, T, T))),
("multiple", PropValue(ObjectValue((if(e.getAttribute("multiple")=="true") T else F), T, T, T))),
("name", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("name")), T, T, T))),
("size", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("size")))), T, T, T))),
("tabIndex", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("tabIndex")))), T, T, T))),
("form", PropValue(ObjectValue(NullTop, F, T, T))),
// Modified in DOM Level 2
("length", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("length")))), T, T, T))))
// 'options' in DOM Level 2 is updated in DOMHelper.modelNode
case _ => {
System.err.println("* Warning: " + node.getNodeName + " cannot have instance objects.")
List()
}
}
def getInsList(ttype: PropValue, selectedIndex: PropValue, value: PropValue, disabled: PropValue,
multiple: PropValue, name: PropValue, size: PropValue, tabIndex: PropValue,
form: PropValue, length: PropValue, xpath: PropValue): List[(String, PropValue)] = List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("type", ttype),
("selectedIndex", selectedIndex),
("value", value),
("disabled", disabled),
("multiple", multiple),
("name", name),
("size", size),
("tabIndex", tabIndex),
("form", form),
// DOM Level 2
("length", length),
("xpath", xpath)
)
override def default_getInsList(): List[(String, PropValue)] = {
val ttype = PropValue(ObjectValue(AbsString.alpha(""), F, T, T))
val selectedIndex= PropValue(ObjectValue(NumTop, T, T, T))
val value = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val disabled = PropValue(ObjectValue(BoolFalse, T, T, T))
val multiple = PropValue(ObjectValue(BoolFalse, T, T, T))
val name = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val size = PropValue(ObjectValue(NumTop, T, T, T))
val tabIndex = PropValue(ObjectValue(NumTop, T, T, T))
val form = PropValue(ObjectValue(NullTop, F, T, T))
val length = PropValue(ObjectValue(NumTop, T, T, T))
val xpath = PropValue(ObjectValue(AbsString.alpha(""), F, F, F))
// This object has all properties of the HTMLElement object
HTMLElement.default_getInsList :::
getInsList(ttype, selectedIndex, value, disabled, multiple, name, size, tabIndex, form, length, xpath)
}
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMHtml/HTMLSelectElement.scala
|
Scala
|
bsd-3-clause
| 10,504
|
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
object Test extends dotty.runtime.LegacyApp {
reify {
println("%s %s %s".format(List("a", "b", "c"): _*))
}.eval
}
|
yusuke2255/dotty
|
tests/disabled/macro/run/t5824.scala
|
Scala
|
bsd-3-clause
| 196
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs105.boxes
import org.joda.time.LocalDate
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.frs105.retriever.Frs105AccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.{AC3, AccountsIntegerValidationFixture, MockFrs105AccountsRetriever}
class AC7998Spec extends WordSpec with Matchers with MockitoSugar with AccountsIntegerValidationFixture[Frs105AccountsBoxRetriever] with MockFrs105AccountsRetriever {
private val boxID = "AC7998"
private val minNumberOfEmployees = Some(0)
private val maxNumberOfEmployees = Some(99999)
private val isMandatory = Some(true)
private val lastDayBeforeMandatoryNotes = LocalDate.parse("2016-12-31")
private val mandatoryNotesStartDate = LocalDate.parse("2017-01-01")
"When the beginning of the accounting period is before 2017, AC7998" should {
"pass validation" when {
"employee information field is empty" in {
when(boxRetriever.ac3()) thenReturn AC3(lastDayBeforeMandatoryNotes)
AC7998(None).validate(boxRetriever) shouldBe Set()
}
}
}
"When the beginning of the accounting period is after 2016-12-31, AC7998" should {
"validate correctly" when {
when(boxRetriever.ac3()) thenReturn AC3(mandatoryNotesStartDate)
testIntegerFieldValidation(boxID, AC7998, minNumberOfEmployees, maxNumberOfEmployees, isMandatory)
}
}
}
|
hmrc/ct-calculations
|
src/test/scala/uk/gov/hmrc/ct/accounts/frs105/boxes/AC7998Spec.scala
|
Scala
|
apache-2.0
| 2,072
|
/*
* This file is part of Gwaihir
* Copyright (C) 2013, 2014 Alvaro Polo
*
* Gwaihir is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Gwaihir is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Gwaihir. If not,
* see <http://www.gnu.org/licenses/>.
*/
package org.oac.gwaihir.core
import org.scalatest.{Matchers, FlatSpec}
case class DummyEvaluator(eventChannel: EventChannel)
extends ConditionEvaluator with EventChannelProvider {
val dev1 = DeviceId("foobar/dev1")
val dev2 = DeviceId("foobar/dev2")
var matches: Option[Boolean] = None
val dev1IsOn = eventMatch(dev1, {
case (isOn: Boolean) => Some(dev1 -> isOn)
case _ => None
})
val dev2IsOver100 = eventMatch(dev2, {
case (power: Int) if power > 100 => Some(dev2 -> power)
case _ => None
})
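// watch takes two argument lists: the first callback fires when the combined condition
// matches, the second when it is evaluated and does not match (see ConditionEvaluatorTest).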
watch(dev1IsOn and dev2IsOver100)
{ _ => matches = Some(true) }
{ matches = Some(false) }
}
class ConditionEvaluatorTest extends FlatSpec with Matchers {
"Condition evaluator" must "consider undetermined matching when no event is sent" in
new EvaluatorInitialized {
eval.matches should be (None)
}
it must "consider not matching when conditions are not met" in
new EvaluatorInitialized {
channel.send(eval.dev1, true)
channel.send(eval.dev2, 10)
eval.matches should be (Some(false))
}
it must "consider matching when all conditions are met" in
new EvaluatorInitialized {
channel.send(eval.dev1, true)
channel.send(eval.dev2, 110)
eval.matches should be (Some(true))
}
it must "consider not matching when any conditions is not met anymore" in
new EvaluatorInitialized {
channel.send(eval.dev1, true)
channel.send(eval.dev2, 110)
eval.matches should be (Some(true))
channel.send(eval.dev2, 10)
eval.matches should be (Some(false))
}
trait EvaluatorInitialized {
val channel = EventChannel()
val eval = DummyEvaluator(channel)
}
}
|
apoloval/gwaihir
|
src/test/scala/org/oac/gwaihir/core/ConditionEvaluatorTest.scala
|
Scala
|
gpl-2.0
| 2,404
|
package com.giyeok.jparser.nparser
import com.giyeok.jparser.Inputs.Input
import com.giyeok.jparser.NGrammar.{NAtomicSymbol, NExcept, NJoin, NLookaheadExcept, NLookaheadIs, NSequence, NSimpleDerive, NTerminal}
import com.giyeok.jparser.nparser.AcceptCondition.AcceptCondition
import com.giyeok.jparser.nparser.ParsingContext._
import com.giyeok.jparser.{NGrammar, ParseResult, ParseResultFunc}
class ParseTreeConstructor[R <: ParseResult](resultFunc: ParseResultFunc[R])(grammar: NGrammar)(input: Seq[Input], val history: Seq[Graph], conditionFinal: Map[AcceptCondition, Boolean]) {
// conditionFinal foreach { kv => println(s"${kv._1} -> ${kv._2}") }
case class KernelEdge(start: Kernel, end: Kernel)
case class KernelGraph(nodes: Seq[Kernel], edges: Seq[KernelEdge]) {
val edgesByStart: Map[Kernel, Seq[KernelEdge]] = {
val edgesMap = edges groupBy {
_.start
}
((nodes.toSet -- edgesMap.keySet) map { n => n -> Seq[KernelEdge]() }).toMap ++ edgesMap
}
}
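// For every generation, project the parsing graph down to the kernels whose accept
// conditions were finally satisfied; only those participate in tree reconstruction.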
val finishes: Vector[KernelGraph] = {
(history map { graph =>
val filteredGraph = graph filterNode { node => conditionFinal(node.condition) }
val kernelNodes: Set[Kernel] = filteredGraph.nodes map {
_.kernel
}
val kernelEdges0: Set[KernelEdge] = filteredGraph.edges map {
case Edge(start, end) => KernelEdge(start.kernel, end.kernel)
}
// There are only edges going to the initial node here, so additional edges would need to be added and considered; for now, however, non-actual edges are all taken into account anyway, so this is not needed.
def augmentEdges(queue: List[Kernel], edges: Set[KernelEdge]): Set[KernelEdge] =
queue match {
case head +: rest =>
if (head.pointer == 0) {
augmentEdges(rest, edges)
} else {
val initialKernel = Kernel(head.symbolId, 0, head.beginGen, head.beginGen)
val newEdges = edges filter {
_.end == initialKernel
} map { edge => KernelEdge(edge.start, head) }
augmentEdges(rest, edges ++ newEdges)
}
case List() =>
edges
}
// val kernelEdges = augmentEdges(kernelNodes.toList, kernelEdges0)
val kernelEdges = kernelEdges0
KernelGraph(kernelNodes.toSeq, kernelEdges.toSeq)
}).toVector
}
// TODO: this could probably be made faster by keeping the node sets in finishes sorted by symbolId
def reconstruct(): Option[R] = {
reconstruct(Kernel(grammar.startSymbol, 1, 0, input.length), input.length)
}
def reconstruct(kernel: Kernel, gen: Int): Option[R] = {
if (kernel.pointer > 0 && (finishes(gen).nodes contains kernel)) Some(reconstruct(kernel, gen, Set())) else None
}
private def reconstruct(kernel: Kernel, gen: Int, traces: Set[(Int, Int)]): R = {
// println("reconstruct", kernel, gen, traces)
assert(finishes(gen).nodes contains kernel)
assert(kernel.endGen == gen)
def reconstruct0(child: Kernel, childGen: Int): R = {
val newTraces: Set[(Int, Int)] =
if ((kernel.beginGen, gen) != (child.beginGen, childGen)) Set()
else traces + ((kernel.symbolId, kernel.pointer))
reconstruct(child, childGen, newTraces)
}
grammar.symbolOf(kernel.symbolId) match {
case symbol: NAtomicSymbol if traces contains ((kernel.symbolId, kernel.pointer)) =>
// println("cyclicBind?")
resultFunc.cyclicBind(kernel.beginGen, gen, symbol)
case symbol: NSequence if traces contains ((kernel.symbolId, kernel.pointer)) =>
// println(s"sequence cyclicBind - $kernel")
resultFunc.sequence(kernel.beginGen, gen, symbol, kernel.pointer)
case symbol@NSequence(_, _, sequence) =>
if (sequence.isEmpty) {
assert(kernel.pointer == 0 && kernel.beginGen == kernel.endGen && kernel.beginGen == gen)
resultFunc.bind(kernel.beginGen, gen, symbol, resultFunc.sequence(kernel.beginGen, kernel.endGen, symbol, 0))
} else if (kernel.pointer == 0) {
assert(kernel.beginGen == kernel.endGen)
resultFunc.sequence(kernel.beginGen, kernel.endGen, symbol, 0)
} else {
val (symbolId, prevPointer) = (kernel.symbolId, kernel.pointer - 1)
val prevKernels = finishes(gen).nodes filter { kern =>
(kern.symbolId == symbolId) && (kern.pointer == prevPointer) && (kern.beginGen == kernel.beginGen)
}
val trees = prevKernels.sortBy(_.tuple) flatMap { prevKernel =>
val childKernel = Kernel(sequence(prevPointer), 1, prevKernel.endGen, gen)
if (finishes(gen).nodes contains childKernel) {
val precedingTree = reconstruct0(Kernel(kernel.symbolId, prevPointer, kernel.beginGen, prevKernel.endGen), prevKernel.endGen)
val childTree = reconstruct0(childKernel, gen)
// println(s"preceding: $precedingTree")
// println(s"child: $childTree")
Some(resultFunc.append(precedingTree, childTree))
} else None
}
val appendedSeq = resultFunc.merge(trees)
if (kernel.pointer == sequence.length) resultFunc.bind(kernel.beginGen, gen, symbol, appendedSeq) else appendedSeq
}
case symbol@NJoin(_, _, body, join) =>
assert(kernel.pointer == 1)
val bodyKernel = Kernel(body, 1, kernel.beginGen, kernel.endGen)
val joinKernel = Kernel(join, 1, kernel.beginGen, kernel.endGen)
val bodyTree = reconstruct0(bodyKernel, kernel.endGen)
val joinTree = reconstruct0(joinKernel, kernel.endGen)
resultFunc.join(kernel.beginGen, kernel.endGen, symbol, bodyTree, joinTree)
case symbol: NTerminal =>
resultFunc.bind(kernel.beginGen, kernel.endGen, symbol,
resultFunc.terminal(kernel.beginGen, input(kernel.beginGen)))
case symbol: NAtomicSymbol =>
assert(kernel.pointer == 1)
// val prevKernel = Kernel(kernel.symbolId, 0, kernel.beginGen, kernel.beginGen)
// assert(finishes(gen).edgesByStart(prevKernel) forall { _.isInstanceOf[SimpleKernelEdge] })
// val bodyKernels = finishes(gen).edgesByStart(prevKernel) collect {
// case KernelEdge(_, end) if end.endGen == gen && end.isFinal(grammar) => end
// }
def lastKernel(symbolId: Int) =
Kernel(symbolId, Kernel.lastPointerOf(grammar.symbolOf(symbolId)), kernel.beginGen, gen)
val bodyKernels0: Set[Kernel] = grammar.nsymbols(kernel.symbolId) match {
case deriver: NSimpleDerive => deriver.produces.map(lastKernel)
case NGrammar.NExcept(_, _, body, _) => Set(lastKernel(body))
case NGrammar.NLongest(_, _, body) => Set(lastKernel(body))
case symbol: NGrammar.NLookaheadSymbol => Set(lastKernel(symbol.emptySeqId))
case _: NTerminal | _: NJoin => assert(false); ???
}
val bodyKernels = (bodyKernels0 intersect finishes(gen).nodes.toSet).toList
val bodyTrees = bodyKernels.sortBy(_.tuple) map { bodyKernel =>
reconstruct0(bodyKernel, kernel.endGen)
}
assert(bodyTrees.nonEmpty)
resultFunc.bind(kernel.beginGen, kernel.endGen, symbol, resultFunc.merge(bodyTrees))
}
}
}
|
Joonsoo/moon-parser
|
naive/src/main/scala/com/giyeok/jparser/nparser/ParseTreeConstructor.scala
|
Scala
|
mit
| 8,240
|
package org.jetbrains.jps.incremental.scala.model.impl
import org.jetbrains.jps.incremental.scala.model.JpsSbtExtensionService
import org.jetbrains.jps.model.java.impl.JpsJavaDependenciesEnumerationHandler
import org.jetbrains.jps.model.module.JpsModule
import java.util
import scala.jdk.CollectionConverters.IterableHasAsScala
/**
* ATTENTION: implementation should be in sync with<br>
* org.jetbrains.sbt.execution.SbtOrderEnumeratorHandler
*/
final class JpsSbtDependenciesEnumerationHandler extends JpsJavaDependenciesEnumerationHandler {
override def shouldAddRuntimeDependenciesToTestCompilationClasspath: Boolean =
true
override def shouldIncludeTestsFromDependentModulesToTestClasspath: Boolean =
super.shouldIncludeTestsFromDependentModulesToTestClasspath
override def shouldProcessDependenciesRecursively: Boolean =
super.shouldProcessDependenciesRecursively
}
object JpsSbtDependenciesEnumerationHandler {
private val Instance = new JpsSbtDependenciesEnumerationHandler
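// The factory returns the shared handler only when at least one of the given modules
// carries the sbt extension; otherwise it returns null and no sbt-specific handling applies.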
final class SbtFactory extends JpsJavaDependenciesEnumerationHandler.Factory {
override def createHandler(modules: util.Collection[JpsModule]): JpsJavaDependenciesEnumerationHandler = {
val service = JpsSbtExtensionService.getInstance
val extension = modules.asScala.iterator.flatMap(service.getExtension).nextOption()
extension.map(_ => Instance).orNull
}
}
}
|
JetBrains/intellij-scala
|
scala/compiler-jps/src/org/jetbrains/jps/incremental/scala/model/impl/JpsSbtDependenciesEnumerationHandler.scala
|
Scala
|
apache-2.0
| 1,403
|
package steve.client
import domain.{Item, Job}
import utils.JsonUtils
import scalaj.http.BaseHttp
case class ClientException(private val message: String = "",
private val cause: Throwable = None.orNull)
extends Exception(message, cause)
case class ItemBatch(items: List[Map[String, Any]]) {
def +(another: ItemBatch) = ItemBatch(items ++ another.items)
}
object ItemBatch {
def apply(jobId: String, status: String, attributes: Map[String, String]): ItemBatch = {
ItemBatch(List(Map[String, Any]("jobId" -> jobId, "status" -> status, "attributes" -> attributes)))
}
def apply(id: String, jobId: String, status: String, attributes: Map[String, String]): ItemBatch = {
ItemBatch(List(Map[String, Any]("id" -> id, "jobId" -> jobId, "status" -> status, "attributes" -> attributes)))
}
}
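// Illustrative usage (the host and job values below are hypothetical):
//   val client = new SteveClient(scalaj.http.Http, "http://localhost:8080")
//   val jobId  = client.addJob("my-app", "CREATED", Map("owner" -> "me"))
//   jobId.foreach(id => client.updateJobState(id, "RUNNING"))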
//TODO: Handle retries in the client logic
class SteveClient(httpClient: BaseHttp, host: String) {
val connectionTimeoutInMillis = 10000
val readTimeoutInMillis = 30000
def addJob(appName: String, state: String, attributes: Map[String, String]): Option[String] = {
val jsonInput = Map[String, Any]("appName" -> appName, "state" -> state, "attributes" -> attributes)
val data = JsonUtils.toJson(jsonInput)
val response = httpClient(s"$host/job")
.postData(data)
.header("content-type", "application/json")
.method("PUT")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val jobInfo = JsonUtils.fromJson[Map[String, String]](response.body)
jobInfo.get("id")
}
def getJob(jobId: String): Job = {
val response = httpClient(s"$host/job/$jobId")
.method("GET")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val jobInfo = JsonUtils.fromJson[Job](response.body)
jobInfo
}
def getJobIdsByState(state: String) = {
val response = httpClient(s"$host/job?state=$state")
.method("GET")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
JsonUtils.fromJson[List[String]](response.body)
}
def updateJobState(jobId: String, state: String): Boolean = {
val response = httpClient(s"$host/job/${jobId.toString}/state")
.postData(state)
.header("content-type", "application/json")
.method("POST")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
response.is2xx
}
def deleteJob(jobId: String): Option[String] = {
val response = httpClient(s"$host/job/$jobId")
.method("DELETE")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val jobInfo = JsonUtils.fromJson[Map[String, String]](response.body)
jobInfo.get("rowsAffected")
}
def getJobStats(jobId: String): Map[String, Int] = {
val response = httpClient(s"$host/job/$jobId/stats")
.method("GET")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val jobStats = JsonUtils.fromJson[Map[String, Int]](response.body)
jobStats
}
def addItem(jobId: String, status: String, attributes: Map[String, String]): Option[String] = {
val jsonInput = Map[String, Any]("jobId" -> jobId, "status" -> status, "attributes" -> attributes)
val data = JsonUtils.toJson(jsonInput)
val response = httpClient(s"$host/item")
.postData(data)
.header("content-type", "application/json")
.method("PUT")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val itemInfo = JsonUtils.fromJson[Map[String, String]](response.body)
itemInfo.get("id")
}
def addItems(batch: ItemBatch): Boolean = {
val data = JsonUtils.toJson(batch.items)
val response = httpClient(s"$host/item/bulk")
.postData(data)
.header("content-type", "application/json")
.method("PUT")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
response.is2xx
}
def getItem(itemId: String): Item = {
val response = httpClient(s"$host/item/$itemId")
.method("GET")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val itemInfo = JsonUtils.fromJson[Item](response.body)
itemInfo
}
private def checkItemByStatus(qpString: String) = {
val response = httpClient(s"$host/item/status?$qpString")
.method("HEAD")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
if (response.is2xx)
true
else if (response.is4xx)
false
else
throw ClientException(response.toString)
}
def checkIfItemsWithStatus(jobId: String, status: String) = {
checkItemByStatus(s"jobId=$jobId&status=$status")
}
def checkIfItemsWithoutStatus(jobId: String, status: String) = {
//NOTE: There's a negation(`!`) operator on the status value
checkItemByStatus(s"jobId=$jobId&status=!$status")
}
def updateItemStatus(itemId: String, status: String): Boolean = {
val response = httpClient(s"$host/item/$itemId/status")
.postData(status)
.header("content-type", "application/json")
.method("POST")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
response.is2xx
}
def updateItemStatus(queryAttrs: Map[String, String], queryStatus: Option[String], updateStatus: String): Boolean = {
val qsMap = queryStatus.map(status => Map("status" -> status)).getOrElse(Map()) ++ queryAttrs
val queryString = qsMap.map {
case (key, value) => key + "=" + value
}.mkString("&")
val response = httpClient(s"$host/item/status?$queryString")
.postData(updateStatus)
.header("content-type", "application/json")
.method("POST")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
response.is2xx
}
def deleteItem(itemId: String): Option[String] = {
val response = httpClient(s"$host/item/$itemId")
.method("DELETE")
.timeout(connTimeoutMs = connectionTimeoutInMillis, readTimeoutMs = readTimeoutInMillis)
.asString
val itemInfo = JsonUtils.fromJson[Map[String, String]](response.body)
itemInfo.get("rowsAffected")
}
}
|
ind9/steve
|
steve-client-scala/src/main/scala/steve/client/SteveClient.scala
|
Scala
|
apache-2.0
| 6,527
|
import java.io.File
import testgen.TestSuiteBuilder.{fromLabeledTestFromInput, _}
import testgen._
object AllergiesTestGenerator {
def main(args: Array[String]): Unit = {
val file = new File("src/main/resources/allergies.json")
def toAllergenEnum(s: String): String =
"Allergen." + s.toLowerCase.capitalize
def toAllergicToExpected(expected: CanonicalDataParser.Expected): List[(String, Boolean)] = {
expected match {
case Right(xs: List[Map[String, Any]]) =>
xs.map(f => (toAllergenEnum(f("substance").asInstanceOf[String]),
f("result").asInstanceOf[Boolean]))
case _ => throw new IllegalStateException
}
}
def toListExpected(expected: CanonicalDataParser.Expected): String = {
expected match {
case Right(xs: List[String]) => s"List(${xs.map(toAllergenEnum).mkString(", ")})"
case _ => throw new IllegalStateException
}
}
def sutArgs(parseResult: CanonicalDataParser.ParseResult, argNames: String*): String =
argNames map (name => TestSuiteBuilder.toString(parseResult(name))) mkString ", "
def getScore(labeledTest: LabeledTest): Int =
labeledTest.result("input").asInstanceOf[Map[String, Any]]("score").asInstanceOf[Int]
def fromLabeledTestFromInput(argNames: String*): ToTestCaseDataList =
withLabeledList { sut =>
labeledTest =>
val score = getScore(labeledTest)
val property = labeledTest.property
if ("allergicTo".equals(property)) {
val expected: List[(String, Boolean)] = toAllergicToExpected(labeledTest.expected)
expected.map(e => {
val sutCall =
s"""$sut.$property(${e._1}, $score)"""
val result = e._2.toString
TestCaseData(s"${e._1} - ${labeledTest.description}", sutCall, result)
})
} else {
val args = sutArgsFromInput(labeledTest.result, "score")
val expected = toListExpected(labeledTest.expected)
val sutCall =
s"""$sut.$property($args)"""
List(TestCaseData(labeledTest.description, sutCall, expected))
}
}
val code = TestSuiteBuilder.buildFromList(file, fromLabeledTestFromInput("score"))
println(s"-------------")
println(code)
println(s"-------------")
}
}
|
ricemery/xscala
|
testgen/src/main/scala/AllergiesTestGenerator.scala
|
Scala
|
mit
| 2,365
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalautils
import org.scalatest._
import scala.collection.GenSeq
import scala.collection.GenMap
import scala.collection.GenSet
import scala.collection.GenIterable
import scala.collection.GenTraversable
import scala.collection.GenTraversableOnce
class EqualitySpec extends Spec with NonImplicitAssertions {
object `The Equality companion object` {
def `should offer a factory method for NormalizingEqualities` {
import StringNormalizations._
assert(Equality(lowerCased).areEqual("howdy", "HOWDY"))
assert(Equality(lowerCased and trimmed).areEqual(" howdy", "HOWDY "))
}
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalautils/EqualitySpec.scala
|
Scala
|
apache-2.0
| 1,220
|
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.check.body
import java.nio.charset.StandardCharsets._
import scala.collection.mutable
import org.mockito.Mockito._
import io.gatling.{ BaseSpec, ValidationValues }
import io.gatling.core.CoreDsl
import io.gatling.core.check.CheckResult
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session._
import io.gatling.http.HttpDsl
import io.gatling.http.check.HttpCheckSupport
import io.gatling.http.response.{ Response, StringResponseBody }
class HttpBodyRegexCheckSpec extends BaseSpec with ValidationValues with CoreDsl with HttpDsl {
object RegexSupport extends HttpCheckSupport
implicit val configuration = GatlingConfiguration.loadForTest()
implicit val provider = HttpBodyRegexProvider
implicit def cache: mutable.Map[Any, Any] = mutable.Map.empty
val session = Session("mockSession", 0)
val regexCheck = super[CoreDsl].regex(_)
private def mockResponse(body: String) = {
val response = mock[Response]
when(response.body) thenReturn new StringResponseBody(body, UTF_8)
response
}
"regex.find.exists" should "find single result" in {
val response = mockResponse("""{"id":"1072920417"}""")
regexCheck(""""id":"(.+?)"""").find.exists.check(response, session).succeeded shouldBe CheckResult(Some("1072920417"), None)
}
it should "find first occurrence" in {
val response = mockResponse("""[{"id":"1072920417"},"id":"1072920418"]""")
regexCheck(""""id":"(.+?)"""").find.exists.check(response, session).succeeded shouldBe CheckResult(Some("1072920417"), None)
}
"regex.findAll.exists" should "find all occurrences" in {
val response = mockResponse("""[{"id":"1072920417"},"id":"1072920418"]""")
regexCheck(""""id":"(.+?)"""").findAll.exists.check(response, session).succeeded shouldBe CheckResult(Some(Seq("1072920417", "1072920418")), None)
}
it should "fail when finding nothing instead of returning an empty Seq" in {
val response = mockResponse("""[{"id":"1072920417"},"id":"1072920418"]""")
val regexValue = """"foo":"(.+?)""""
regexCheck(regexValue).findAll.exists.check(response, session).failed shouldBe s"regex($regexValue).findAll.exists, found nothing"
}
it should "fail with expected message when transforming" in {
val response = mockResponse("""[{"id":"1072920417"},"id":"1072920418"]""")
val regexValue = """"foo":"(.+?)""""
regexCheck(regexValue).findAll.transform(_.map(_ + "foo")).exists.check(response, session).failed shouldBe s"regex($regexValue).findAll.transform.exists, found nothing"
}
"regex.count.exists" should "find all occurrences" in {
val response = mockResponse("""[{"id":"1072920417"},"id":"1072920418"]""")
regexCheck(""""id":"(.+?)"""").count.exists.check(response, session).succeeded shouldBe CheckResult(Some(2), None)
}
it should "return 0 when finding nothing instead of failing" in {
val response = mockResponse("""[{"id":"1072920417"},"id":"1072920418"]""")
val regexValue = """"foo":"(.+?)""""
regexCheck(regexValue).count.exists.check(response, session).succeeded shouldBe CheckResult(Some(0), None)
}
}
|
timve/gatling
|
gatling-http/src/test/scala/io/gatling/http/check/body/HttpBodyRegexCheckSpec.scala
|
Scala
|
apache-2.0
| 3,751
|
object Test {
opaque type T = String
object T {
def unwrap(t: T): String = t
}
opaque type U = String
type W = U
object U {
def unwrap(w: W): String = w: U
}
}
|
lampepfl/dotty
|
tests/pos/i6003.scala
|
Scala
|
apache-2.0
| 182
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.api
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.ColumnGroups
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ShardStrategyTest extends Specification {
"ShardStrategy" should {
"handle negative hash values" in {
val sft = SimpleFeatureTypes.createType("hash", "geom:Point,dtg:Date;geomesa.z.splits=60")
val wrapper = WritableFeature.wrapper(sft, new ColumnGroups)
val sf = ScalaSimpleFeature.create(sft, "1371494157#3638946185",
"POINT (88.3176015 22.5988557)", "2019-12-23T01:00:00.000Z")
val writable = wrapper.wrap(sf)
val strategy = ShardStrategy(60)
strategy.apply(writable) must not(beNull)
}
}
}
|
aheyne/geomesa
|
geomesa-index-api/src/test/scala/org/locationtech/geomesa/index/api/ShardStrategyTest.scala
|
Scala
|
apache-2.0
| 1,387
|
package jp.co.bizreach.s3scala
import org.scalatest.FunSuite
import java.io.File
class IOUtilsSuite extends FunSuite {
test("toInputStream and toBytes"){
val file = new File("README.md")
val in = IOUtils.toInputStream(file)
val bytes = IOUtils.toBytes(in)
assert(file.length == bytes.length)
}
test("deleteDirectory"){
val dir = new File("data")
dir.mkdir()
val file = new File(dir, "test.txt")
file.createNewFile()
assert(dir.exists())
assert(file.exists())
IOUtils.deleteDirectory(dir)
assert(!dir.exists())
assert(!file.exists())
}
}
|
bizreach/aws-s3-scala
|
src/test/scala/jp/co/bizreach/s3scala/IOUtilsSuite.scala
|
Scala
|
apache-2.0
| 605
|
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
import com.intellij.psi.stubs.StubElement
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportExpr
/**
* User: Alexander Podkhalyuzin
* Date: 20.06.2009
*/
trait ScImportExprStub extends StubElement[ScImportExpr] {
def referenceText: Option[String]
def reference: Option[ScStableCodeReferenceElement]
def isSingleWildcard: Boolean
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/stubs/ScImportExprStub.scala
|
Scala
|
apache-2.0
| 523
|
package us.feliscat.ner
import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import us.feliscat.m17n.MultiLingual
import us.feliscat.text.{StringNone, StringOption, StringSome}
import us.feliscat.time.{TimeMerger, TimeTmp}
import scala.collection.mutable.ListBuffer
/**
* <pre>
* Created on 2017/02/09.
* </pre>
*
* @author K.Sakamoto
*/
trait MultiLingualNamedEntityRecognizerInEventOntology extends NamedEntityRecognizer with MultiLingual {
override protected val recognizerName: String = "EventOntology"
override protected lazy val entityList: NEList = initialize
protected def extract(sentence: StringOption): Seq[TimeTmp]
protected def eventOntologyClassFiles: Array[File]
protected override def initialize: NEList = {
val neBuffer = ListBuffer.empty[NE]
eventOntologyClassFiles foreach {
file: File =>
val fileName: NEFile = file.getName
val reader: java.util.Iterator[String] = Files.newBufferedReader(file.toPath, StandardCharsets.UTF_8).lines.iterator
while (reader.hasNext) {
val line: NELine = reader.next
val metaInfo: MetaInfo = (fileName, line)
val elements: Array[String] = line.split(',')
if (5 < elements.length) {
val years: Seq[StringOption] = StringOption(elements(2)) :: StringOption(elements(3)) :: Nil
val time: TimeTmp = TimeMerger.union(
for (year <- years) yield {
TimeMerger.union(extract(year))
}
)
for (i <- elements.indices) {
if (!((0 :: 2 :: 3 :: Nil) contains i)) {
normalize(StringOption(elements(i).trim)) match {
case StringSome(str) =>
val synonyms: Array[String] = str.split('@')
synonyms foreach {
synonym: String =>
StringOption(synonym.trim) match {
case StringSome(text) =>
val ne: NE = (text, metaInfo, time, synonyms)
neBuffer += ne
case StringNone =>
//Do nothing
}
}
case StringNone =>
//Do nothing
}
}
}
}
}
}
neBuffer.result
}
}
|
ktr-skmt/FelisCatusZero-multilingual
|
libraries/src/main/scala/us/feliscat/ner/MultiLingualNamedEntityRecognizerInEventOntology.scala
|
Scala
|
apache-2.0
| 2,430
|
package ch.bsisa.hyperbird.patman.simulations.model
import java.util.Date
/**
* Models a hospital
*/
case class Hospital(code:String, schedule:Date, beds:List[Bed]) {
}
|
bsisa/hb-api
|
app/ch/bsisa/hyperbird/patman/simulations/model/Hospital.scala
|
Scala
|
gpl-2.0
| 175
|
package com.basrikahveci
package cardgame.messaging.request
import cardgame.messaging.Request
import cardgame.core.{OnlineUsers, Session}
import cardgame.domain.User
import cardgame.core.db.QueryEvaluatorContainer
import compat.Platform
import java.sql.Timestamp
import org.apache.commons.codec.digest.DigestUtils
object SignInRequest {
val SECRET = "oyfarfara"
}
class PersistentUserData(val userId: Long, val points: Int, val wins: Int, val loses: Int, val leaves: Int)
class SignInRequest(val userId: Long, val name: String, val friends: Array[Long], val signature: String) extends Request with OnlineUsers {
def handle(session: Session, user: User) {
if (!session.isUserSignedIn) {
val signature = DigestUtils.sha256Hex(name + SignInRequest.SECRET)
if (signature == this.signature) {
loadUser match {
case Some(userData) =>
signInWith(session, userData)
case None =>
val userData = new PersistentUserData(userId, 1000, 0, 0, 0)
persist(userData)
signInWith(session, userData)
}
} else {
session close true
}
} else {
session close true
}
}
def loadUser = {
var userDataOption: Option[PersistentUserData] = None
QueryEvaluatorContainer.queryEvaluator.selectOne(" SELECT points, wins, loses, leaves FROM users WHERE id = ? ", userId) {
row =>
userDataOption = Some(new PersistentUserData(userId, row.getInt("points"), row.getInt("wins"), row.getInt("loses"), row.getInt("leaves")))
}
userDataOption
}
def persist(userData: PersistentUserData) = QueryEvaluatorContainer.queryEvaluator.insert(" INSERT into users(id, registration_time, points) VALUES(?, ?, ?) ", userData.userId, new Timestamp(Platform.currentTime), userData.points)
def signInWith(session: Session, userData: PersistentUserData) = settle(new User(userId, name, friends, session, userData.points, userData.wins, userData.loses, userData.leaves))
}
|
metanet/cardgame-server-scala
|
src/main/scala/com/basrikahveci/cardgame/messaging/request/SignInRequest.scala
|
Scala
|
mit
| 2,005
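For reference, a minimal sketch (not part of the repository; object name and sample user are invented) of how a client would be expected to compute the signature field that SignInRequest.handle checks above.
import org.apache.commons.codec.digest.DigestUtils
import com.basrikahveci.cardgame.messaging.request.SignInRequest

object SignatureSketch extends App {
  val name = "alice"
  // The server recomputes sha256Hex(name + SECRET) and compares it with the
  // signature sent by the client, so a well-behaved client does the same.
  val signature = DigestUtils.sha256Hex(name + SignInRequest.SECRET)
  println(signature)
}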
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump
import scala.language.implicitConversions
import akka.actor.{Actor, Props, Terminated}
import akka.testkit.TestProbe
object TestProbeUtil {
implicit def toProps(probe: TestProbe): Props = {
Props(new Actor {
val probeRef = probe.ref
context.watch(probeRef)
def receive: Receive = {
case Terminated(`probeRef`) => context.stop(self)
case x => probeRef.forward(x)
}
})
}
}
|
manuzhang/incubator-gearpump
|
core/src/test/scala/org/apache/gearpump/TestProbeUtil.scala
|
Scala
|
apache-2.0
| 1,260
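A minimal usage sketch (not from the repository) of what the implicit conversion above enables: a TestProbe can stand in wherever Props are expected, and the spawned proxy actor forwards every message to the probe.
import akka.actor.{ActorSystem, Props}
import akka.testkit.TestProbe
import org.apache.gearpump.TestProbeUtil._

object TestProbeUtilSketch extends App {
  implicit val system: ActorSystem = ActorSystem("probe-demo")
  val probe = TestProbe()
  val proxyProps: Props = probe            // implicit TestProbeUtil.toProps applies here
  val proxy = system.actorOf(proxyProps)   // the proxy forwards everything to the probe
  proxy ! "ping"
  probe.expectMsg("ping")
  system.terminate()
}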
|
/**
* The MIT License
*
* Copyright (c) 2011 Benjamin Klum
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.helgoboss.commons_scala
import java.io._
import io.Source
import OperatingSystemClass._
/**
* Contains convenience methods for working with temporary files and common system paths.
*/
object RichFile {
/**
* Creates and returns a temporary directory.
*
* @param prefix how the name of the directory should start
* @param suffix how the name of the directory should end, defaults to an empty String
* @return File object representing the created directory
*/
def createTempDir(prefix: String, suffix: String = "") = {
val tmpDir = File.createTempFile(prefix, suffix)
tmpDir.delete()
tmpDir.mkdir()
tmpDir
}
/**
* Works exactly like [[java.io.File]]`.createTempFile()` but you don't have to provide the suffix.
*
* @param prefix how the name of the file should start
* @param suffix how the name of the file should end, defaults to an empty String
* @return File object representing the created file
*/
def createTempFile(prefix: String, suffix: String = "") = {
File.createTempFile(prefix, suffix)
}
/**
* Works like [[java.io.File]]`.createTempFile()` but additionally makes sure that the file doesn't exist.
*
* @param prefix how the name of the file should start
* @param suffix how the name of the file should end, defaults to an empty String
* @return File object pointing to a not yet existing file
*/
def createNonExistingTempFile(prefix: String, suffix: String = "") = {
val tmpFile = File.createTempFile(prefix, suffix)
tmpFile.delete()
tmpFile
}
/**
* Returns the system directory for temporary files.
*
* Just wraps the value of the system property "java.io.tmpdir" in a File.
*/
def tempDir = new File(System.getProperty("java.io.tmpdir"))
/**
* Returns the user's home directory.
*
* Just wraps the value of the system property "user.home" in a File.
*/
def userHomeDir = new File(System.getProperty("user.home"))
}
/**
* File wrapper which provides various convenience methods for working with files.
*/
class RichFile(file: File) {
/**
* Returns a [[org.helgoboss.commons_scala.Path]] representing the file's location.
*
* @note [[org.helgoboss.commons_scala.Path]] doesn't distinguish between relative and absolute
* paths, so a leading slash gets lost
*/
lazy val path = Path(file.getPath.replace("""\""", "/"))
/**
* Returns the path expression in Unix style.
*
* No file system access is made to create the expression. Symbolic links are
* not resolved.
*
* A slash is used as separator. A trailing slash is inserted if the only
* path component is `X:` where `X` is supposed to be a drive letter. Cygwin
* paths are converted to their Windows equivalents. This path expression style
* is appropriate for Unix-only software and non-Cygwin Windows software
* that prefers the slash even on Windows.
*
* == Example ==
* {{{
* scala> import org.helgoboss.commons_scala.Implicits._
* import org.helgoboss.commons_scala.Implicits._
*
* scala> import java.io.File
* import java.io.File
*
* scala> new File("""c:\""").unixPathExpression
* res0: java.lang.String = C:/
*
* scala> new File("/cygdrive/c").unixPathExpression
* res1: java.lang.String = C:/
* }}}
*/
lazy val unixPathExpression = {
if (isAbsoluteOnUnixOrWindows) {
/* Absolute path */
driveLetter match {
case Some(l) =>
l.toUpperCase + ":/" + pathAfterDriveLetter.components.mkString("/")
case None =>
"/" + path.components.mkString("/")
}
} else {
/* Relative path */
path.components.mkString("/")
}
}
/**
* Returns the path expression in Windows style.
*
* No file system access is made to create the expression. Symbolic links are
* not resolved.
*
* A backslash is used as separator. A trailing backslash is inserted if the only
* path component is `X:` where `X` is supposed to be a drive letter. Cygwin
* paths are converted to their Windows equivalents. This path expression style
* is appropriate for Windows-only software and Unix software that prefers
* the backslash, even on Unix.
*
* == Example ==
* {{{
* scala> import org.helgoboss.commons_scala.Implicits._
* import org.helgoboss.commons_scala.Implicits._
*
* scala> import java.io.File
* import java.io.File
*
* scala> new File("""C:""").windowsPathExpression
* res0: java.lang.String = C:\
*
* scala> new File("""C:\Program Files""").windowsPathExpression
* res1: java.lang.String = C:\Program Files
*
* scala> new File("""/cygdrive/c""").windowsPathExpression
* res2: java.lang.String = C:\
* }}}
*/
lazy val windowsPathExpression = {
if (isAbsoluteOnUnixOrWindows) {
/* Absolute path */
driveLetter match {
case Some(l) =>
l.toUpperCase + ":\\\\" + pathAfterDriveLetter.components.mkString("\\\\")
case None =>
"\\\\" + path.components.mkString("\\\\")
}
} else {
/* Relative path */
path.components.mkString("\\\\")
}
}
/**
* Returns the path expression in Unix style if the current platform is a Unix system
* and in Windows style if the current platform is a Windows system.
*
* Use this method for
* passing paths to cross-platform software which prefers the native style on each system.
* This is similar to [[java.io.File]]`.getCanonicalFile()`. However, the latter doesn't
* convert Cygwin paths into native Windows paths, might access the file system,
* and resolves symbolic links.
*/
lazy val osDependentPathExpression = CurrentPlatform.osClass match {
case Windows => windowsPathExpression
case Unix => unixPathExpression
}
/**
* Like `unixPathExpression` but converts `X:/foo` to `/cygdrive/x/foo`. Use this method
* for passing paths to Unix software or to Windows software based on Cygwin.
*/
lazy val cygwinCompatibleUnixPathExpression = {
if (isAbsoluteOnUnixOrWindows) {
/* Absolute path */
driveLetter match {
case Some(l) =>
val base = "/cygdrive/" + l.toLowerCase
if (pathAfterDriveLetter.components.isEmpty)
base
else
base + "/" + pathAfterDriveLetter.components.mkString("/")
case None => "/" + path.components.mkString("/")
}
} else {
/* Relative path */
path.components.mkString("/")
}
}
/**
* Returns a new file created by appending the given path to this file. Should be invoked as an infix operator.
*
* @param childPath path to append
* @return new file
*/
def /(childPath: Path) = new File(file, childPath.toString)
/**
* Writes the given String into this file overwriting any previous content.
*
* @param content String to write
*/
def content_=(content: String) {
val fw = new FileWriter(file)
try {
fw.write(content)
} finally {
fw.close()
}
}
/**
* Makes sure this file points to an empty directory. If it doesn't exist yet, the directory will be created.
* If this file already exists, it will be deleted. If it's a directory, the complete directory tree will be
* deleted!
*/
def makeSureDirExistsAndIsEmpty() {
if (file.exists)
deleteRecursively()
file.mkdirs()
}
/**
* Makes sure this file points to a directory. If it doesn't exist yet, the directory will be created. If the file
* already exists but is not a directory, an exception will be thrown.
*/
def makeSureDirExists() {
if (file.exists) {
if (!file.isDirectory) {
sys.error("File with this name is existing but is not a directory")
}
} else {
file.mkdirs()
}
}
/**
* If this File object points to a directory, this method deletes the directory recursively. If it points to a file,
* it just deletes the file.
*
* @return `true` if the deletion was successful, `false` otherwise
*/
def deleteRecursively(): Boolean = {
def deleteFile(f: File): Boolean = {
if (f.isDirectory) {
f.listFiles foreach { deleteFile }
}
f.delete()
}
deleteFile(file)
}
/**
* Returns the String content of this file, assuming it is a text file in the platform's default encoding.
*/
def content: String = {
val source = Source.fromFile(file)
try
source.mkString
finally
source.close()
}
/**
* Returns a [[org.helgoboss.commons_scala.FileTreeIterator]] over this directory. Extremely useful
* if you want to dive into directory trees.
*
* @see [[org.helgoboss.commons_scala.FileTreeIterator]]
*/
def tree = new FileTreeIterator(root = file)
/**
* Returns a [[org.helgoboss.commons_scala.FileTreeIterator]] over this directory that traverses
* the files in each directory in the given default order.
*
* @param defaultWalkOrder order function denoting in which order to traverse files in a directory by default
*/
def tree(defaultWalkOrder: Option[(File, File) => Boolean]) =
new FileTreeIterator(root = file, defaultWalkOrder = defaultWalkOrder)
private lazy val isAbsoluteOnUnixOrWindows = {
file.getPath.startsWith("/") || file.getPath.startsWith("""\\""") ||
(path.components.headOption exists { _ contains ":" })
}
private case class DrivePath(driveLetter: String, remainder: Path)
/**
* Assumes the path is absolute on Unix or Windows.
*/
private def cygwinDrivePath = {
if (path.components.size >= 2 && path.components.head == "cygdrive") {
val CygwinDriveLetterPattern = "([a-z])".r
path.components.tail.head match {
case CygwinDriveLetterPattern(letter) => Some(DrivePath(letter, Path(path.components.drop(2))))
case _ => None
}
} else {
None
}
}
/**
* Assumes the path is absolute on Unix or Windows. Returns the drive letter in lowercase.
*/
private def windowsDrivePath = {
if (path.components.size >= 1) {
val WindowsDriveLetterPattern = "([A-Za-z]):".r
path.components.head match {
case WindowsDriveLetterPattern(letter) => Some(DrivePath(letter.toLowerCase, Path(path.components.tail)))
case _ => None
}
} else {
None
}
}
private lazy val drivePath = cygwinDrivePath orElse windowsDrivePath
private def driveLetter = drivePath map { _.driveLetter }
private def pathAfterDriveLetter = drivePath match {
case Some(dp) => dp.remainder
case None => Path(Nil)
}
}
|
helgoboss/commons-scala
|
src/main/scala/org/helgoboss/commons_scala/RichFile.scala
|
Scala
|
mit
| 11,712
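A brief usage sketch, assuming the File-to-RichFile implicit conversion lives in org.helgoboss.commons_scala.Implicits as the scaladoc examples above suggest; the file names are made up.
import java.io.File
import org.helgoboss.commons_scala.{Path, RichFile}
import org.helgoboss.commons_scala.Implicits._

object RichFileSketch extends App {
  val dir = RichFile.createTempDir("richfile-demo")   // companion helper from above
  val note: File = dir / Path("note.txt")             // RichFile./ appends a child path
  note.content = "hello"                              // content_= writes the file
  println(note.content)                               // content reads it back
  println(dir.unixPathExpression)                     // slash-style path expression
  dir.deleteRecursively()
}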
|
package com.themillhousegroup.l7.xml
import scala.xml._
import scala.xml.transform.{ RuleTransformer, RewriteRule }
object NodeChanger {
/** Generates a RewriteRule that will put newValue into anything matching the xPathExpression */
def rewrite(xPathExpression: => Seq[Node], newValue: Elem): RewriteRule = {
new RewriteRule {
def innerTransform(n: Node): Node = n match {
case elem @ Elem(_, _, _, _, child @ _*) if xPathExpression.contains(elem) => newValue
case elem @ Elem(_, _, _, _, child @ _*) => elem.asInstanceOf[Elem].copy(child = child map innerTransform)
case _ => n
}
override def transform(n: Node) = innerTransform(n)
}
}
/** Actually perform a conversion on the Node */
def convertNodeAt(doc: Node, xPathExpression: => Seq[Node], newValue: Elem): Elem = {
val rewriteRule = rewrite(xPathExpression, newValue)
new RuleTransformer(rewriteRule).transform(doc).head.asInstanceOf[Elem]
}
}
|
themillhousegroup/l7-merge
|
src/main/scala/com/themillhousegroup/l7/xml/NodeChanger.scala
|
Scala
|
mit
| 980
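A small usage sketch for NodeChanger (the XML document here is invented): the selection is passed by name and every matching element is replaced by the new value.
import scala.xml._
import com.themillhousegroup.l7.xml.NodeChanger

object NodeChangerSketch extends App {
  val doc: Elem =
    <service>
      <timeout>30</timeout>
      <retries>3</retries>
    </service>

  // Replace the <timeout> element wherever the deep selection matches it.
  val updated = NodeChanger.convertNodeAt(doc, doc \\ "timeout", <timeout>60</timeout>)
  println(updated)
}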
|
package ammonite.runtime.tools
import java.io.PrintWriter
import ammonite.util.Printer
import coursier._
import coursier.core.{Authentication => CoursierAuthentication}
import coursier.ivy.IvyRepository
import coursier.maven.MavenRepository
import scalaz.{-\/, \/-}
import scalaz.concurrent.Task
object DependencyConstructor extends DependencyConstructor
trait DependencyConstructor{
implicit class GroupIdExt(groupId: String){
def %(artifactId: String) = (groupId, artifactId)
def %%(artifactId: String) = (groupId, artifactId + "_" + DependencyThing.scalaBinaryVersion)
}
implicit class ArtifactIdExt(t: (String, String)){
def %(version: String) = (t._1, t._2, version)
}
}
/**
* Resolve artifacts from Ivy. Originally taken from
*
* http://makandracards.com/evgeny-goldin/5817-calling-ivy-from-groovy-or-java
*
* And transliterated into Scala. I have no idea how or why it works.
*/
class DependencyThing(repositories: () => List[Resolver], printer: Printer, verboseOutput: Boolean) {
def exceptionMessage(conflicts: Seq[String], failed: Seq[String], converged: Boolean) =
Seq(
if (conflicts.isEmpty)
Nil
else
Seq(s"Conflicts:\\n" + conflicts.map(" " + _).mkString("\\n")),
if (failed.isEmpty)
Nil
else
Seq(s"failed to resolve dependencies:\\n" + failed.map(" " + _).mkString("\\n")),
if (converged)
Nil
else
Seq("Did not converge")
).flatten.mkString("\\n")
case class IvyResolutionException(conflicts: Seq[String], failed: Seq[String], converged: Boolean) extends Exception(
exceptionMessage(conflicts, failed, converged)
)
def resolveArtifacts(coordinates: Seq[(String, String, String)],
previousCoordinates: Seq[(String, String, String)],
exclusions: Seq[(String, String)],
profiles: Set[String]) = synchronized {
val deps = coordinates.map {
case (groupId, artifactId, version) =>
Dependency(Module(groupId, artifactId), version)
}
val previousDeps = previousCoordinates.map { case (org, name, ver) => Dependency(Module(org, name), ver) }
val start = Resolution(
(previousDeps ++ deps).map { dep0 =>
dep0.copy(
exclusions = dep0.exclusions ++ exclusions
)
}.toSet,
userActivations = if (profiles.isEmpty) None else Some(profiles.map(_ -> true).toMap)
)
val metadataLogger = new TermDisplay(new PrintWriter(System.out))
val fetch = Fetch.from(
repositories().map(_()),
Cache.fetch(cachePolicy = CachePolicy.default.head, logger = Some(metadataLogger)),
CachePolicy.default.tail.map(p =>
Cache.fetch(cachePolicy = p, logger = Some(metadataLogger))
): _*
)
metadataLogger.init()
val res =
try start.process.run(fetch, maxIterations = 200).unsafePerformSync
finally metadataLogger.stop()
if (!res.isDone || res.errors.nonEmpty || res.conflicts.nonEmpty)
throw IvyResolutionException(
res.conflicts.toVector.map(_.toString).sorted,
res.errors.map { case (dep, errors) => s"$dep: ${errors.mkString(", ")}" },
res.isDone
)
val artifactLogger = new TermDisplay(new PrintWriter(System.out))
artifactLogger.init()
val types = Set("jar", "bundle")
val a =
try {
Task.gatherUnordered(res.dependencyArtifacts.map(_._2).filter(a => types(a.`type`)).map { artifact =>
def fetch(p: CachePolicy) =
Cache.file(artifact, logger = Some(artifactLogger), cachePolicy = p)
(fetch(CachePolicy.default.head) /: CachePolicy.default.tail)(_ orElse fetch(_))
.run
.map(artifact -> _)
}).unsafePerformSync
}
finally
artifactLogger.stop()
val downloadErrors = a.collect { case (artifact, -\/(err)) => (artifact, err) }
if (downloadErrors.nonEmpty)
throw IvyResolutionException(
Nil,
downloadErrors.map { case (artifact, err) => s"${artifact.url}: ${err.describe}" },
converged = true
)
a.collect { case (_, \/-(f)) => f }
}
}
object DependencyThing {
val scalaBinaryVersion =
scala.util.Properties
.versionString
.stripPrefix("version ")
.split('.')
.take(2)
.mkString(".")
}
object Resolvers {
// this pattern comes from sbt.Resolver
val IvyPattern: String =
"[organisation]/[module]/(scala_[scalaVersion]/)(sbt_[sbtVersion]/)"+
"[revision]/[type]s/[artifact](-[classifier]).[ext]"
// this pattern comes from IBiblioResolver
val MavenPattern: String =
"[organisation]/[module]/" +
"[revision]/[artifact]-[revision](-[classifier]).[ext]"
// this pattern comes from IBiblioResolver
val DefaultPattern: String =
"[module]/[type]s/[artifact]-[revision].[ext]"
lazy val defaultResolvers: List[Resolver] = List(
Resolver.File(
"local",
"/.ivy2/local",
"/[organisation]/[module]/[revision]/[type]s/[artifact](-[classifier]).[ext]",
m2 = false
),
Resolver.Http(
"central",
"https://repo1.maven.org/maven2/",
MavenPattern,
m2 = true
),
Resolver.Http(
"sonatype-releases",
"https://oss.sonatype.org/content/repositories/releases/",
MavenPattern,
m2 = true
)
)
}
case class Authentication(user: String, password: String) {
override def toString: String = s"Authentication($user, *******)"
}
/**
* A thin wrapper around [[Repository]] that provides
* hashability in order to set the cache tags. This lets us invalidate the Ivy
* resolution cache if the set of resolvers changes.
*/
sealed trait Resolver{
def apply(): Repository
}
object Resolver{
case class File(name: String, root: String, pattern: String, m2: Boolean) extends Resolver{
def apply() = {
val testRepoDir = new java.io.File(sys.props("user.home") + root).toURI.toString
if (m2)
MavenRepository(testRepoDir, changing = None)
else
IvyRepository.parse(testRepoDir + pattern).getOrElse {
throw new Exception(s"Error parsing Ivy pattern $testRepoDir$pattern")
}
}
}
case class Http(name: String, root: String, pattern: String, m2: Boolean,
authentication: Option[Authentication] = None) extends Resolver{
def apply() = {
val coursierAuthentication = authentication.map(auth => CoursierAuthentication(auth.user, auth.password))
if (m2)
MavenRepository(root, changing = None,
authentication = coursierAuthentication)
else
IvyRepository.parse((root + pattern).replace("[ivyPattern]", Resolvers.IvyPattern),
authentication = coursierAuthentication).getOrElse {
throw new Exception(s"Error parsing Ivy pattern $root$pattern")
}
}
}
}
|
alexarchambault/ammonium
|
amm/runtime/src/main/scala/ammonite/runtime/tools/DependencyThing.scala
|
Scala
|
mit
| 6,969
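For orientation, a sketch of the coordinate syntax that DependencyConstructor provides (the import is assumed to be in scope): % pairs group and artifact ids, %% additionally appends the Scala binary version, and the resulting triples are what DependencyThing.resolveArtifacts takes as coordinates.
import ammonite.runtime.tools.DependencyConstructor._

object CoordinateSketch {
  // ("org.slf4j", "slf4j-api", "1.7.25")
  val plain: (String, String, String) = "org.slf4j" % "slf4j-api" % "1.7.25"

  // Artifact id becomes e.g. "upickle_2.11", depending on DependencyThing.scalaBinaryVersion.
  val crossBuilt: (String, String, String) = "com.lihaoyi" %% "upickle" % "0.4.4"
}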
|
package com.seanshubin.contract.test
import java.io.{Console, InputStream, PrintStream}
import java.nio.channels.Channel
import java.util
import java.util.Properties
import com.seanshubin.contract.domain.SystemContract
trait SystemNotImplemented extends SystemContract {
override def in: InputStream = ???
override def out: PrintStream = ???
override def err: PrintStream = ???
override def setIn(in: InputStream): Unit = ???
override def setOut(out: PrintStream): Unit = ???
override def setErr(err: PrintStream): Unit = ???
override def console: Console = ???
override def inheritedChannel: Channel = ???
override def setSecurityManager(s: SecurityManager): Unit = ???
override def getSecurityManager: SecurityManager = ???
override def currentTimeMillis: Long = ???
override def nanoTime: Long = ???
override def arraycopy(src: AnyRef, srcPos: Int, dest: AnyRef, destPos: Int, length: Int): Unit = ???
override def identityHashCode(x: AnyRef): Int = ???
override def getProperties: Properties = ???
override def lineSeparator: String = ???
override def setProperties(props: Properties): Unit = ???
override def getProperty(key: String): String = ???
override def getProperty(key: String, default: String): String = ???
override def setProperty(key: String, value: String): String = ???
override def clearProperty(key: String): String = ???
override def getenv(name: String): String = ???
override def getenv: util.Map[String, String] = ???
override def exit(status: Int): Unit = ???
override def gc(): Unit = ???
override def runFinalization(): Unit = ???
@deprecated(
message =
"This method is inherently unsafe. It may result in" +
"finalizers being called on live objects while other threads are" +
"concurrently manipulating those objects, resulting in erratic" +
"behavior or deadlock.",
since = "JDK1.1")
override def runFinalizersOnExit(value: Boolean): Unit = ???
override def load(filename: String): Unit = ???
override def loadLibrary(libname: String): Unit = ???
override def mapLibraryName(libname: String): String = ???
}
|
SeanShubin/contract
|
test/src/main/scala/com/seanshubin/contract/test/SystemNotImplemented.scala
|
Scala
|
unlicense
| 2,171
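A short sketch of the intended usage (the class name is invented): mix in SystemNotImplemented and override only what the code under test needs; everything else keeps throwing NotImplementedError, so unexpected calls fail loudly.
import com.seanshubin.contract.test.SystemNotImplemented

// Stub for tests that only touch the clock and the line separator.
class FixedClockSystem(fixedTimeMillis: Long) extends SystemNotImplemented {
  override def currentTimeMillis: Long = fixedTimeMillis
  override def lineSeparator: String = "\n"
}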
|
package tetris.tetrominoes
import tetris.tetrominoes.Color.Yellow
/**
* Shapes:
* xx
* ox
*/
case class O(x: Int = 0, rotation: Int = 0) extends Tetromino {
val currentShape = Seq((0, 1), (1, 1), (1, 0))
def rotate: O = this
def allRotations: Seq[O] = Seq(new O(x))
def copy(x: Int = 0, rotation: Int = 0): O = new O(x, rotation)
def color = new Yellow
}
|
PapaCharlie/TetrisBot
|
tetris/src/main/scala/tetris/tetrominoes/O.scala
|
Scala
|
mit
| 376
|
package it.dtk.util
import java.net.NetworkInterface
import scala.collection.JavaConversions._
/**
* Created by fabiofumarola on 16/09/15.
*/
object HostIp {
def findAll(): Map[String, String] = {
val interfaces = NetworkInterface.getNetworkInterfaces
interfaces.flatMap { inet =>
inet.getInetAddresses.
map(e => inet.getDisplayName -> e.getHostAddress)
}.toMap
}
def load(name: String): Option[String] = findAll().get(name)
}
|
DataToKnowledge/wheretolive-feed
|
feed-model/src/main/scala/it/dtk/util/HostIp.scala
|
Scala
|
apache-2.0
| 465
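A quick usage sketch; interface names are OS-dependent ("en0", "eth0", ...), so the lookup may well come back empty.
import it.dtk.util.HostIp

object HostIpSketch extends App {
  // Print every interface display name with one of its addresses.
  HostIp.findAll().foreach { case (interfaceName, address) =>
    println(s"$interfaceName -> $address")
  }
  println(HostIp.load("eth0").getOrElse("eth0 not found"))
}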
|
package org.randi3.method
import org.junit.runner.RunWith
import org.scalatest.matchers.MustMatchers
import org.scalatest.FunSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CompleteRandomizationTest extends FunSpec with MustMatchers {
describe("A Stack") {
it("should pop values in last-in-first-out order") (pending)
it("should throw NoSuchElementException if an empty stack is popped") (pending)
}
}
|
dschrimpf/randi3-method-complete
|
src/test/scala/org/randi3/method/complete/CompleteRandomizationTest.scala
|
Scala
|
gpl-3.0
| 454
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cg.monadic.transformer.spark
import org.cg.monadic.transformer.TransformationPipeline
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.SQLContext
/**
* @author WZ
*/
abstract class DataFrameTransformationPipeline[IN <: DataFrame]
extends TransformationPipeline[IN, IN] {
def sqlConext: SQLContext
}
|
CodeGerm/monadic-lib
|
src/main/scala/org/cg/monadic/transformer/spark/DataFrameTransformationPipeline.scala
|
Scala
|
apache-2.0
| 1,138
|
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import com.intellij.psi.PsiElement
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.scala.lang.psi.api.base.ScAnnotations
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlockStatement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScDocCommentOwner, ScMember}
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.result.Typeable
/**
* @author adkozlov
*/
trait ScValueOrVariable extends ScBlockStatement
with ScMember.WithBaseIconProvider
with ScDocCommentOwner
with ScDeclaredElementsHolder
with ScCommentOwner
with Typeable {
def keywordToken: PsiElement = findFirstChildByType(keywordElementType).get
protected def keywordElementType: IElementType
def isAbstract: Boolean
def isStable: Boolean
override def declaredElements: Seq[ScTypedDefinition]
def typeElement: Option[ScTypeElement]
// Makes sense for definitions only, not declarations, but it is convenient to have it here so as not to complicate the hierarchy.
def annotationAscription: Option[ScAnnotations] = None
def declaredType: Option[ScType] =
typeElement.flatMap {
_.`type`().toOption
}
final def hasExplicitType: Boolean = typeElement.isDefined
override protected def isSimilarMemberForNavigation(member: ScMember, isStrict: Boolean): Boolean = member match {
case other: ScValueOrVariable =>
for (thisName <- declaredNames;
otherName <- other.declaredNames
if thisName == otherName) {
return true
}
super.isSimilarMemberForNavigation(member, isStrict)
case _ => false
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScValueOrVariable.scala
|
Scala
|
apache-2.0
| 1,883
|
package io.simao.riepete.metric_receivers.riemann
import java.util.concurrent.TimeUnit
import akka.actor.{Actor, ActorLogging}
import io.simao.riepete.messages.Metric
import scala.collection.immutable
import scala.util.Try
case object GetResetIntervalStats
import com.codahale.metrics._
sealed trait ConnectionStat
case class Received(count: Long) extends ConnectionStat
case class Sent(count: Long) extends ConnectionStat
case class SentFinished(duration: Long) extends ConnectionStat
case class Acked(count: Long) extends ConnectionStat
case class Dropped(count: Long) extends ConnectionStat
case class Failed(metrics: Seq[Metric], cause: Throwable) extends ConnectionStat
class RiemannConnectionStatsKeeper extends Actor with ActorLogging {
val metrics = new MetricRegistry()
var intervalStats = immutable.Map[String, Long]()
JmxReporter.forRegistry(metrics)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.build().start()
@scala.throws[Exception](classOf[Exception])
override def preStart(): Unit = {
super.preStart()
resetInterval()
}
def receive: Receive = {
case m: ConnectionStat => m match {
case Received(count) =>
statsIncrement("received", count)
case Sent(count) =>
statsIncrement("sent", count)
metrics.meter("sentPerSecond").mark(count)
metrics.histogram("sentSize").update(count)
case SentFinished(duration) =>
metrics.timer("sendTime").update(duration, TimeUnit.MILLISECONDS)
case Acked(count) => statsIncrement("acked", count)
case Failed(q, _) => statsIncrement("ioerror", q.size)
case Dropped(count) => statsIncrement("dropped", count)
}
case GetResetIntervalStats =>
sender() ! statsMap()
resetInterval()
}
private def statsMap() = {
Map("totalSent" β getCounter("sent"),
"totalReceived" β getCounter("received"),
"acked" β getCounter("acked"),
"sendTime" β getTimer("sendTime"),
"dropped" β getCounter("dropped"),
"ioerrors" β getCounter("ioerror"),
"intervalAcked" β intervalStats("acked"),
"intervalSent" β intervalStats("sent"),
"intervalIoError" β intervalStats("ioerror"),
"sent/sec" β getMeter("sentPerSecond")
)
}
private def getMeter(key: String): String = {
Try(metrics.getMeters.get(key)) map { m ⇒
val rate = m.getOneMinuteRate / 60
f"$rate%2.2f"
} getOrElse "n/a"
}
private def getCounter(key: String) = {
Try(metrics.getCounters.get(key).getCount).getOrElse(0)
}
private def getTimer(key: String) = {
Try(metrics.getTimers.get(key)) map { m =>
val snapshot = m.getSnapshot
val mean = snapshot.getMean * 1.0e-6
val stddev = snapshot.getStdDev * 1.0e-6
f"$mean%2.2fms (Ο=$stddev%2.2f)"
} getOrElse "n/a"
}
private def resetInterval() = {
intervalStats = immutable.Map("acked" β 0l, "sent" β 0l, "ioerror" β 0l)
}
private def statsIncrement(key: String, inc: Long = 1) = {
metrics.counter(key).inc(inc)
val c = intervalStats.getOrElse(key, 0L)
intervalStats = intervalStats.updated(key, c + inc)
}
}
|
simao/riepete
|
src/main/scala/io/simao/riepete/metric_receivers/riemann/RiemannConnectionStatsKeeper.scala
|
Scala
|
mit
| 3,200
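A test-style sketch (wiring assumed, not taken from the repository) of how the stats keeper above is driven: feed it ConnectionStat messages, then ask for the interval summary, which also resets the interval counters.
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._
import io.simao.riepete.metric_receivers.riemann._

object StatsKeeperSketch extends App {
  val system = ActorSystem("stats-demo")
  val keeper = system.actorOf(Props[RiemannConnectionStatsKeeper], "stats")
  keeper ! Received(10)
  keeper ! Sent(10)
  keeper ! Acked(10)
  implicit val timeout: Timeout = Timeout(3.seconds)
  // Returns the Map built by statsMap() and resets the interval counters.
  val summary = Await.result(keeper ? GetResetIntervalStats, 3.seconds)
  println(summary)
  system.terminate()
}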
|