code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package kippercalendar.acceptance.spec import kippercalendar.acceptance.WebSpecification import org.joda.time.DateTime class DayCalendarSpec extends WebSpecification { object `The next day button should` { def `update the calendar title`() { val page = in.driver.nav.gotoPage() page.clickDisplayDayCalendar() page.clickNextDay() page.assertDayTitle(new DateTime().plusDays(1)) } } object `The previous day button should` { def `update the calendar title`() { val page = in.driver.nav.gotoPage() page.clickDisplayDayCalendar() page.clickPreviousDay() page.assertDayTitle(new DateTime().minusDays(1)) } } }
kipperjim/kipper-calendar
src/test/scala/kippercalendar/acceptance/spec/DayCalendarSpec.scala
Scala
mit
683
package chapter4 import akka.actor.Actor.Receive import akka.actor.{Terminated, ActorLogging, Actor, ActorRef} /** * Created by y28yang on 1/29/2016. */ class DbWatcher(dbWriter:ActorRef) extends Actor with ActorLogging{ override def receive = { case Terminated(actorRef) =>{ log.warning(s"actor $actorRef was terminated") } } }
wjingyao2008/firsttry
myarchtype/src/main/scala/chapter4/DbWatcher.scala
Scala
apache-2.0
353
object OptionMonoidApp extends App { import scalaz._ import Scalaz._ println( (none: Option[String]) |+| "andy".some ) println( Tags.First('a'.some) |+| Tags.First('b'.some) ) println( Tags.First(none: Option[Char]) |+| Tags.First('b'.some) ) println( Tags.First('a'.some) |+| Tags.First(none: Option[Char]) ) }
diegopacheco/scala-playground
scalaz/src/main/scala/OptionMonoidApp.scala
Scala
unlicense
359
object Test extends App { def test1(n: Int) = { val old = "old" val catcher: PartialFunction[Throwable, Unit] = { case e => println(e) } try { println(s"""Bob is ${s"$n"} years ${s"$old"}!""") } catch catcher try { println(s"""Bob is ${f"$n"} years ${s"$old"}!""") } catch catcher try { println(f"""Bob is ${s"$n"} years ${s"$old"}!""") } catch catcher try { println(f"""Bob is ${f"$n"} years ${s"$old"}!""") } catch catcher try { println(f"""Bob is ${f"$n%2d"} years ${s"$old"}!""") } catch catcher try { println(f"""Bob is ${s"$n%2d"} years ${s"$old"}!""") } catch catcher try { println(s"""Bob is ${f"$n%2d"} years ${s"$old"}!""") } catch catcher try { println(s"""Bob is ${s"$n%2d"} years ${s"$old"}!""") } catch catcher } test1(1) println("===============") test1(12) println("===============") test1(123) }
som-snytt/dotty
tests/pending/run/interpolationMultiline2.scala
Scala
apache-2.0
870
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.log import java.io.File import kafka.server.checkpoints.LeaderEpochCheckpoint import kafka.server.epoch.EpochEntry import kafka.server.epoch.LeaderEpochFileCache import kafka.utils.TestUtils import kafka.utils.TestUtils.checkEquals import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.{MockTime, Time, Utils} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import scala.jdk.CollectionConverters._ import scala.collection._ import scala.collection.mutable.ArrayBuffer class LogSegmentTest { val topicPartition = new TopicPartition("topic", 0) val segments = mutable.ArrayBuffer[LogSegment]() var logDir: File = _ /* create a segment with the given base offset */ def createSegment(offset: Long, indexIntervalBytes: Int = 10, time: Time = Time.SYSTEM): LogSegment = { val seg = LogTestUtils.createSegment(offset, logDir, indexIntervalBytes, time) segments += seg seg } /* create a ByteBufferMessageSet for the given messages starting from the given offset */ def records(offset: Long, records: String*): MemoryRecords = { MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, 
offset, CompressionType.NONE, TimestampType.CREATE_TIME, records.map { s => new SimpleRecord(offset * 10, s.getBytes) }: _*) } @BeforeEach def setup(): Unit = { logDir = TestUtils.tempDir() } @AfterEach def teardown(): Unit = { segments.foreach(_.close()) Utils.delete(logDir) } /** * A read on an empty log segment should return null */ @Test def testReadOnEmptySegment(): Unit = { val seg = createSegment(40) val read = seg.read(startOffset = 40, maxSize = 300) assertNull(read, "Read beyond the last offset in the segment should be null") } /** * Reading from before the first offset in the segment should return messages * beginning with the first message in the segment */ @Test def testReadBeforeFirstOffset(): Unit = { val seg = createSegment(40) val ms = records(50, "hello", "there", "little", "bee") seg.append(53, RecordBatch.NO_TIMESTAMP, -1L, ms) val read = seg.read(startOffset = 41, maxSize = 300).records checkEquals(ms.records.iterator, read.records.iterator) } /** * If we read from an offset beyond the last offset in the segment we should get null */ @Test def testReadAfterLast(): Unit = { val seg = createSegment(40) val ms = records(50, "hello", "there") seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms) val read = seg.read(startOffset = 52, maxSize = 200) assertNull(read, "Read beyond the last offset in the segment should give null") } /** * If we read from an offset which doesn't exist we should get a message set beginning * with the least offset greater than the given startOffset. 
*/ @Test def testReadFromGap(): Unit = { val seg = createSegment(40) val ms = records(50, "hello", "there") seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms) val ms2 = records(60, "alpha", "beta") seg.append(61, RecordBatch.NO_TIMESTAMP, -1L, ms2) val read = seg.read(startOffset = 55, maxSize = 200) checkEquals(ms2.records.iterator, read.records.records.iterator) } /** * In a loop append two messages then truncate off the second of those messages and check that we can read * the first but not the second message. */ @Test def testTruncate(): Unit = { val seg = createSegment(40) var offset = 40 for (_ <- 0 until 30) { val ms1 = records(offset, "hello") seg.append(offset, RecordBatch.NO_TIMESTAMP, -1L, ms1) val ms2 = records(offset + 1, "hello") seg.append(offset + 1, RecordBatch.NO_TIMESTAMP, -1L, ms2) // check that we can read back both messages val read = seg.read(offset, 10000) assertEquals(List(ms1.records.iterator.next(), ms2.records.iterator.next()), read.records.records.asScala.toList) // now truncate off the last message seg.truncateTo(offset + 1) val read2 = seg.read(offset, 10000) assertEquals(1, read2.records.records.asScala.size) checkEquals(ms1.records.iterator, read2.records.records.iterator) offset += 1 } } @Test def testTruncateEmptySegment(): Unit = { // This tests the scenario in which the follower truncates to an empty segment. 
In this // case we must ensure that the index is resized so that the log segment is not mistakenly // rolled due to a full index val maxSegmentMs = 300000 val time = new MockTime val seg = createSegment(0, time = time) // Force load indexes before closing the segment seg.timeIndex seg.offsetIndex seg.close() val reopened = createSegment(0, time = time) assertEquals(0, seg.timeIndex.sizeInBytes) assertEquals(0, seg.offsetIndex.sizeInBytes) time.sleep(500) reopened.truncateTo(57) assertEquals(0, reopened.timeWaitedForRoll(time.milliseconds(), RecordBatch.NO_TIMESTAMP)) assertFalse(reopened.timeIndex.isFull) assertFalse(reopened.offsetIndex.isFull) var rollParams = RollParams(maxSegmentMs, maxSegmentBytes = Int.MaxValue, RecordBatch.NO_TIMESTAMP, maxOffsetInMessages = 100L, messagesSize = 1024, time.milliseconds()) assertFalse(reopened.shouldRoll(rollParams)) // The segment should not be rolled even if maxSegmentMs has been exceeded time.sleep(maxSegmentMs + 1) assertEquals(maxSegmentMs + 1, reopened.timeWaitedForRoll(time.milliseconds(), RecordBatch.NO_TIMESTAMP)) rollParams = RollParams(maxSegmentMs, maxSegmentBytes = Int.MaxValue, RecordBatch.NO_TIMESTAMP, maxOffsetInMessages = 100L, messagesSize = 1024, time.milliseconds()) assertFalse(reopened.shouldRoll(rollParams)) // But we should still roll the segment if we cannot fit the next offset rollParams = RollParams(maxSegmentMs, maxSegmentBytes = Int.MaxValue, RecordBatch.NO_TIMESTAMP, maxOffsetInMessages = Int.MaxValue.toLong + 200L, messagesSize = 1024, time.milliseconds()) assertTrue(reopened.shouldRoll(rollParams)) } @Test def testReloadLargestTimestampAndNextOffsetAfterTruncation(): Unit = { val numMessages = 30 val seg = createSegment(40, 2 * records(0, "hello").sizeInBytes - 1) var offset = 40 for (_ <- 0 until numMessages) { seg.append(offset, offset, offset, records(offset, "hello")) offset += 1 } assertEquals(offset, seg.readNextOffset) val expectedNumEntries = numMessages / 2 - 1 
assertEquals(expectedNumEntries, seg.timeIndex.entries, s"Should have $expectedNumEntries time indexes") seg.truncateTo(41) assertEquals(0, seg.timeIndex.entries, s"Should have 0 time indexes") assertEquals(400L, seg.largestTimestamp, s"Largest timestamp should be 400") assertEquals(41, seg.readNextOffset) } /** * Test truncating the whole segment, and check that we can reappend with the original offset. */ @Test def testTruncateFull(): Unit = { // test the case where we fully truncate the log val time = new MockTime val seg = createSegment(40, time = time) seg.append(41, RecordBatch.NO_TIMESTAMP, -1L, records(40, "hello", "there")) // If the segment is empty after truncation, the create time should be reset time.sleep(500) assertEquals(500, seg.timeWaitedForRoll(time.milliseconds(), RecordBatch.NO_TIMESTAMP)) seg.truncateTo(0) assertEquals(0, seg.timeWaitedForRoll(time.milliseconds(), RecordBatch.NO_TIMESTAMP)) assertFalse(seg.timeIndex.isFull) assertFalse(seg.offsetIndex.isFull) assertNull(seg.read(0, 1024), "Segment should be empty.") seg.append(41, RecordBatch.NO_TIMESTAMP, -1L, records(40, "hello", "there")) } /** * Append messages with timestamp and search message by timestamp. 
*/ @Test def testFindOffsetByTimestamp(): Unit = { val messageSize = records(0, s"msg00").sizeInBytes val seg = createSegment(40, messageSize * 2 - 1) // Produce some messages for (i <- 40 until 50) seg.append(i, i * 10, i, records(i, s"msg$i")) assertEquals(490, seg.largestTimestamp) // Search for an indexed timestamp assertEquals(42, seg.findOffsetByTimestamp(420).get.offset) assertEquals(43, seg.findOffsetByTimestamp(421).get.offset) // Search for an un-indexed timestamp assertEquals(43, seg.findOffsetByTimestamp(430).get.offset) assertEquals(44, seg.findOffsetByTimestamp(431).get.offset) // Search beyond the last timestamp assertEquals(None, seg.findOffsetByTimestamp(491)) // Search before the first indexed timestamp assertEquals(41, seg.findOffsetByTimestamp(401).get.offset) // Search before the first timestamp assertEquals(40, seg.findOffsetByTimestamp(399).get.offset) } /** * Test that offsets are assigned sequentially and that the nextOffset variable is incremented */ @Test def testNextOffsetCalculation(): Unit = { val seg = createSegment(40) assertEquals(40, seg.readNextOffset) seg.append(52, RecordBatch.NO_TIMESTAMP, -1L, records(50, "hello", "there", "you")) assertEquals(53, seg.readNextOffset) } /** * Test that we can change the file suffixes for the log and index files */ @Test def testChangeFileSuffixes(): Unit = { val seg = createSegment(40) val logFile = seg.log.file val indexFile = seg.lazyOffsetIndex.file val timeIndexFile = seg.lazyTimeIndex.file // Ensure that files for offset and time indices have not been created eagerly. assertFalse(seg.lazyOffsetIndex.file.exists) assertFalse(seg.lazyTimeIndex.file.exists) seg.changeFileSuffixes("", ".deleted") // Ensure that attempt to change suffixes for non-existing offset and time indices does not create new files. assertFalse(seg.lazyOffsetIndex.file.exists) assertFalse(seg.lazyTimeIndex.file.exists) // Ensure that file names are updated accordingly. 
assertEquals(logFile.getAbsolutePath + ".deleted", seg.log.file.getAbsolutePath) assertEquals(indexFile.getAbsolutePath + ".deleted", seg.lazyOffsetIndex.file.getAbsolutePath) assertEquals(timeIndexFile.getAbsolutePath + ".deleted", seg.lazyTimeIndex.file.getAbsolutePath) assertTrue(seg.log.file.exists) // Ensure lazy creation of offset index file upon accessing it. seg.lazyOffsetIndex.get assertTrue(seg.lazyOffsetIndex.file.exists) // Ensure lazy creation of time index file upon accessing it. seg.lazyTimeIndex.get assertTrue(seg.lazyTimeIndex.file.exists) } /** * Create a segment with some data and an index. Then corrupt the index, * and recover the segment, the entries should all be readable. */ @Test def testRecoveryFixesCorruptIndex(): Unit = { val seg = createSegment(0) for(i <- 0 until 100) seg.append(i, RecordBatch.NO_TIMESTAMP, -1L, records(i, i.toString)) val indexFile = seg.lazyOffsetIndex.file TestUtils.writeNonsenseToFile(indexFile, 5, indexFile.length.toInt) seg.recover(new ProducerStateManager(topicPartition, logDir)) for(i <- 0 until 100) { val records = seg.read(i, 1, minOneMessage = true).records.records assertEquals(i, records.iterator.next().offset) } } @Test def testRecoverTransactionIndex(): Unit = { val segment = createSegment(100) val producerEpoch = 0.toShort val partitionLeaderEpoch = 15 val sequence = 100 val pid1 = 5L val pid2 = 10L // append transactional records from pid1 segment.append(largestOffset = 101L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 100L, records = MemoryRecords.withTransactionalRecords(100L, CompressionType.NONE, pid1, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) // append transactional records from pid2 segment.append(largestOffset = 103L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 102L, records = MemoryRecords.withTransactionalRecords(102L, CompressionType.NONE, pid2, producerEpoch, 
sequence, partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) // append non-transactional records segment.append(largestOffset = 105L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 104L, records = MemoryRecords.withRecords(104L, CompressionType.NONE, partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) // abort the transaction from pid2 (note LSO should be 100L since the txn from pid1 has not completed) segment.append(largestOffset = 106L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 106L, records = endTxnRecords(ControlRecordType.ABORT, pid2, producerEpoch, offset = 106L)) // commit the transaction from pid1 segment.append(largestOffset = 107L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 107L, records = endTxnRecords(ControlRecordType.COMMIT, pid1, producerEpoch, offset = 107L)) var stateManager = new ProducerStateManager(topicPartition, logDir) segment.recover(stateManager) assertEquals(108L, stateManager.mapEndOffset) var abortedTxns = segment.txnIndex.allAbortedTxns assertEquals(1, abortedTxns.size) var abortedTxn = abortedTxns.head assertEquals(pid2, abortedTxn.producerId) assertEquals(102L, abortedTxn.firstOffset) assertEquals(106L, abortedTxn.lastOffset) assertEquals(100L, abortedTxn.lastStableOffset) // recover again, but this time assuming the transaction from pid2 began on a previous segment stateManager = new ProducerStateManager(topicPartition, logDir) stateManager.loadProducerEntry(new ProducerStateEntry(pid2, mutable.Queue[BatchMetadata](BatchMetadata(10, 10L, 5, RecordBatch.NO_TIMESTAMP)), producerEpoch, 0, RecordBatch.NO_TIMESTAMP, Some(75L))) segment.recover(stateManager) assertEquals(108L, stateManager.mapEndOffset) abortedTxns = segment.txnIndex.allAbortedTxns assertEquals(1, abortedTxns.size) abortedTxn = abortedTxns.head assertEquals(pid2, abortedTxn.producerId) assertEquals(75L, 
abortedTxn.firstOffset) assertEquals(106L, abortedTxn.lastOffset) assertEquals(100L, abortedTxn.lastStableOffset) } /** * Create a segment with some data, then recover the segment. * The epoch cache entries should reflect the segment. */ @Test def testRecoveryRebuildsEpochCache(): Unit = { val seg = createSegment(0) val checkpoint: LeaderEpochCheckpoint = new LeaderEpochCheckpoint { private var epochs = Seq.empty[EpochEntry] override def write(epochs: Iterable[EpochEntry]): Unit = { this.epochs = epochs.toVector } override def read(): Seq[EpochEntry] = this.epochs } val cache = new LeaderEpochFileCache(topicPartition, checkpoint) seg.append(largestOffset = 105L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 104L, records = MemoryRecords.withRecords(104L, CompressionType.NONE, 0, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) seg.append(largestOffset = 107L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 106L, records = MemoryRecords.withRecords(106L, CompressionType.NONE, 1, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) seg.append(largestOffset = 109L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 108L, records = MemoryRecords.withRecords(108L, CompressionType.NONE, 1, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) seg.append(largestOffset = 111L, largestTimestamp = RecordBatch.NO_TIMESTAMP, shallowOffsetOfMaxTimestamp = 110, records = MemoryRecords.withRecords(110L, CompressionType.NONE, 2, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) seg.recover(new ProducerStateManager(topicPartition, logDir), Some(cache)) assertEquals(ArrayBuffer(EpochEntry(epoch = 0, startOffset = 104L), EpochEntry(epoch = 1, startOffset = 106), EpochEntry(epoch = 2, startOffset = 110)), cache.epochEntries) } private def endTxnRecords(controlRecordType: ControlRecordType, producerId: Long, producerEpoch: Short, offset: Long, 
partitionLeaderEpoch: Int = 0, coordinatorEpoch: Int = 0, timestamp: Long = RecordBatch.NO_TIMESTAMP): MemoryRecords = { val marker = new EndTransactionMarker(controlRecordType, coordinatorEpoch) MemoryRecords.withEndTransactionMarker(offset, timestamp, partitionLeaderEpoch, producerId, producerEpoch, marker) } /** * Create a segment with some data and an index. Then corrupt the index, * and recover the segment, the entries should all be readable. */ @Test def testRecoveryFixesCorruptTimeIndex(): Unit = { val seg = createSegment(0) for(i <- 0 until 100) seg.append(i, i * 10, i, records(i, i.toString)) val timeIndexFile = seg.lazyTimeIndex.file TestUtils.writeNonsenseToFile(timeIndexFile, 5, timeIndexFile.length.toInt) seg.recover(new ProducerStateManager(topicPartition, logDir)) for(i <- 0 until 100) { assertEquals(i, seg.findOffsetByTimestamp(i * 10).get.offset) if (i < 99) assertEquals(i + 1, seg.findOffsetByTimestamp(i * 10 + 1).get.offset) } } /** * Randomly corrupt a log a number of times and attempt recovery. 
*/ @Test def testRecoveryWithCorruptMessage(): Unit = { val messagesAppended = 20 for (_ <- 0 until 10) { val seg = createSegment(0) for (i <- 0 until messagesAppended) seg.append(i, RecordBatch.NO_TIMESTAMP, -1L, records(i, i.toString)) val offsetToBeginCorruption = TestUtils.random.nextInt(messagesAppended) // start corrupting somewhere in the middle of the chosen record all the way to the end val recordPosition = seg.log.searchForOffsetWithSize(offsetToBeginCorruption, 0) val position = recordPosition.position + TestUtils.random.nextInt(15) TestUtils.writeNonsenseToFile(seg.log.file, position, (seg.log.file.length - position).toInt) seg.recover(new ProducerStateManager(topicPartition, logDir)) assertEquals((0 until offsetToBeginCorruption).toList, seg.log.batches.asScala.map(_.lastOffset).toList, "Should have truncated off bad messages.") seg.deleteIfExists() } } private def createSegment(baseOffset: Long, fileAlreadyExists: Boolean, initFileSize: Int, preallocate: Boolean): LogSegment = { val tempDir = TestUtils.tempDir() val logConfig = LogConfig(Map( LogConfig.IndexIntervalBytesProp -> 10, LogConfig.SegmentIndexBytesProp -> 1000, LogConfig.SegmentJitterMsProp -> 0 ).asJava) val seg = LogSegment.open(tempDir, baseOffset, logConfig, Time.SYSTEM, fileAlreadyExists = fileAlreadyExists, initFileSize = initFileSize, preallocate = preallocate) segments += seg seg } /* create a segment with pre allocate, put message to it and verify */ @Test def testCreateWithInitFileSizeAppendMessage(): Unit = { val seg = createSegment(40, false, 512*1024*1024, true) val ms = records(50, "hello", "there") seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms) val ms2 = records(60, "alpha", "beta") seg.append(61, RecordBatch.NO_TIMESTAMP, -1L, ms2) val read = seg.read(startOffset = 55, maxSize = 200) checkEquals(ms2.records.iterator, read.records.records.iterator) } /* create a segment with pre allocate and clearly shut down*/ @Test def testCreateWithInitFileSizeClearShutdown(): Unit = { 
val tempDir = TestUtils.tempDir() val logConfig = LogConfig(Map( LogConfig.IndexIntervalBytesProp -> 10, LogConfig.SegmentIndexBytesProp -> 1000, LogConfig.SegmentJitterMsProp -> 0 ).asJava) val seg = LogSegment.open(tempDir, baseOffset = 40, logConfig, Time.SYSTEM, initFileSize = 512 * 1024 * 1024, preallocate = true) val ms = records(50, "hello", "there") seg.append(51, RecordBatch.NO_TIMESTAMP, -1L, ms) val ms2 = records(60, "alpha", "beta") seg.append(61, RecordBatch.NO_TIMESTAMP, -1L, ms2) val read = seg.read(startOffset = 55, maxSize = 200) checkEquals(ms2.records.iterator, read.records.records.iterator) val oldSize = seg.log.sizeInBytes() val oldPosition = seg.log.channel.position val oldFileSize = seg.log.file.length assertEquals(512*1024*1024, oldFileSize) seg.close() //After close, file should be trimmed assertEquals(oldSize, seg.log.file.length) val segReopen = LogSegment.open(tempDir, baseOffset = 40, logConfig, Time.SYSTEM, fileAlreadyExists = true, initFileSize = 512 * 1024 * 1024, preallocate = true) segments += segReopen val readAgain = segReopen.read(startOffset = 55, maxSize = 200) checkEquals(ms2.records.iterator, readAgain.records.records.iterator) val size = segReopen.log.sizeInBytes() val position = segReopen.log.channel.position val fileSize = segReopen.log.file.length assertEquals(oldPosition, position) assertEquals(oldSize, size) assertEquals(size, fileSize) } @Test def shouldTruncateEvenIfOffsetPointsToAGapInTheLog(): Unit = { val seg = createSegment(40) val offset = 40 def records(offset: Long, record: String): MemoryRecords = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, offset, CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(offset * 1000, record.getBytes)) //Given two messages with a gap between them (e.g. 
mid offset compacted away) val ms1 = records(offset, "first message") seg.append(offset, RecordBatch.NO_TIMESTAMP, -1L, ms1) val ms2 = records(offset + 3, "message after gap") seg.append(offset + 3, RecordBatch.NO_TIMESTAMP, -1L, ms2) // When we truncate to an offset without a corresponding log entry seg.truncateTo(offset + 1) //Then we should still truncate the record that was present (i.e. offset + 3 is gone) val log = seg.read(offset, 10000) assertEquals(offset, log.records.batches.iterator.next().baseOffset()) assertEquals(1, log.records.batches.asScala.size) } @Test def testAppendFromFile(): Unit = { def records(offset: Long, size: Int): MemoryRecords = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, offset, CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(new Array[Byte](size))) // create a log file in a separate directory to avoid conflicting with created segments val tempDir = TestUtils.tempDir() val fileRecords = FileRecords.open(Log.logFile(tempDir, 0)) // Simulate a scenario where we have a single log with an offset range exceeding Int.MaxValue fileRecords.append(records(0, 1024)) fileRecords.append(records(500, 1024 * 1024 + 1)) val sizeBeforeOverflow = fileRecords.sizeInBytes() fileRecords.append(records(Int.MaxValue + 5L, 1024)) val sizeAfterOverflow = fileRecords.sizeInBytes() val segment = createSegment(0) val bytesAppended = segment.appendFromFile(fileRecords, 0) assertEquals(sizeBeforeOverflow, bytesAppended) assertEquals(sizeBeforeOverflow, segment.size) val overflowSegment = createSegment(Int.MaxValue) val overflowBytesAppended = overflowSegment.appendFromFile(fileRecords, sizeBeforeOverflow) assertEquals(sizeAfterOverflow - sizeBeforeOverflow, overflowBytesAppended) assertEquals(overflowBytesAppended, overflowSegment.size) Utils.delete(tempDir) } }
Chasego/kafka
core/src/test/scala/unit/kafka/log/LogSegmentTest.scala
Scala
apache-2.0
24,817
/* __ *\ ** ________ ___ / / ___ Scala API ** ** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL ** ** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ ** ** /____/\___/_/ |_/____/_/ | | ** ** |/ ** \* */ // GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp. package scala /** A function of 7 parameters. * */ trait Function7[-T1, -T2, -T3, -T4, -T5, -T6, -T7, +R] extends AnyRef { self => /** Apply the body of this function to the arguments. * @return the result of function application. */ def apply(v1: T1, v2: T2, v3: T3, v4: T4, v5: T5, v6: T6, v7: T7): R /** Creates a curried version of this function. * * @return a function `f` such that `f(x1)(x2)(x3)(x4)(x5)(x6)(x7) == apply(x1, x2, x3, x4, x5, x6, x7)` */ @annotation.unspecialized def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => R = { (x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7) => self.apply(x1, x2, x3, x4, x5, x6, x7)).curried } /** Creates a tupled version of this function: instead of 7 arguments, * it accepts a single [[scala.Tuple7]] argument. * * @return a function `f` such that `f((x1, x2, x3, x4, x5, x6, x7)) == f(Tuple7(x1, x2, x3, x4, x5, x6, x7)) == apply(x1, x2, x3, x4, x5, x6, x7)` */ @annotation.unspecialized def tupled: Tuple7[T1, T2, T3, T4, T5, T6, T7] => R = { case Tuple7(x1, x2, x3, x4, x5, x6, x7) => apply(x1, x2, x3, x4, x5, x6, x7) } override def toString() = "<function7>" }
felixmulder/scala
src/library/scala/Function7.scala
Scala
bsd-3-clause
1,798
/******************************************************************************* * Copyright (C) 2012 Łukasz Szpakowski. * * This library is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library. If not, see <http://www.gnu.org/licenses/>. ******************************************************************************/ package org.lkbgraph.immutable import scala.collection.mutable.Builder import scala.collection.mutable.SetBuilder import org.lkbgraph._ /** A factory class for immutable graph. * * @author Łukasz Szpakowski */ abstract class GraphFactory[GG[XV, XX, XE[+XY, +XZ] <: EdgeLike[XY, XZ, XE]] <: base.GraphLike[XV, XX, XE, GG[XV, XX, XE]] with Graph[XV, XX, XE]] extends base.GraphFactory[GG] { override def newBuilder[V, X, E[+Y, +Z] <: EdgeLike[Y, Z, E]]: Builder[GraphParam[V, X, E], GG[V, X, E]] = new SetBuilder(empty) }
luckboy/LkbGraph
src/main/org/lkbgraph/immutable/GraphFactory.scala
Scala
lgpl-3.0
1,413
package Models import java.sql.Timestamp import org.squeryl.KeyedEntity /** * Created by Simon on 28/11/2014. */ // adapted from: http://www.srirangan.net/2011-03-getting-started-scala-persistence-with-squeryl class BaseEntity extends KeyedEntity[Long] { val id:Long = 0 var lastModified = new Timestamp(System.currentTimeMillis) }
J4g0n/testingSqueryl
src/main/scala/Models/BaseEntity.scala
Scala
mit
343
package org.webant.commons.store import org.webant.commons.entity.HttpDataEntity trait IStore[T <: HttpDataEntity] { def init(params: java.util.Map[String, Object]): Boolean = ??? def get(id: String): T = ??? def get(ids: Iterable[String]): Iterable[T] = ??? def save(data: T): Int = ??? def save(data: Iterable[T]): Int = ??? def update(data: T): Int = ??? def update(data: Iterable[T]): Int = ??? def upsert(data: T): Int = ??? def upsert(data: Iterable[T]): Int = ??? def delete(id: String): T = ??? def delete(ids: Iterable[String]): Iterable[T] = ??? def close(): Boolean = ??? }
sutine/webant
webant-commons/src/main/scala/org/webant/commons/store/IStore.scala
Scala
apache-2.0
617
import scala.collection.mutable.ArrayBuffer val x = new ArrayBuffer[Int] val y = new /*ref*/ImmutableMapAdaptor /* import scala.collection.mutable import scala.collection.mutable.ArrayBuffer val x = new ArrayBuffer[Int] val y = new mutable.ImmutableMapAdaptor */
JetBrains/intellij-scala
scala/scala-impl/testdata/autoImport/all/BufferProxy.scala
Scala
apache-2.0
264
package edu.umass.ciir.kbbridge.features import edu.umass.ciir.kbbridge.data.{ScoredWikipediaEntity, WikipediaEntity, EntityMention} /** * We do not add topic features */ object GalagoEntityLinkingFeatureLib { object GalagoEntityLinkingFeatures{ val featurePrefix = "GalagoEntityLinkingFeatures" val galagoScore1 = "galagoscore1" val galagoScoreNorm = "galagoscoreNorm" } trait GalagoEntityLinkingFeatures extends FeatureGenerator { import GalagoEntityLinkingFeatures._ def generateGalagoEntityLinkingFeatures(mention:EntityMention, entity:ScoredWikipediaEntity, otherCands:Seq[ScoredWikipediaEntity]) { addUnprefixedValueFeature("galagoscoreraw", entity.score) var prob = Math.exp(entity.score) addUnprefixedValueFeature("galagoscoreexp", prob) otherCands.headOption match { case Some(h) => { val K = h.score var sum = 0.0d //sum += prob for (cand <- otherCands) { sum += Math.exp(cand.score - K) } val normalized = Math.exp(h.score - (K + math.log(sum))) addUnprefixedValueFeature(galagoScoreNorm, normalized) } case None => { } } } private def addUnprefixedFeature(category:String, value:String) { addFeature(featurePrefix, category, value) } private def addUnprefixedValueFeature(category:String, value:Double) { addValueFeature(featurePrefix, category, value) } } }
daltonj/KbBridge
src/main/scala/edu/umass/ciir/kbbridge/features/GalagoEntityLinkingFeatureLib.scala
Scala
apache-2.0
1,512
package com.shocktrade.autonomous.dao import scala.scalajs.js /** * Represents a Trading Strategy's Selling Flow * @author Lawrence Daniels <lawrence.daniels@gmail.com> */ class SellingFlow(val profitTarget: js.UndefOr[Double]) extends js.Object /** * Selling Flow Companion * @author Lawrence Daniels <lawrence.daniels@gmail.com> */ object SellingFlow { /** * Selling Flow Enrichment * @param flow the given [[SellingFlow selling flow]] */ implicit class SellingFlowEnrichment(val flow: SellingFlow) extends AnyVal { @inline def isValid: Boolean = flow.profitTarget.nonEmpty } }
ldaniels528/shocktrade.js
app/server/robots/src/main/scala/com/shocktrade/autonomous/dao/SellingFlow.scala
Scala
apache-2.0
624
package com.cobble.swaggg.reference

import com.cobble.swaggg.fluid.FluidSwag
import net.minecraft.util.ResourceLocation
import net.minecraftforge.fluids.{Fluid, FluidRegistry}

/** Holds the mod's fluid instances and registers them with Forge. */
object SwagggFluids {

  // The mod's custom fluid instance.
  val fluidSwaggg: Fluid = new FluidSwag

  /**
   * Registers the mod's fluids with the Forge fluid registry, then echoes the
   * registered name back from the registry as a sanity check.
   */
  def registerFluids(): Unit = {
    println("Registering Fluids")
    FluidRegistry.registerFluid(fluidSwaggg)
    val registered = FluidRegistry.getFluid(fluidSwaggg.getName)
    println(registered.getName)
  }
}
Cobbleopolis/Swaggg
src/main/java/com/cobble/swaggg/reference/SwagggFluids.scala
Scala
lgpl-2.1
446
package gapt.proofs.lk import gapt.expr._ import BetaReduction.{ betaNormalize, _ } import gapt.expr.formula.All import gapt.expr.formula.Ex import gapt.expr.subst.Substitutable import gapt.expr.subst.Substitution import gapt.expr.util.freeVariables import gapt.expr.util.rename import gapt.proofs.SequentConnector import gapt.proofs.gaptic.OpenAssumption import gapt.proofs.lk.rules.AndLeftRule import gapt.proofs.lk.rules.AndRightRule import gapt.proofs.lk.rules.BottomAxiom import gapt.proofs.lk.rules.ContractionLeftRule import gapt.proofs.lk.rules.ContractionRightRule import gapt.proofs.lk.rules.CutRule import gapt.proofs.lk.rules.ConversionLeftRule import gapt.proofs.lk.rules.ConversionRightRule import gapt.proofs.lk.rules.EqualityLeftRule import gapt.proofs.lk.rules.EqualityRightRule import gapt.proofs.lk.rules.ExistsLeftRule import gapt.proofs.lk.rules.ExistsRightRule import gapt.proofs.lk.rules.ExistsSkLeftRule import gapt.proofs.lk.rules.ForallLeftRule import gapt.proofs.lk.rules.ForallRightRule import gapt.proofs.lk.rules.ForallSkRightRule import gapt.proofs.lk.rules.ImpLeftRule import gapt.proofs.lk.rules.ImpRightRule import gapt.proofs.lk.rules.InductionCase import gapt.proofs.lk.rules.InductionRule import gapt.proofs.lk.rules.LogicalAxiom import gapt.proofs.lk.rules.NegLeftRule import gapt.proofs.lk.rules.NegRightRule import gapt.proofs.lk.rules.OrLeftRule import gapt.proofs.lk.rules.OrRightRule import gapt.proofs.lk.rules.ProofLink import gapt.proofs.lk.rules.ReflexivityAxiom import gapt.proofs.lk.rules.TopAxiom import gapt.proofs.lk.rules.WeakeningLeftRule import gapt.proofs.lk.rules.WeakeningRightRule import gapt.proofs.lk.util.freeVariablesLK /** * Class that describes how LKProofs can be substituted. * * @param preserveEigenvariables If true, preserve eigenvariables and never change them. If false (the default), * treat eigenvariables as variables bound by their strong quantifier inferences and * perform capture-avoiding substitution. 
 */
class LKProofSubstitutable( preserveEigenvariables: Boolean ) extends Substitutable[Substitution, LKProof, LKProof] {

  /**
   * Applies a substitution to an LK proof, beta-normalizing substituted formulas and terms.
   *
   * @param substitution The substitution to be applied.
   * @param proof The proof to apply the substitution to.
   * @return The substituted proof.
   */
  override def applySubstitution( substitution: Substitution, proof: LKProof ): LKProof =
    if ( substitution.isIdentity ) proof
    else {
      // When types are substituted, extend the substitution so that every free
      // variable of the proof is in its domain (mapping it to its retyped form).
      val sub1 = if ( substitution.typeMap.isEmpty ) substitution else {
        Substitution(
          freeVariablesLK( proof ).diff( substitution.domain ).map( v => v -> substitution( v ).asInstanceOf[Var] ) ++ substitution.map,
          substitution.typeMap )
      }
      go( sub1, proof )
    }

  // if sub.typeMap.nonEmpty, then every free variable must be in the domain of sub
  private def go( substitution: Substitution, proof: LKProof ): LKProof = proof match {
    case _ if substitution isIdentity => proof

    case ProofLink( referencedProof, linkquent ) =>
      ProofLink( betaNormalize( substitution( referencedProof ) ), linkquent.map { f => betaNormalize( substitution( f ) ) } )

    // Axioms: substitute (and beta-normalize) the formula/term, if any.
    case TopAxiom => TopAxiom
    case BottomAxiom => BottomAxiom
    case LogicalAxiom( f ) => LogicalAxiom( betaNormalize( substitution( f ) ) )
    case ReflexivityAxiom( t ) => ReflexivityAxiom( betaNormalize( substitution( t ) ) )

    case WeakeningLeftRule( subProof, f ) =>
      val subProofNew = go( substitution, subProof )
      WeakeningLeftRule( subProofNew, betaNormalize( substitution( f ) ) )

    case WeakeningRightRule( subProof, f ) =>
      val subProofNew = go( substitution, subProof )
      WeakeningRightRule( subProofNew, betaNormalize( substitution( f ) ) )

    case ContractionLeftRule( subProof, aux1, aux2 ) =>
      val subProofNew = go( substitution, subProof )
      ContractionLeftRule( subProofNew, aux1, aux2 )

    case ContractionRightRule( subProof, aux1, aux2 ) =>
      val subProofNew = go( substitution, subProof )
      ContractionRightRule( subProofNew, aux1, aux2 )

    case CutRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      val ( leftSubProofNew, rightSubProofNew ) = ( go( substitution, leftSubProof ), go( substitution, rightSubProof ) )
      CutRule( leftSubProofNew, aux1, rightSubProofNew, aux2 )

    case NegLeftRule( subProof, aux ) =>
      val subProofNew = go( substitution, subProof )
      NegLeftRule( subProofNew, aux )

    case NegRightRule( subProof, aux ) =>
      val subProofNew = go( substitution, subProof )
      NegRightRule( subProofNew, aux )

    case AndLeftRule( subProof, aux1, aux2 ) =>
      val subProofNew = go( substitution, subProof )
      AndLeftRule( subProofNew, aux1, aux2 )

    case AndRightRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      val ( leftSubProofNew, rightSubProofNew ) = ( go( substitution, leftSubProof ), go( substitution, rightSubProof ) )
      AndRightRule( leftSubProofNew, aux1, rightSubProofNew, aux2 )

    case OrLeftRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      val ( leftSubProofNew, rightSubProofNew ) = ( go( substitution, leftSubProof ), go( substitution, rightSubProof ) )
      OrLeftRule( leftSubProofNew, aux1, rightSubProofNew, aux2 )

    case OrRightRule( subProof, aux1, aux2 ) =>
      val subProofNew = go( substitution, subProof )
      OrRightRule( subProofNew, aux1, aux2 )

    case ImpLeftRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      val ( leftSubProofNew, rightSubProofNew ) = ( go( substitution, leftSubProof ), go( substitution, rightSubProof ) )
      ImpLeftRule( leftSubProofNew, aux1, rightSubProofNew, aux2 )

    case ImpRightRule( subProof, aux1, aux2 ) =>
      val subProofNew = go( substitution, subProof )
      ImpRightRule( subProofNew, aux1, aux2 )

    case p @ ForallLeftRule( subProof, aux, f, term, v ) =>
      // Recompute the instantiated formula from the substituted main formula.
      val subProofNew = go( substitution, subProof )
      val All( newV, newF ) = substitution( p.mainFormula )
      ForallLeftRule( subProofNew, aux, betaNormalize( newF ), betaNormalize( substitution( term ) ), newV )

    // The eigenvariable occurs in the substitution's range: rename it first to
    // avoid variable capture, then retry.
    case ForallRightRule( subProof, aux, eigen, quant ) if substitution.range contains eigen =>
      require( !preserveEigenvariables, s"Cannot apply substitution: Eigenvariable $eigen is in range of substitution" )
      val renamedEigen = rename( eigen, substitution.range union freeVariables( subProof.conclusion ) )
      applySubstitution( substitution, ForallRightRule(
        applySubstitution( Substitution( eigen -> renamedEigen ), subProof ),
        aux, renamedEigen, quant ) )

    case p @ ForallRightRule( subProof, aux, eigen, quant ) =>
      // Keep the eigenvariable's name but retype it; extend the substitution so
      // the eigenvariable is mapped consistently inside the subproof.
      val All( newQuant, _ ) = substitution( p.mainFormula )
      val newEigen = Var( eigen.name, substitution( eigen.ty ) )
      val newSubst = Substitution( substitution.map + ( eigen -> newEigen ), substitution.typeMap )
      ForallRightRule( go( newSubst, subProof ), aux, newEigen, newQuant )

    // Symmetric capture-avoidance for the existential strong quantifier rule.
    case ExistsLeftRule( subProof, aux, eigen, quant ) if substitution.range contains eigen =>
      require( !preserveEigenvariables, s"Cannot apply substitution: Eigenvariable $eigen is in range of substitution" )
      val renamedEigen = rename( eigen, substitution.range union freeVariables( subProof.conclusion ) )
      applySubstitution( substitution, ExistsLeftRule(
        applySubstitution( Substitution( eigen -> renamedEigen ), subProof ),
        aux, renamedEigen, quant ) )

    case p @ ExistsLeftRule( subProof, aux, eigen, quant ) =>
      val Ex( newQuant, _ ) = substitution( p.mainFormula )
      val newEigen = Var( eigen.name, substitution( eigen.ty ) )
      val newSubst = Substitution( substitution.map + ( eigen -> newEigen ), substitution.typeMap )
      ExistsLeftRule( go( newSubst, subProof ), aux, newEigen, newQuant )

    case p @ ExistsRightRule( subProof, aux, f, term, v ) =>
      val subProofNew = go( substitution, subProof )
      val Ex( newV, newF ) = substitution( p.mainFormula )
      ExistsRightRule( subProofNew, aux, betaNormalize( newF ), betaNormalize( substitution( term ) ), newV )

    case p @ ExistsSkLeftRule( subProof, aux, main, skT ) =>
      ExistsSkLeftRule( go( substitution, subProof ), aux, BetaReduction.betaNormalize( substitution( main ) ), substitution( skT ) )

    case p @ ForallSkRightRule( subProof, aux, main, skT ) =>
      ForallSkRightRule( go( substitution, subProof ), aux, BetaReduction.betaNormalize( substitution( main ) ), substitution( skT ) )

    case EqualityLeftRule( subProof, eq, aux, con ) =>
      val subProofNew = go( substitution, subProof )
      EqualityLeftRule( subProofNew, eq, aux, substitution( con ).asInstanceOf[Abs] )

    case EqualityRightRule( subProof, eq, aux, con ) =>
      val subProofNew = go( substitution, subProof )
      EqualityRightRule( subProofNew, eq, aux, substitution( con ).asInstanceOf[Abs] )

    case InductionRule( cases, main, term ) =>
      InductionRule( cases map { indCase( substitution, _ ) }, substitution( main ).asInstanceOf[Abs], substitution( term ) )

    case ConversionLeftRule( subProof, aux, main ) =>
      val subProofNew = go( substitution, subProof )
      ConversionLeftRule( subProofNew, aux, substitution( main ) )

    case ConversionRightRule( subProof, aux, main ) =>
      val subProofNew = go( substitution, subProof )
      ConversionRightRule( subProofNew, aux, substitution( main ) )
  }

  // Applies the substitution to one induction case, treating the case's
  // eigenvariables like bound variables (dropping them from the domain and
  // renaming them away from the substitution's range when necessary).
  private def indCase( subst: Substitution, c: InductionCase ): InductionCase =
    if ( subst.domain intersect c.eigenVars.toSet nonEmpty ) {
      indCase( Substitution( subst.map -- c.eigenVars.toSet, subst.typeMap ), c )
    } else if ( subst.range intersect c.eigenVars.toSet nonEmpty ) {
      require( !preserveEigenvariables )
      val renaming = rename( c.eigenVars, freeVariables( c.proof.endSequent ) -- c.eigenVars ++ subst.range )
      indCase( subst, c.copy( applySubstitution( Substitution( renaming ), c.proof ), eigenVars = c.eigenVars map renaming ) )
    } else {
      val newEigens = subst( c.eigenVars ).map( _.asInstanceOf[Var] )
      c.copy(
        go( Substitution( subst.map ++ ( c.eigenVars zip newEigens ), subst.typeMap ), c.proof ),
        constructor = subst( c.constructor ).asInstanceOf[Const],
        eigenVars = newEigens )
    }
}

/**
 * Applies a term replacement to every formula and term in an LK proof,
 * rebuilding each inference and its sequent connector.
 */
class LKProofReplacer( repl: PartialFunction[Expr, Expr] ) {
  def apply( proof: LKProof ): LKProof = lkProofReplacerVisitor( proof, () )

  private object lkProofReplacerVisitor extends LKVisitor[Unit] {
    override protected def visitOpenAssumption( proof: OpenAssumption, otherArg: Unit ): ( LKProof, SequentConnector ) = {
      val proofNew = OpenAssumption( for ( ( l, f ) <- proof.labelledSequent ) yield l -> TermReplacement( f, repl ), proof.index )
      ( proofNew, SequentConnector( proofNew.conclusion, proof.conclusion, proof.conclusion.indicesSequent.map { Seq( _ ) } ) )
    }

    override protected def visitLogicalAxiom( proof: LogicalAxiom, otherArg: Unit ): ( LKProof, SequentConnector ) = {
      val proofNew = LogicalAxiom( TermReplacement( proof.A, repl ) )
      ( proofNew, SequentConnector( proofNew.conclusion, proof.conclusion, proof.conclusion.indicesSequent.map { Seq( _ ) } ) )
    }

    override protected def visitReflexivityAxiom( proof: ReflexivityAxiom, otherArg: Unit ): ( LKProof, SequentConnector ) = {
      val proofNew = ReflexivityAxiom( TermReplacement( proof.s, repl ) )
      ( proofNew, SequentConnector( proofNew.conclusion, proof.conclusion, proof.conclusion.indicesSequent.map { Seq( _ ) } ) )
    }

    override protected def visitProofLink( proof: ProofLink, otherArg: Unit ): ( LKProof, SequentConnector ) = {
      val proofNew = ProofLink( TermReplacement( proof.referencedProof, repl ), TermReplacement( proof.conclusion, repl ) )
      ( proofNew, SequentConnector( proofNew.conclusion, proof.conclusion, proof.conclusion.indicesSequent.map { Seq( _ ) } ) )
    }

    override protected def visitWeakeningLeft( proof: WeakeningLeftRule, otherArg: Unit ): ( LKProof, SequentConnector ) = {
      val ( subProofNew, subConnector ) = recurse( proof.subProof, () )
      val proofNew = WeakeningLeftRule( subProofNew, TermReplacement( proof.formula, repl ) )
      ( proofNew, ( proofNew.getSequentConnector * subConnector * proof.getSequentConnector.inv ) + ( proofNew.mainIndices( 0 ), proof.mainIndices( 0 ) ) )
    }

    override protected def visitWeakeningRight( proof: WeakeningRightRule, otherArg: Unit ): ( LKProof, SequentConnector ) = {
      val ( subProofNew, subConnector ) = recurse( proof.subProof, () )
      val proofNew = WeakeningRightRule( subProofNew, TermReplacement( proof.formula, repl ) )
      ( proofNew, ( proofNew.getSequentConnector * subConnector * proof.getSequentConnector.inv ) + ( proofNew.mainIndices( 0 ), proof.mainIndices( 0 ) ) )
    }

    override protected def visitForallLeft( proof: ForallLeftRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ForallLeftRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ), TermReplacement( proof.term, repl ) )
      }

    override protected def visitForallRight( proof: ForallRightRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ForallRightRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ), TermReplacement( proof.eigenVariable, repl ).asInstanceOf[Var] )
      }

    override protected def visitForallSkRight( proof: ForallSkRightRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ForallSkRightRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ), TermReplacement( proof.skolemTerm, repl ) )
      }

    override protected def visitExistsRight( proof: ExistsRightRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ExistsRightRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ), TermReplacement( proof.term, repl ) )
      }

    override protected def visitExistsLeft( proof: ExistsLeftRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ExistsLeftRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ), TermReplacement( proof.eigenVariable, repl ).asInstanceOf[Var] )
      }

    override protected def visitExistsSkLeft( proof: ExistsSkLeftRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ExistsSkLeftRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ), TermReplacement( proof.skolemTerm, repl ) )
      }

    override protected def visitEqualityLeft( proof: EqualityLeftRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          EqualityLeftRule( subProofNew, subConnector.child( proof.eq ), subConnector.child( proof.aux ), TermReplacement( proof.replacementContext, repl ).asInstanceOf[Abs] )
      }

    override protected def visitEqualityRight( proof: EqualityRightRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          EqualityRightRule( subProofNew, subConnector.child( proof.eq ), subConnector.child( proof.aux ), TermReplacement( proof.replacementContext, repl ).asInstanceOf[Abs] )
      }

    override protected def visitDefinitionLeft( proof: ConversionLeftRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ConversionLeftRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ) )
      }

    override protected def visitDefinitionRight( proof: ConversionRightRule, otherArg: Unit ): ( LKProof, SequentConnector ) =
      one2one( proof, otherArg ) {
        case Seq( ( subProofNew, subConnector ) ) =>
          ConversionRightRule( subProofNew, subConnector.child( proof.aux ), TermReplacement( proof.mainFormula, repl ) )
      }

    override protected def visitInduction( proof: InductionRule, otherArg: Unit ) =
      one2one( proof, otherArg ) { newSubProofs =>
        InductionRule(
          for ( ( ( newSubProof, subConn ), oldCase ) <- newSubProofs.zip( proof.cases ) )
            yield InductionCase( newSubProof, TermReplacement( oldCase.constructor, repl ).asInstanceOf[Const],
            oldCase.hypotheses.map( subConn.child ), oldCase.eigenVars.map( TermReplacement( _, repl ).asInstanceOf[Var] ),
            subConn.child( oldCase.conclusion ) ),
          TermReplacement( proof.formula, repl ).asInstanceOf[Abs],
          TermReplacement( proof.term, repl ) )
      }
  }
}
gapt/gapt
core/src/main/scala/gapt/proofs/lk/LKProofSubstitutable.scala
Scala
gpl-3.0
17,488
/* * Copyright 2012 Arktekk AS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package no.arktekk.atom.extension.georss /* * Copyright 2012 Arktekk AS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Copyright 2012 Arktekk AS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

import org.specs2.mutable.Specification

/**
 * Specification for polygon serialization (toValue) and parsing (apply).
 *
 * @author Erlend Hamnaberg<erlend@hamnaberg.net>
 */
class PolygonSpec extends Specification {

  "A polygon" should {

    "be generated correctly" in {
      // Four points, with the first repeated at the end to close the ring.
      val expected = "45.256 -110.45 46.46 -109.48 43.84 -109.86 45.256 -110.45"
      val polygon = Polygon(List(Point(45.256, -110.45), Point(46.46, -109.48), Point(43.84, -109.86), Point(45.256, -110.45)))
      polygon.toValue("###.#####") must beEqualTo(expected)
    }

    "be parsed correctly" in {
      val input = "45.256 -110.45 46.46 -109.48 43.84 -109.86 45.256 -110.45"
      // NOTE(review): only three points expected here — presumably the varargs
      // factory closes the ring implicitly; confirm against Polygon.apply.
      val expected = Polygon(Point(45.256, -110.45), Point(46.46, -109.48), Point(43.84, -109.86))
      Polygon(input) should beEqualTo(Some(expected))
    }
  }
}
arktekk/scala-atom
src/test/scala/no/arktekk/atom/extension/georss/PolygonSpec.scala
Scala
apache-2.0
2,574
package scalan.util

import scalan.BaseTests
import scala.collection.{Seq, mutable}
import scala.reflect.ClassTag

// Unit tests for the extension methods provided by scalan.util.CollectionUtil.
class CollectionUtilTests extends BaseTests {
  import scalan.util.CollectionUtil._
  import java.lang.{Byte => JByte, Integer}

  test("updateMany") {
    val xs: Seq[Byte] = Array[Byte](1,2,3)
    xs.updateMany(Seq.empty) shouldBe xs
    xs.updateMany(Seq(0 -> 2)) shouldBe Seq(2, 2, 3)
    xs.updateMany(Seq(0 -> 2, 2 -> 2)) shouldBe Seq(2, 2, 2)
    // Out-of-range index must fail rather than be silently ignored.
    an[IndexOutOfBoundsException] should be thrownBy {
      xs.updateMany(Seq(3 -> 2))
    }
  }

  test("concatArrays") {
    val xs = Array[Byte](1,2,3)
    val ys = Array[Byte](4,5,6)
    val zs = concatArrays(xs, ys)
    assertResult(Array[Byte](1, 2, 3, 4, 5, 6))(zs)

    // val jxs = Array[JByte](new JByte(1), new JByte(2), new JByte(3))
    // val jys = Array[JByte](new JByte(4), new JByte(5), new JByte(6))
    // val jzs = concatArrays(jxs, jys)
    // assertResult(Array[Byte](1, 2, 3, 4, 5, 6))(jzs)
  }

  // Helpers for the outer-join tests: keep left-only/right-only values as-is
  // and sum values present on both sides.
  def join(l: Map[Int,Int], r: Map[Int,Int]) =
    outerJoin(l, r)((_,l) => l, (_,r) => r, (k,l,r) => l + r)
  def joinSeqs(l: Seq[Int], r: Seq[Int]) =
    outerJoinSeqs(l, r)(l => l, r => r)((_,l) => l, (_,r) => r, (k,l,r) => l + r).map(_._2)
  def joinPairs(l: Seq[(String,Int)], r: Seq[(String,Int)]) =
    outerJoinSeqs(l, r)(l => l._1, r => r._1)((_,l) => l._2, (_,r) => r._2, (k,l,r) => l._2 + r._2)

  test("outerJoin maps") {
    val left = Map(1 -> 1, 2 -> 2, 3 -> 3)
    val right = Map(2 -> 2, 3 -> 3, 4 -> 4)

    assertResult(Map(1 -> 1, 2 -> 4, 3 -> 6, 4 -> 4))(join(left,right))
    assertResult(Map(1 -> 1, 2 -> 2, 3 -> 3))(join(left,Map()))
    assertResult(Map(2 -> 2, 3 -> 3, 4 -> 4))(join(Map(), right))
    assertResult(Map(2 -> 4, 3 -> 6, 4 -> 8))(join(right, right))
  }

  test("outerJoinSeqs") {
    val left = Seq(1, 2, 3)
    val right = Seq(2, 3, 4)

    assertResult(Seq(1, 4, 6, 4))(joinSeqs(left, right))
    assertResult(Seq(1, 2, 3))(joinSeqs(left,Seq()))
    assertResult(Seq(2, 3, 4))(joinSeqs(Seq(), right))
    assertResult(Seq(4, 6, 8))(joinSeqs(right, right))

    val inner = Seq("a" -> 1, "b" -> 2, "c" -> 3)
    val outer = Seq("b" -> 2, "c" -> 3, "d" -> 4)

    assertResult(Seq("a" -> 1, "b" -> 4, "c" -> 6, "d" -> 4))(joinPairs(inner, outer))
    assertResult(Seq("a" -> 1, "b" -> 2, "c" -> 3))(joinPairs(inner,Seq()))
    assertResult(Seq("b" -> 2, "c" -> 3, "d" -> 4))(joinPairs(Seq(), outer))
    assertResult(Seq("b" -> 4, "c" -> 6, "d" -> 8))(joinPairs(outer, outer))
  }

  test("filterMap") {
    val xs = List(1, 2, 3)
    xs.filterMap(x => if (x <= 2) Some(s"x = $x") else None) should be(List("x = 1", "x = 2"))
  }

  test("mapUnzip") {
    val xs = Seq(1, 2, 3)
    // Triple variant.
    {
      val (ints, strings, plus1s) = xs.mapUnzip((x: Int) => (x, x.toString, x + 1))
      ints shouldBe Seq(1, 2, 3)
      strings shouldBe Seq("1", "2", "3")
      plus1s shouldBe Seq(2, 3, 4)
    }
    // Pair variant.
    {
      val (ints, strings) = xs.mapUnzip((x: Int) => (x, x.toString))
      ints shouldBe Seq(1, 2, 3)
      strings shouldBe Seq("1", "2", "3")
    }
  }

  // NOTE(review): test name says "mapFirst" but the method under test is findMap.
  test("mapFirst") {
    val xs = List(1, 2, 3)
    xs.findMap(x => if (x > 2) Some(s"x = $x") else None) should be(Some("x = 3"))
    xs.findMap(x => if (x > 3) Some(x) else None) should be(None)
  }

  // Shared fixture with a duplicate key (1) for distinctBy/mapReduce.
  val items: Iterable[(Int, String)] = Array((1, "a"), (2, "b"), (1, "c"))

  test("distinctBy") {
    val res = items.distinctBy(_._1)
    assertResult(Array((1, "a"), (2, "b")))(res)
  }

  test("mapReduce") {
    val res = items.mapReduce(p => (p._1, p._2))((v1, v2) => v1 + v2)
    assertResult(List((1, "ac"), (2, "b")))(res)
  }

  test("mergeWith") {
    type V = (Int, String)
    def key(p: V) = p._1
    def merge(v1: V, v2: V) = (v1._1, v1._2 + v2._2)
    {
      val res = List().mergeWith(List(), key, merge)
      assertResult(List())(res)
    }
    {
      val res = List((1, "a"), (2, "b"), (1, "c")).mergeWith(List(), key, merge)
      assertResult(List((1, "ac"), (2, "b")))(res)
    }
    {
      val res = List().mergeWith(List((1, "a"), (2, "b"), (1, "c")), key, merge)
      assertResult(List((1, "ac"), (2, "b")))(res)
    }
    {
      val ys = List((2, "c"), (3, "d"))
      val res = List((1, "a"), (2, "b"), (1, "c")).mergeWith(ys, key, merge)
      assertResult(List((1, "ac"), (2, "bc"), (3, "d")))(res)
    }
  }

  test("zipWithExpandedBy") {
    assertResult(Array((2, 0), (2, 1)))(2.zipWithExpandedBy(x => List.range(0,x)))
    assertResult(Array((3, 0), (3, 1), (3, 2)))(3.zipWithExpandedBy(x => List.range(0,x)))
  }

  // Adjacency function: node index -> children, used by traverseDepthFirst.
  def treeStep(tree: Array[List[Int]]): Int => List[Int] = i => tree(i)

  test("traverseDepthFirst") {
    {
      val tree = Array(
        List(1, 2), // 0
        List(),     // 1
        List(3),    // 2
        List())     // 3
      assertResult(List(0, 1, 2, 3))(0.traverseDepthFirst(treeStep(tree)))
    }
    {
      /*
       0
         1
           3
             5
             6
         2
           4
      */
      val tree = Array(
        List(1, 2), // 0
        List(3),    // 1
        List(4),    // 2
        List(5,6),  // 3
        List(),     // 4
        List(),     // 5
        List()      // 6
      )
      assertResult(List(0, 1, 3, 5, 6, 2, 4))(0.traverseDepthFirst(treeStep(tree)))
    }
  }

  test("partitionByType") {
    val xs: List[Any] = List(1, "a", "b", 2, 3, 1.0, 2.0)
    val (ints, others) = xs.partitionByType[Integer, Any]
    ints shouldBe(List(1,2,3))
    val (strs, doubles) = others.partitionByType[String, Double]
    strs shouldBe(List("a", "b"))
    doubles shouldBe(List(1.0, 2.0))
  }

  test("mapConserve") {
    class A(val x: Int)
    val x = new A(10)
    val opt = Option(x)
    // Identity mapping must return the same instance; a fresh value must not.
    opt.mapConserve(a => a) shouldBe theSameInstanceAs(opt)
    opt.mapConserve(a => new A(a.x)) should not be theSameInstanceAs(opt)
  }

  test("transformConserve") {
    class A(val x: Int)
    val x = new A(10)
    x.transformConserve(a => a) shouldBe theSameInstanceAs(x)
    x.transformConserve(a => new A(a.x)) should not be theSameInstanceAs(x)
  }

  test("sameElements2") {
    // Deep element-wise comparison across different collection types, including
    // nested collections/arrays.
    Seq(1, 2).sameElements2(List(1, 2)) shouldBe true
    new mutable.WrappedArray.ofInt(Array(1, 2)).sameElements2(Vector(1, 2)) shouldBe true
    Seq(new mutable.WrappedArray.ofInt(Array(1, 2)), 3).sameElements2(Array(Vector(1, 2), 3)) shouldBe true
    Seq(Array(1, 2), 3).sameElements2(Array(Vector(1, 2), 3)) shouldBe true
    Seq(Array(1, 2), Option(3)).sameElements2(Array(Vector(1, 2), List(3))) shouldBe false
    Seq(1, 2).sameElements2(List(1, 2, 3)) shouldBe false
    new mutable.WrappedArray.ofInt(Array(1, 2, 3)).sameElements2(Vector(1, 2)) shouldBe false
    Seq(new mutable.WrappedArray.ofInt(Array(1, 2, 3)), 3).sameElements2(Array(Vector(1, 2), 3)) shouldBe false
  }

  // toArray driven by the ClassTag, so primitive element types yield unboxed arrays.
  def unboxedArray[T:ClassTag](in: Seq[T]): Array[T] = {
    in.toArray[T]
  }

  test("unboxedArray") {
    // empty list
    unboxedArray(Seq[Any]()).isInstanceOf[Array[Any]] shouldBe true

    // primitive types
    unboxedArray(Seq[Byte](java.lang.Byte.valueOf(1.toByte))).isInstanceOf[Array[Byte]] shouldBe true
    Seq[Any](java.lang.Byte.valueOf(1.toByte)).toArray.isInstanceOf[Array[Byte]] shouldBe false
    Seq[Byte](1.toByte).toArray[Byte].isInstanceOf[Array[Byte]] shouldBe true

    unboxedArray(Seq[Short](java.lang.Short.valueOf(1.toShort))).isInstanceOf[Array[Short]] shouldBe true
    unboxedArray(Seq[Int](java.lang.Integer.valueOf(1))).isInstanceOf[Array[Int]] shouldBe true
    unboxedArray(Seq[Long](java.lang.Long.valueOf(1))).isInstanceOf[Array[Long]] shouldBe true
    unboxedArray(Seq[Double](java.lang.Double.valueOf(1.0))).isInstanceOf[Array[Double]] shouldBe true
    unboxedArray(Seq[Float](java.lang.Float.valueOf(1.0f))).isInstanceOf[Array[Float]] shouldBe true
    unboxedArray(Seq[Boolean](java.lang.Boolean.valueOf(true))).isInstanceOf[Array[Boolean]] shouldBe true
    unboxedArray(Seq[Char](java.lang.Character.valueOf('a'))).isInstanceOf[Array[Char]] shouldBe true
    unboxedArray(Seq[String]("str")).isInstanceOf[Array[String]] shouldBe true

    // non-primitive type
    unboxedArray(Seq[Any](Option.empty[Boolean])).isInstanceOf[Array[Any]] shouldBe true
    unboxedArray(Seq[Seq[Any]](Seq())).isInstanceOf[Array[Seq[Any]]] shouldBe true
  }
}
ScorexFoundation/sigmastate-interpreter
common/src/test/scala/scalan/util/CollectionUtilTests.scala
Scala
mit
8,236
package models.base

import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._

import app.MConfig
import models.base._
import models.Constants._

import io.megam.auth.funnel.FunnelErrors._
import io.megam.common.amqp.response.AMQPResponse
import io.megam.util.Time

/**
 * Incoming event payload.
 * @author ram
 */
case class EventInput(id: String, accounts_id: String, etype: String, action: String, inputs: Map[String, String]) {
  // NOTE(review): Map.apply — throws NoSuchElementException if the "email"
  // input is missing; confirm callers always supply it.
  val email = inputs(Events.EVTEMAIL)
}

// An event enriched with a creation timestamp; serializes itself to JSON by hand.
case class EventResult(id: String, accounts_id: String, etype: String, action: String, inputs: Map[String, String], created_at: String) {
  def toKeyList: models.tosca.KeyValueList = models.tosca.KeyValueList(inputs)

  // Hand-built JSON string; field values are not escaped here.
  val json = "{\"id\":\"" + id + "\",\"accounts_id\":\"" + accounts_id + "\",\"type\":\"" + etype + "\",\"action\":\"" + action + "\",\"inputs\":" + models.tosca.KeyValueList.toJson(toKeyList) + "}"

  // Queue topic the event is published to.
  def topicFunc(x: Unit): Option[String] = "events".some
}

class Events(evi: EventInput) {

  //create request from input
  private def create(): ValidationNel[Throwable, Option[wash.PQd]] = {
    for {
      eres <- EventResult(evi.id, evi.accounts_id, evi.etype, evi.action, evi.inputs, Time.now.toString).some.successNel[Throwable]
    } yield {
      eres match {
        case Some(thatER) => {
          new wash.PQd(thatER.topicFunc, thatER.json).some
        }
        case None => {
          None //shouldn't happen
        }
      }
    }
  }

  // create a request and publish, unless the account's email or events globally
  // are muted via configuration.
  def createAndPub(): ValidationNel[Throwable, Option[wash.PQd]] = {
    (create() leftMap { err: NonEmptyList[Throwable] => err }).flatMap { pq: Option[wash.PQd] =>
      if (!MConfig.mute_emails.contains(evi.email) && !MConfig.mute_events) {
        // NOTE(review): pq.get assumes create() never yields None here.
        (new wash.AOneWasher(pq.get).wash).flatMap { maybeGS: AMQPResponse =>
          play.api.Logger.warn(("%s%s%-20s%s%s").format(Console.GREEN, Console.BOLD, "Event","|+| ✔", Console.RESET))
          pq.successNel[Throwable]
        }
        // NOTE(review): the result of the wash/publish flatMap above is
        // discarded — this branch then logs the failure glyph and returns
        // success unconditionally. Confirm whether publish failures are
        // intentionally swallowed here.
        play.api.Logger.warn(("%s%s%-20s%s%s").format(Console.RED, Console.BOLD, "Event","|+| ✗", Console.RESET))
        pq.successNel[Throwable]
      } else {
        // Muted: log and return an empty payload without publishing.
        play.api.Logger.warn(("%s%s%-20s%s%s").format(Console.YELLOW, Console.BOLD, "Event","|+| ●", Console.RESET))
        wash.PQd.empty.some.successNel[Throwable]
      }
    }
  }
}

// Event type, input-key, and action-code constants.
object Events {
  //types
  val EVENTUSER = "user"

  //inputs
  val EVTEMAIL = "email"
  val EVTPASSWORD_HASH = "password_hash"
  val EVTCLICK = "click_url"
  val EVTTOKEN = "token"

  //actions
  val CREATE = "0"
  val DESTROY = "1"
  val STATUS = "2"
  val DEDUCT = "3"
  val ONBOARD = "4"
  val RESET = "5"
  val INVITE = "6"
  val BALANCE = "7"
  val LOGIN = "8"

  def apply(aid: String, etype: String, eaction: String, inputs: Map[String, String]) =
    new Events(new EventInput("", aid, etype, eaction, inputs))
}
megamsys/verticegateway
app/models/base/Events.scala
Scala
mit
2,942
package com.cyrusinnovation.computation.persistence.writer

import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import com.cyrusinnovation.computation.specification._
import LibraryInspector._

/**
 * A [[LibraryInspector]] that shapes marshalled output for table-based
 * persistence: computations are re-nested under a "computations" list node and
 * timestamps are rendered as ISO-8601 strings.
 */
object LibraryInspectorForTables extends LibraryInspector {

  // ISO-8601 date-time formatter used for all timestamp rendering.
  private val formatter = ISODateTimeFormat.dateTime()

  /** Wraps a plain text value in a compound node with a single "text" attribute. */
  private def createTextContainerNode(label: String, textValue: String) =
    createCompoundNode(label, Map("text" -> textValue), List())

  /** Re-nests the version's children under an explicit "computations" list node. */
  protected override def version(version: Version) = {
    val bare = super.version(version).asInstanceOf[CompoundNode]
    bare.copy(children = List(createNodeListNode("computations", bare.children)))
  }

  /** One "import" text node per import statement, collected under "imports". */
  protected override def imports(imports: Imports) = {
    val importNodes = imports.importSequence.map(createTextContainerNode("import", _))
    createNodeListNode("imports", importNodes.toList)
  }

  /** Marshals each inner spec, wraps each in "innerComputation", and groups them. */
  protected override def sequentialComputationSpec(computation: SequentialComputationSpecification) = {
    val wrapped = computation.innerSpecs.map(spec => createNodeListNode("innerComputation", List(marshal(spec))))
    createCompoundNode("sequentialComputation", Map.empty, List(createNodeListNode("innerComputations", wrapped)))
  }

  /** A "mapping" compound node holding "key" and "value" text children. */
  protected override def mapping(mapping: Mapping) = {
    val children = List(
      createTextContainerNode("key", mapping.key),
      createTextContainerNode("value", mapping.value))
    createCompoundNode("mapping", Map.empty, children)
  }

  /** Formats a timestamp as an ISO-8601 string. */
  protected override def dateTime(d: DateTime): String = formatter.print(d)
}
psfblair/computation-engine
persistence/src/main/scala/com/cyrusinnovation/computation/persistence/writer/LibraryInspectorForTables.scala
Scala
apache-2.0
1,667
// Positional compiler test (path suggests dotty issue i1755): instantiating a
// class through a type alias on a nested object (`new NAnB.TB()`) must compile.
// NOTE(review): the exact shape of these declarations is the test — do not
// simplify.
class hierarOverload {

  trait AB {
    type TB

    // Member trait referencing the enclosing trait's abstract type TB.
    protected trait A { val entities: List[TB] }

    protected trait B
  }

  object NAnB {
    type TB = nB
    type TA = nA
    class nA { List[nB]() }
    class nB {}
  }

  def foo = {
    // Resolves the alias NAnB.TB to class nB and calls its constructor.
    val t = new NAnB.TB()
  }
}

// Minimal variant: a single alias and class, same `new`-through-alias pattern.
class hierarOverload2 {
  object NAnB {
    type TB = nB
    class nB
  }

  def foo = {
    val t = new NAnB.TB()
  }
}
som-snytt/dotty
tests/pos/i1755.scala
Scala
apache-2.0
375
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.streaming.api.scala import org.apache.flink.annotation.{Internal, Public, PublicEvolving} import org.apache.flink.api.common.ExecutionConfig import org.apache.flink.api.common.functions.{FilterFunction, FlatMapFunction, MapFunction, Partitioner} import org.apache.flink.api.common.io.OutputFormat import org.apache.flink.api.common.operators.ResourceSpec import org.apache.flink.api.common.serialization.SerializationSchema import org.apache.flink.api.common.typeinfo.TypeInformation import org.apache.flink.api.java.functions.KeySelector import org.apache.flink.api.java.tuple.{Tuple => JavaTuple} import org.apache.flink.api.java.typeutils.ResultTypeQueryable import org.apache.flink.api.scala.operators.ScalaCsvOutputFormat import org.apache.flink.core.fs.{FileSystem, Path} import org.apache.flink.streaming.api.collector.selector.OutputSelector import org.apache.flink.streaming.api.datastream.{AllWindowedStream => JavaAllWindowedStream, DataStream => JavaStream, KeyedStream => JavaKeyedStream, _} import org.apache.flink.streaming.api.functions.sink.SinkFunction import org.apache.flink.streaming.api.functions.timestamps.{AscendingTimestampExtractor, 
BoundedOutOfOrdernessTimestampExtractor} import org.apache.flink.streaming.api.functions.{AssignerWithPeriodicWatermarks, AssignerWithPunctuatedWatermarks, ProcessFunction, TimestampExtractor} import org.apache.flink.streaming.api.operators.OneInputStreamOperator import org.apache.flink.streaming.api.windowing.assigners._ import org.apache.flink.streaming.api.windowing.time.Time import org.apache.flink.streaming.api.windowing.windows.{GlobalWindow, TimeWindow, Window} import org.apache.flink.util.Collector import scala.collection.JavaConverters._ @Public class DataStream[T](stream: JavaStream[T]) { /** * Returns the [[StreamExecutionEnvironment]] associated with the current [[DataStream]]. * * @return associated execution environment * @deprecated Use [[executionEnvironment]] instead */ @deprecated @PublicEvolving def getExecutionEnvironment: StreamExecutionEnvironment = new StreamExecutionEnvironment(stream.getExecutionEnvironment) /** * Returns the TypeInformation for the elements of this DataStream. * * @deprecated Use [[dataType]] instead. */ @deprecated @PublicEvolving def getType(): TypeInformation[T] = stream.getType() /** * Returns the parallelism of this operation. * * @deprecated Use [[parallelism]] instead. */ @deprecated @PublicEvolving def getParallelism = stream.getParallelism /** * Returns the execution config. * * @deprecated Use [[executionConfig]] instead. */ @deprecated @PublicEvolving def getExecutionConfig = stream.getExecutionConfig /** * Returns the ID of the DataStream. */ @Internal private[flink] def getId = stream.getId() // -------------------------------------------------------------------------- // Scalaesk accessors // -------------------------------------------------------------------------- /** * Gets the underlying java DataStream object. */ def javaStream: JavaStream[T] = stream /** * Returns the TypeInformation for the elements of this DataStream. 
*/ def dataType: TypeInformation[T] = stream.getType() /** * Returns the execution config. */ def executionConfig: ExecutionConfig = stream.getExecutionConfig() /** * Returns the [[StreamExecutionEnvironment]] associated with this data stream */ def executionEnvironment: StreamExecutionEnvironment = new StreamExecutionEnvironment(stream.getExecutionEnvironment()) /** * Returns the parallelism of this operation. */ def parallelism: Int = stream.getParallelism() /** * Sets the parallelism of this operation. This must be at least 1. */ def setParallelism(parallelism: Int): DataStream[T] = { stream match { case ds: SingleOutputStreamOperator[T] => ds.setParallelism(parallelism) case _ => throw new UnsupportedOperationException( "Operator " + stream + " cannot set the parallelism.") } this } def setMaxParallelism(maxParallelism: Int): DataStream[T] = { stream match { case ds: SingleOutputStreamOperator[T] => ds.setMaxParallelism(maxParallelism) case _ => throw new UnsupportedOperationException("Operator " + stream + " cannot set the maximum" + "paralllelism") } this } /** * Returns the minimum resources of this operation. */ @PublicEvolving def minResources: ResourceSpec = stream.getMinResources() /** * Returns the preferred resources of this operation. */ @PublicEvolving def preferredResources: ResourceSpec = stream.getPreferredResources() // --------------------------------------------------------------------------- // Fine-grained resource profiles are an incomplete work-in-progress feature // The setters are hence commented out at this point. // --------------------------------------------------------------------------- // /** // * Sets the minimum and preferred resources of this operation. 
// */ // @PublicEvolving // def resources(minResources: ResourceSpec, preferredResources: ResourceSpec) : DataStream[T] = // stream match { // case stream : SingleOutputStreamOperator[T] => asScalaStream( // stream.setResources(minResources, preferredResources)) // case _ => // throw new UnsupportedOperationException("Operator does not support " + // "configuring custom resources specs.") // this // } // // /** // * Sets the resource of this operation. // */ // @PublicEvolving // def resources(resources: ResourceSpec) : Unit = { // this.resources(resources, resources) // } /** * Gets the name of the current data stream. This name is * used by the visualization and logging during runtime. * * @return Name of the stream. */ def name: String = stream match { case stream : SingleOutputStreamOperator[T] => stream.getName case _ => throw new UnsupportedOperationException("Only supported for operators.") } // -------------------------------------------------------------------------- /** * Gets the name of the current data stream. This name is * used by the visualization and logging during runtime. * * @return Name of the stream. * @deprecated Use [[name]] instead */ @deprecated @PublicEvolving def getName : String = name /** * Sets the name of the current data stream. This name is * used by the visualization and logging during runtime. * * @return The named operator */ def name(name: String) : DataStream[T] = stream match { case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.name(name)) case _ => throw new UnsupportedOperationException("Only supported for operators.") this } /** * Sets an ID for this operator. * * The specified ID is used to assign the same operator ID across job * submissions (for example when starting a job from a savepoint). * * <strong>Important</strong>: this ID needs to be unique per * transformation and job. Otherwise, job submission will fail. * * @param uid The unique user-specified ID of this transformation. 
* @return The operator with the specified ID. */ @PublicEvolving def uid(uid: String) : DataStream[T] = javaStream match { case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.uid(uid)) case _ => throw new UnsupportedOperationException("Only supported for operators.") this } @PublicEvolving def getSideOutput[X: TypeInformation](tag: OutputTag[X]): DataStream[X] = javaStream match { case stream : SingleOutputStreamOperator[X] => asScalaStream(stream.getSideOutput(tag: OutputTag[X])) } /** * Sets an user provided hash for this operator. This will be used AS IS the create * the JobVertexID. * <p/> * <p>The user provided hash is an alternative to the generated hashes, that is * considered when identifying an operator through the default hash mechanics fails * (e.g. because of changes between Flink versions). * <p/> * <p><strong>Important</strong>: this should be used as a workaround or for trouble * shooting. The provided hash needs to be unique per transformation and job. Otherwise, * job submission will fail. Furthermore, you cannot assign user-specified hash to * intermediate nodes in an operator chain and trying so will let your job fail. * * @param hash the user provided hash for this operator. * @return The operator with the user provided hash. */ @PublicEvolving def setUidHash(hash: String) : DataStream[T] = javaStream match { case stream : SingleOutputStreamOperator[T] => asScalaStream(stream.setUidHash(hash)) case _ => throw new UnsupportedOperationException("Only supported for operators.") this } /** * Turns off chaining for this operator so thread co-location will not be * used as an optimization. </p> Chaining can be turned off for the whole * job by [[StreamExecutionEnvironment.disableOperatorChaining()]] * however it is not advised for performance considerations. 
* */ @PublicEvolving def disableChaining(): DataStream[T] = { stream match { case ds: SingleOutputStreamOperator[T] => ds.disableChaining() case _ => throw new UnsupportedOperationException("Only supported for operators.") } this } /** * Starts a new task chain beginning at this operator. This operator will * not be chained (thread co-located for increased performance) to any * previous tasks even if possible. * */ @PublicEvolving def startNewChain(): DataStream[T] = { stream match { case ds: SingleOutputStreamOperator[T] => ds.startNewChain() case _ => throw new UnsupportedOperationException("Only supported for operators.") } this } /** * Sets the slot sharing group of this operation. Parallel instances of * operations that are in the same slot sharing group will be co-located in the same * TaskManager slot, if possible. * * Operations inherit the slot sharing group of input operations if all input operations * are in the same slot sharing group and no slot sharing group was explicitly specified. * * Initially an operation is in the default slot sharing group. An operation can be put into * the default group explicitly by setting the slot sharing group to `"default"`. * * @param slotSharingGroup The slot sharing group name. */ @PublicEvolving def slotSharingGroup(slotSharingGroup: String): DataStream[T] = { stream match { case ds: SingleOutputStreamOperator[T] => ds.slotSharingGroup(slotSharingGroup) case _ => throw new UnsupportedOperationException("Only supported for operators.") } this } /** * Sets the maximum time frequency (ms) for the flushing of the output * buffer. By default the output buffers flush only when they are full. * * @param timeoutMillis * The maximum time between two output flushes. * @return The operator with buffer timeout set. 
*/ def setBufferTimeout(timeoutMillis: Long): DataStream[T] = { stream match { case ds: SingleOutputStreamOperator[T] => ds.setBufferTimeout(timeoutMillis) case _ => throw new UnsupportedOperationException("Only supported for operators.") } this } // -------------------------------------------------------------------------- // Stream Transformations // -------------------------------------------------------------------------- /** * Creates a new DataStream by merging DataStream outputs of * the same type with each other. The DataStreams merged using this operator * will be transformed simultaneously. * */ def union(dataStreams: DataStream[T]*): DataStream[T] = asScalaStream(stream.union(dataStreams.map(_.javaStream): _*)) /** * Creates a new ConnectedStreams by connecting * DataStream outputs of different type with each other. The * DataStreams connected using this operators can be used with CoFunctions. */ def connect[T2](dataStream: DataStream[T2]): ConnectedStreams[T, T2] = asScalaStream(stream.connect(dataStream.javaStream)) /** * Groups the elements of a DataStream by the given key positions (for tuple/array types) to * be used with grouped operators like grouped reduce or grouped aggregations. */ def keyBy(fields: Int*): KeyedStream[T, JavaTuple] = asScalaStream(stream.keyBy(fields: _*)) /** * Groups the elements of a DataStream by the given field expressions to * be used with grouped operators like grouped reduce or grouped aggregations. */ def keyBy(firstField: String, otherFields: String*): KeyedStream[T, JavaTuple] = asScalaStream(stream.keyBy(firstField +: otherFields.toArray: _*)) /** * Groups the elements of a DataStream by the given K key to * be used with grouped operators like grouped reduce or grouped aggregations. 
*/ def keyBy[K: TypeInformation](fun: T => K): KeyedStream[T, K] = { val cleanFun = clean(fun) val keyType: TypeInformation[K] = implicitly[TypeInformation[K]] val keyExtractor = new KeySelector[T, K] with ResultTypeQueryable[K] { def getKey(in: T) = cleanFun(in) override def getProducedType: TypeInformation[K] = keyType } asScalaStream(new JavaKeyedStream(stream, keyExtractor, keyType)) } /** * Partitions a tuple DataStream on the specified key fields using a custom partitioner. * This method takes the key position to partition on, and a partitioner that accepts the key * type. * * Note: This method works only on single field keys. */ def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], field: Int) : DataStream[T] = asScalaStream(stream.partitionCustom(partitioner, field)) /** * Partitions a POJO DataStream on the specified key fields using a custom partitioner. * This method takes the key expression to partition on, and a partitioner that accepts the key * type. * * Note: This method works only on single field keys. */ def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], field: String) : DataStream[T] = asScalaStream(stream.partitionCustom(partitioner, field)) /** * Partitions a DataStream on the key returned by the selector, using a custom partitioner. * This method takes the key selector to get the key to partition on, and a partitioner that * accepts the key type. * * Note: This method works only on single field keys, i.e. the selector cannot return tuples * of fields. 
*/ def partitionCustom[K: TypeInformation](partitioner: Partitioner[K], fun: T => K) : DataStream[T] = { val keyType = implicitly[TypeInformation[K]] val cleanFun = clean(fun) val keyExtractor = new KeySelector[T, K] with ResultTypeQueryable[K] { def getKey(in: T) = cleanFun(in) override def getProducedType(): TypeInformation[K] = keyType } asScalaStream(stream.partitionCustom(partitioner, keyExtractor)) } /** * Sets the partitioning of the DataStream so that the output tuples * are broad casted to every parallel instance of the next component. */ def broadcast: DataStream[T] = asScalaStream(stream.broadcast()) /** * Sets the partitioning of the DataStream so that the output values all go to * the first instance of the next processing operator. Use this setting with care * since it might cause a serious performance bottleneck in the application. */ @PublicEvolving def global: DataStream[T] = asScalaStream(stream.global()) /** * Sets the partitioning of the DataStream so that the output tuples * are shuffled to the next component. */ @PublicEvolving def shuffle: DataStream[T] = asScalaStream(stream.shuffle()) /** * Sets the partitioning of the DataStream so that the output tuples * are forwarded to the local subtask of the next component (whenever * possible). */ def forward: DataStream[T] = asScalaStream(stream.forward()) /** * Sets the partitioning of the DataStream so that the output tuples * are distributed evenly to the next component. */ def rebalance: DataStream[T] = asScalaStream(stream.rebalance()) /** * Sets the partitioning of the [[DataStream]] so that the output tuples * are distributed evenly to a subset of instances of the downstream operation. * * The subset of downstream operations to which the upstream operation sends * elements depends on the degree of parallelism of both the upstream and downstream operation. 
* For example, if the upstream operation has parallelism 2 and the downstream operation * has parallelism 4, then one upstream operation would distribute elements to two * downstream operations while the other upstream operation would distribute to the other * two downstream operations. If, on the other hand, the downstream operation has parallelism * 2 while the upstream operation has parallelism 4 then two upstream operations will * distribute to one downstream operation while the other two upstream operations will * distribute to the other downstream operations. * * In cases where the different parallelisms are not multiples of each other one or several * downstream operations will have a differing number of inputs from upstream operations. */ @PublicEvolving def rescale: DataStream[T] = asScalaStream(stream.rescale()) /** * Initiates an iterative part of the program that creates a loop by feeding * back data streams. To create a streaming iteration the user needs to define * a transformation that creates two DataStreams. The first one is the output * that will be fed back to the start of the iteration and the second is the output * stream of the iterative part. * * stepfunction: initialStream => (feedback, output) * * A common pattern is to use output splitting to create feedback and output DataStream. * Please refer to the [[split]] method of the DataStream * * By default a DataStream with iteration will never terminate, but the user * can use the maxWaitTime parameter to set a max waiting time for the iteration head. * If no data received in the set time the stream terminates. * * Parallelism of the feedback stream must match the parallelism of the original stream. 
* Please refer to the [[setParallelism]] method for parallelism modification */ @PublicEvolving def iterate[R](stepFunction: DataStream[T] => (DataStream[T], DataStream[R]), maxWaitTimeMillis:Long = 0) : DataStream[R] = { val iterativeStream = stream.iterate(maxWaitTimeMillis) val (feedback, output) = stepFunction(new DataStream[T](iterativeStream)) iterativeStream.closeWith(feedback.javaStream) output } /** * Initiates an iterative part of the program that creates a loop by feeding * back data streams. To create a streaming iteration the user needs to define * a transformation that creates two DataStreams. The first one is the output * that will be fed back to the start of the iteration and the second is the output * stream of the iterative part. * * The input stream of the iterate operator and the feedback stream will be treated * as a ConnectedStreams where the input is connected with the feedback stream. * * This allows the user to distinguish standard input from feedback inputs. * * stepfunction: initialStream => (feedback, output) * * The user must set the max waiting time for the iteration head. * If no data received in the set time the stream terminates. If this parameter is set * to 0 then the iteration sources will indefinitely, so the job must be killed to stop. * */ @PublicEvolving def iterate[R, F: TypeInformation]( stepFunction: ConnectedStreams[T, F] => (DataStream[F], DataStream[R]), maxWaitTimeMillis:Long): DataStream[R] = { val feedbackType: TypeInformation[F] = implicitly[TypeInformation[F]] val connectedIterativeStream = stream.iterate(maxWaitTimeMillis). withFeedbackType(feedbackType) val (feedback, output) = stepFunction(asScalaStream(connectedIterativeStream)) connectedIterativeStream.closeWith(feedback.javaStream) output } /** * Creates a new DataStream by applying the given function to every element of this DataStream. 
*/ def map[R: TypeInformation](fun: T => R): DataStream[R] = { if (fun == null) { throw new NullPointerException("Map function must not be null.") } val cleanFun = clean(fun) val mapper = new MapFunction[T, R] { def map(in: T): R = cleanFun(in) } map(mapper) } /** * Creates a new DataStream by applying the given function to every element of this DataStream. */ def map[R: TypeInformation](mapper: MapFunction[T, R]): DataStream[R] = { if (mapper == null) { throw new NullPointerException("Map function must not be null.") } val outType : TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(stream.map(mapper).returns(outType).asInstanceOf[JavaStream[R]]) } /** * Creates a new DataStream by applying the given function to every element and flattening * the results. */ def flatMap[R: TypeInformation](flatMapper: FlatMapFunction[T, R]): DataStream[R] = { if (flatMapper == null) { throw new NullPointerException("FlatMap function must not be null.") } val outType : TypeInformation[R] = implicitly[TypeInformation[R]] asScalaStream(stream.flatMap(flatMapper).returns(outType).asInstanceOf[JavaStream[R]]) } /** * Creates a new DataStream by applying the given function to every element and flattening * the results. */ def flatMap[R: TypeInformation](fun: (T, Collector[R]) => Unit): DataStream[R] = { if (fun == null) { throw new NullPointerException("FlatMap function must not be null.") } val cleanFun = clean(fun) val flatMapper = new FlatMapFunction[T, R] { def flatMap(in: T, out: Collector[R]) { cleanFun(in, out) } } flatMap(flatMapper) } /** * Creates a new DataStream by applying the given function to every element and flattening * the results. 
*/ def flatMap[R: TypeInformation](fun: T => TraversableOnce[R]): DataStream[R] = { if (fun == null) { throw new NullPointerException("FlatMap function must not be null.") } val cleanFun = clean(fun) val flatMapper = new FlatMapFunction[T, R] { def flatMap(in: T, out: Collector[R]) { cleanFun(in) foreach out.collect } } flatMap(flatMapper) } /** * Applies the given [[ProcessFunction]] on the input stream, thereby * creating a transformed output stream. * * The function will be called for every element in the stream and can produce * zero or more output. * * @param processFunction The [[ProcessFunction]] that is called for each element * in the stream. */ @PublicEvolving def process[R: TypeInformation]( processFunction: ProcessFunction[T, R]): DataStream[R] = { if (processFunction == null) { throw new NullPointerException("ProcessFunction must not be null.") } asScalaStream(javaStream.process(processFunction, implicitly[TypeInformation[R]])) } /** * Creates a new DataStream that contains only the elements satisfying the given filter predicate. */ def filter(filter: FilterFunction[T]): DataStream[T] = { if (filter == null) { throw new NullPointerException("Filter function must not be null.") } asScalaStream(stream.filter(filter)) } /** * Creates a new DataStream that contains only the elements satisfying the given filter predicate. */ def filter(fun: T => Boolean): DataStream[T] = { if (fun == null) { throw new NullPointerException("Filter function must not be null.") } val cleanFun = clean(fun) val filterFun = new FilterFunction[T] { def filter(in: T) = cleanFun(in) } filter(filterFun) } /** * Windows this DataStream into tumbling time windows. * * This is a shortcut for either `.window(TumblingEventTimeWindows.of(size))` or * `.window(TumblingProcessingTimeWindows.of(size))` depending on the time characteristic * set using * [[StreamExecutionEnvironment.setStreamTimeCharacteristic]]. 
* * Note: This operation can be inherently non-parallel since all elements have to pass through * the same operator instance. (Only for special cases, such as aligned time windows is * it possible to perform this operation in parallel). * * @param size The size of the window. */ def timeWindowAll(size: Time): AllWindowedStream[T, TimeWindow] = { new AllWindowedStream(javaStream.timeWindowAll(size)) } /** * Windows this DataStream into sliding time windows. * * This is a shortcut for either `.window(SlidingEventTimeWindows.of(size, slide))` or * `.window(SlidingProcessingTimeWindows.of(size, slide))` depending on the time characteristic * set using * [[StreamExecutionEnvironment.setStreamTimeCharacteristic]]. * * Note: This operation can be inherently non-parallel since all elements have to pass through * the same operator instance. (Only for special cases, such as aligned time windows is * it possible to perform this operation in parallel). * * @param size The size of the window. */ def timeWindowAll(size: Time, slide: Time): AllWindowedStream[T, TimeWindow] = { new AllWindowedStream(javaStream.timeWindowAll(size, slide)) } /** * Windows this [[DataStream]] into sliding count windows. * * Note: This operation can be inherently non-parallel since all elements have to pass through * the same operator instance. (Only for special cases, such as aligned time windows is * it possible to perform this operation in parallel). * * @param size The size of the windows in number of elements. * @param slide The slide interval in number of elements. */ def countWindowAll(size: Long, slide: Long): AllWindowedStream[T, GlobalWindow] = { new AllWindowedStream(stream.countWindowAll(size, slide)) } /** * Windows this [[DataStream]] into tumbling count windows. * * Note: This operation can be inherently non-parallel since all elements have to pass through * the same operator instance. 
(Only for special cases, such as aligned time windows is * it possible to perform this operation in parallel). * * @param size The size of the windows in number of elements. */ def countWindowAll(size: Long): AllWindowedStream[T, GlobalWindow] = { new AllWindowedStream(stream.countWindowAll(size)) } /** * Windows this data stream to a [[AllWindowedStream]], which evaluates windows * over a key grouped stream. Elements are put into windows by a [[WindowAssigner]]. The grouping * of elements is done both by key and by window. * * A [[org.apache.flink.streaming.api.windowing.triggers.Trigger]] can be defined to specify * when windows are evaluated. However, `WindowAssigner` have a default `Trigger` * that is used if a `Trigger` is not specified. * * Note: This operation can be inherently non-parallel since all elements have to pass through * the same operator instance. (Only for special cases, such as aligned time windows is * it possible to perform this operation in parallel). * * @param assigner The `WindowAssigner` that assigns elements to windows. * @return The trigger windows data stream. */ @PublicEvolving def windowAll[W <: Window](assigner: WindowAssigner[_ >: T, W]): AllWindowedStream[T, W] = { new AllWindowedStream[T, W](new JavaAllWindowedStream[T, W](stream, assigner)) } /** * Extracts a timestamp from an element and assigns it as the internal timestamp of that element. * The internal timestamps are, for example, used to to event-time window operations. * * If you know that the timestamps are strictly increasing you can use an * [[AscendingTimestampExtractor]]. Otherwise, * you should provide a [[TimestampExtractor]] that also implements * [[TimestampExtractor#getCurrentWatermark]] to keep track of watermarks. 
* * @see org.apache.flink.streaming.api.watermark.Watermark */ @deprecated def assignTimestamps(extractor: TimestampExtractor[T]): DataStream[T] = { asScalaStream(stream.assignTimestamps(clean(extractor))) } /** * Assigns timestamps to the elements in the data stream and periodically creates * watermarks to signal event time progress. * * This method creates watermarks periodically (for example every second), based * on the watermarks indicated by the given watermark generator. Even when no new elements * in the stream arrive, the given watermark generator will be periodically checked for * new watermarks. The interval in which watermarks are generated is defined in * [[org.apache.flink.api.common.ExecutionConfig#setAutoWatermarkInterval(long)]]. * * Use this method for the common cases, where some characteristic over all elements * should generate the watermarks, or where watermarks are simply trailing behind the * wall clock time by a certain amount. * * For the second case and when the watermarks are required to lag behind the maximum * timestamp seen so far in the elements of the stream by a fixed amount of time, and this * amount is known in advance, use the * [[BoundedOutOfOrdernessTimestampExtractor]]. * * For cases where watermarks should be created in an irregular fashion, for example * based on certain markers that some element carry, use the * [[AssignerWithPunctuatedWatermarks]]. * * @see AssignerWithPeriodicWatermarks * @see AssignerWithPunctuatedWatermarks * @see #assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks) */ @PublicEvolving def assignTimestampsAndWatermarks(assigner: AssignerWithPeriodicWatermarks[T]): DataStream[T] = { asScalaStream(stream.assignTimestampsAndWatermarks(assigner)) } /** * Assigns timestamps to the elements in the data stream and periodically creates * watermarks to signal event time progress. * * This method creates watermarks based purely on stream elements. 
For each element * that is handled via [[AssignerWithPunctuatedWatermarks#extractTimestamp(Object, long)]], * the [[AssignerWithPunctuatedWatermarks#checkAndGetNextWatermark()]] method is called, * and a new watermark is emitted, if the returned watermark value is larger than the previous * watermark. * * This method is useful when the data stream embeds watermark elements, or certain elements * carry a marker that can be used to determine the current event time watermark. * This operation gives the programmer full control over the watermark generation. Users * should be aware that too aggressive watermark generation (i.e., generating hundreds of * watermarks every second) can cost some performance. * * For cases where watermarks should be created in a regular fashion, for example * every x milliseconds, use the [[AssignerWithPeriodicWatermarks]]. * * @see AssignerWithPunctuatedWatermarks * @see AssignerWithPeriodicWatermarks * @see #assignTimestampsAndWatermarks(AssignerWithPeriodicWatermarks) */ @PublicEvolving def assignTimestampsAndWatermarks(assigner: AssignerWithPunctuatedWatermarks[T]) : DataStream[T] = { asScalaStream(stream.assignTimestampsAndWatermarks(assigner)) } /** * Assigns timestamps to the elements in the data stream and periodically creates * watermarks to signal event time progress. * * This method is a shortcut for data streams where the element timestamp are known * to be monotonously ascending within each parallel stream. * In that case, the system can generate watermarks automatically and perfectly * by tracking the ascending timestamps. * * For cases where the timestamps are not monotonously increasing, use the more * general methods [[assignTimestampsAndWatermarks(AssignerWithPeriodicWatermarks)]] * and [[assignTimestampsAndWatermarks(AssignerWithPunctuatedWatermarks)]]. 
*/ @PublicEvolving def assignAscendingTimestamps(extractor: T => Long): DataStream[T] = { val cleanExtractor = clean(extractor) val extractorFunction = new AscendingTimestampExtractor[T] { def extractAscendingTimestamp(element: T): Long = { cleanExtractor(element) } } asScalaStream(stream.assignTimestampsAndWatermarks(extractorFunction)) } /** * * Operator used for directing tuples to specific named outputs using an * OutputSelector. Calling this method on an operator creates a new * [[SplitStream]]. */ def split(selector: OutputSelector[T]): SplitStream[T] = asScalaStream(stream.split(selector)) /** * Creates a new [[SplitStream]] that contains only the elements satisfying the * given output selector predicate. */ def split(fun: T => TraversableOnce[String]): SplitStream[T] = { if (fun == null) { throw new NullPointerException("OutputSelector must not be null.") } val cleanFun = clean(fun) val selector = new OutputSelector[T] { def select(in: T): java.lang.Iterable[String] = { cleanFun(in).toIterable.asJava } } split(selector) } /** * Creates a co-group operation. See [[CoGroupedStreams]] for an example of how the keys * and window can be specified. */ def coGroup[T2](otherStream: DataStream[T2]): CoGroupedStreams[T, T2] = { new CoGroupedStreams(this, otherStream) } /** * Creates a join operation. See [[JoinedStreams]] for an example of how the keys * and window can be specified. */ def join[T2](otherStream: DataStream[T2]): JoinedStreams[T, T2] = { new JoinedStreams(this, otherStream) } /** * Writes a DataStream to the standard output stream (stdout). For each * element of the DataStream the result of .toString is * written. * */ @PublicEvolving def print(): DataStreamSink[T] = stream.print() /** * Writes a DataStream to the standard output stream (stderr). * * For each element of the DataStream the result of * [[AnyRef.toString()]] is written. * * @return The closed DataStream. 
*/ @PublicEvolving def printToErr() = stream.printToErr() /** * Writes a DataStream to the file specified by path in text format. For * every element of the DataStream the result of .toString is written. * * @param path The path pointing to the location the text file is written to * @return The closed DataStream */ @PublicEvolving def writeAsText(path: String): DataStreamSink[T] = stream.writeAsText(path) /** * Writes a DataStream to the file specified by path in text format. For * every element of the DataStream the result of .toString is written. * * @param path The path pointing to the location the text file is written to * @param writeMode Controls the behavior for existing files. Options are NO_OVERWRITE and * OVERWRITE. * @return The closed DataStream */ @PublicEvolving def writeAsText(path: String, writeMode: FileSystem.WriteMode): DataStreamSink[T] = { if (writeMode != null) { stream.writeAsText(path, writeMode) } else { stream.writeAsText(path) } } /** * Writes the DataStream in CSV format to the file specified by the path parameter. The writing * is performed periodically every millis milliseconds. * * @param path Path to the location of the CSV file * @return The closed DataStream */ @PublicEvolving def writeAsCsv(path: String): DataStreamSink[T] = { writeAsCsv( path, null, ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER, ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER) } /** * Writes the DataStream in CSV format to the file specified by the path parameter. The writing * is performed periodically every millis milliseconds. 
* * @param path Path to the location of the CSV file * @param writeMode Controls whether an existing file is overwritten or not * @return The closed DataStream */ @PublicEvolving def writeAsCsv(path: String, writeMode: FileSystem.WriteMode): DataStreamSink[T] = { writeAsCsv( path, writeMode, ScalaCsvOutputFormat.DEFAULT_LINE_DELIMITER, ScalaCsvOutputFormat.DEFAULT_FIELD_DELIMITER) } /** * Writes the DataStream in CSV format to the file specified by the path parameter. The writing * is performed periodically every millis milliseconds. * * @param path Path to the location of the CSV file * @param writeMode Controls whether an existing file is overwritten or not * @param rowDelimiter Delimiter for consecutive rows * @param fieldDelimiter Delimiter for consecutive fields * @return The closed DataStream */ @PublicEvolving def writeAsCsv( path: String, writeMode: FileSystem.WriteMode, rowDelimiter: String, fieldDelimiter: String) : DataStreamSink[T] = { require(stream.getType.isTupleType, "CSV output can only be used with Tuple DataSets.") val of = new ScalaCsvOutputFormat[Product](new Path(path), rowDelimiter, fieldDelimiter) if (writeMode != null) { of.setWriteMode(writeMode) } stream.writeUsingOutputFormat(of.asInstanceOf[OutputFormat[T]]) } /** * Writes a DataStream using the given [[OutputFormat]]. */ @PublicEvolving def writeUsingOutputFormat(format: OutputFormat[T]): DataStreamSink[T] = { stream.writeUsingOutputFormat(format) } /** * Writes the DataStream to a socket as a byte array. The format of the output is * specified by a [[SerializationSchema]]. */ @PublicEvolving def writeToSocket( hostname: String, port: Integer, schema: SerializationSchema[T]): DataStreamSink[T] = { stream.writeToSocket(hostname, port, schema) } /** * Adds the given sink to this DataStream. Only streams with sinks added * will be executed once the StreamExecutionEnvironment.execute(...) * method is called. 
* */ def addSink(sinkFunction: SinkFunction[T]): DataStreamSink[T] = stream.addSink(sinkFunction) /** * Adds the given sink to this DataStream. Only streams with sinks added * will be executed once the StreamExecutionEnvironment.execute(...) * method is called. * */ def addSink(fun: T => Unit): DataStreamSink[T] = { if (fun == null) { throw new NullPointerException("Sink function must not be null.") } val cleanFun = clean(fun) val sinkFunction = new SinkFunction[T] { def invoke(in: T) = cleanFun(in) } this.addSink(sinkFunction) } /** * Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning * is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]]. */ private[flink] def clean[F <: AnyRef](f: F): F = { new StreamExecutionEnvironment(stream.getExecutionEnvironment).scalaClean(f) } /** * Transforms the [[DataStream]] by using a custom [[OneInputStreamOperator]]. * * @param operatorName name of the operator, for logging purposes * @param operator the object containing the transformation logic * @tparam R the type of elements emitted by the operator */ @PublicEvolving def transform[R: TypeInformation]( operatorName: String, operator: OneInputStreamOperator[T, R]): DataStream[R] = { asScalaStream(stream.transform(operatorName, implicitly[TypeInformation[R]], operator)) } }
zimmermatt/flink
flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/DataStream.scala
Scala
apache-2.0
40,934
package scodec.protocols
package time

import fs2._
import java.time.Instant

/**
 * Verifies that two `Transform`s can be combined over an `Either` input and
 * applied to a time series while ticks are preserved.
 */
class TimeSeriesTransducerTest extends ProtocolsSpec {

  "the TimeSeriesTransducer type" should {

    "support combining two transducers via an either" in {
      // Two stateless transforms: one adds 1, the other adds 2.
      val add1: Transform.Aux[Unit, Int, Int] = Transform.lift(_ + 1)
      val add2: Transform.Aux[Unit, Int, Int] = Transform.lift(_ + 2)
      // `choice` routes Left through add1 and Right through add2; both states
      // are Unit, so the paired (Unit, Unit) state is collapsed back to Unit.
      val x: Transform.Aux[Unit, Either[Int, Int], Int] = add1.choice(add2).xmapState(_._1)(u => (u,u))
      // Values at 0ms, 500ms and 1500ms; interpolateTicks() fills the empty
      // second starting at 1000ms with a tick.
      val source: TimeSeries[Pure, Either[Int, Int]] = Stream(
        TimeStamped(Instant.ofEpochMilli(0), Right(1)),
        TimeStamped(Instant.ofEpochMilli(500), Left(2)),
        TimeStamped(Instant.ofEpochMilli(1500), Right(3))
      ).through(TimeSeries.interpolateTicks())
      // Right(1) -> 1+2 = 3, Left(2) -> 2+1 = 3, Right(3) -> 3+2 = 5; the tick
      // at 1000ms passes through untouched.
      source.through(TimeSeries.preserve(x).toPipe).toList shouldBe List(
        TimeSeriesValue(Instant.ofEpochMilli(0), 3),
        TimeSeriesValue(Instant.ofEpochMilli(500), 3),
        TimeSeriesValue.tick(Instant.ofEpochMilli(1000)),
        TimeSeriesValue(Instant.ofEpochMilli(1500), 5))
    }

  }

}
scodec/scodec-protocols
src/test/scala/scodec/protocols/time/TimeSeriesTransducerTest.scala
Scala
bsd-3-clause
1,086
package org.ensime.fixture

import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import java.util.concurrent.TimeUnit
import org.scalatest._

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.testkit._

/**
 * Normally a TestKit will reuse the same actor system for all tests
 * in a suite, but sometimes isolation of the system is needed on a
 * per-test basis, this fixture adds support for that.
 *
 * Instead of extending TestKit, use withActorSystem and import
 * the parameter for all implicits.
 *
 * Inspired by https://gist.github.com/derekwyatt/3138807
 */
trait TestKitFixture {
  // Guard against mixing this fixture into a TestKit subclass, which would
  // defeat the per-test isolation this trait exists to provide.
  require(
    !this.isInstanceOf[TestKit],
    "IsolatedActorSystems are incompatible with TestKit. Instead, 'import sys._'"
  )

  // Default ask timeout, read from the akka.test.default-timeout config key.
  implicit protected val timeout: Timeout =
    ConfigFactory.load().getDuration("akka.test.default-timeout", TimeUnit.MILLISECONDS) milliseconds

  /** Runs `testCode` with a TestKit; implementations decide its lifecycle. */
  def withTestKit(testCode: TestKitFix => Any): Any
}

/** A TestKit bound to a fresh ActorSystem, with ImplicitSender for ask-style tests. */
class TestKitFix extends TestKit(ActorSystem()) with ImplicitSender

/** Creates a brand-new actor system for every test and tears it down afterwards. */
trait IsolatedTestKitFixture extends TestKitFixture {
  override def withTestKit(testCode: TestKitFix => Any): Any = {
    val sys = new TestKitFix
    try {
      testCode(sys)
    } finally {
      // Shut down and block until termination so systems never leak between tests.
      sys.system.shutdown()
      sys.system.awaitTermination()
    }
  }
}

// this seems redundant, because it mimics "extends TestKit" behaviour,
// but it allows for easy swapping with the refreshing implementation
trait SharedTestKitFixture extends TestKitFixture with BeforeAndAfterAll {
  this: Suite =>

  // Single TestKit shared by every test in the suite; created in beforeAll.
  var _testkit: TestKitFix = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    _testkit = new TestKitFix
  }

  override def afterAll(): Unit = {
    super.afterAll()
    _testkit.system.shutdown()
    _testkit.system.awaitTermination()
  }

  override def withTestKit(testCode: TestKitFix => Any): Any = testCode(_testkit)
}
jacobono/ensime-server
core/src/it/scala/org/ensime/fixture/TestKitFixture.scala
Scala
gpl-3.0
1,877
package com.github.ldaniels528.trifecta.messages.codec.avro

import com.github.ldaniels528.tabular.Tabular
import org.apache.avro.generic.GenericRecord

import scala.collection.JavaConverters._

/**
 * Adds Avro table capabilities to a Tabular instance
 * @author lawrence.daniels@gmail.com
 */
trait AvroTables {
  self: Tabular =>

  /**
   * Renders a sequence of Avro records as table lines.
   * @param records   the records to render; an empty input yields `Nil`
   * @param reqFields the columns to display; when empty, every field of the
   *                  first record's schema is used instead
   * @return the formatted table lines produced by the host `Tabular` instance
   */
  def transformAvro(records: Seq[GenericRecord], reqFields: Seq[String]): Seq[String] = {
    if (records.isEmpty) Nil
    else {
      // Explicit .asScala replaces the deprecated implicit scala.collection.JavaConversions
      // wildcard import; behavior is identical but the conversion is now visible.
      val fields = if (reqFields.nonEmpty) reqFields else records.head.getSchema.getFields.asScala.map(_.name.trim).toSeq
      // One row per record: a field-name -> stringified-value map, trimmed for display.
      val rows = records map { r => Map(fields map (k => (k, asString(r.get(k)).trim)): _*) }
      makeTable(fields, rows)
    }
  }
}
ldaniels528/trifecta
src/main/scala/com/github/ldaniels528/trifecta/messages/codec/avro/AvroTables.scala
Scala
apache-2.0
729
package org.scalatra

import org.scalatra.test.specs2.MutableScalatraSpec

import scala.concurrent.Future

/**
 * Servlet used to exercise the StableResult macro rewriting: each action body
 * is rewritten into a StableResult subclass so request/response can be closed
 * over safely from asynchronous code. The large comment blocks below show the
 * macro expansions for reference.
 */
class StableResultServlet extends ScalatraServlet with FutureSupport {

  override implicit val executor = scala.concurrent.ExecutionContext.global

  before("/*") {
    contentType = "text/html"
  }

  get("/ok") {
    Ok(123)
  }

  get("/future-as-result") {
    Future {
      // request is the same as in the request handling thread _and_ it is not invalidated by the servlet container
      println(request.getSession)
      Ok(123)
    }
  }

  // rewritten to:
  //
  //  {
  //    class cls$macro$3 extends org.scalatra.StableResult {
  //      val is = scala.concurrent.Future.apply[org.scalatra.ActionResult]({
  //        scala.Predef.println(request.getSession());
  //        Ok.apply(123, Ok.apply$default$2, Ok.apply$default$3)
  //      })(StableResultServlet.this.executor)
  //    };
  //    val res$macro$4 = new cls$macro$3();
  //    res$macro$4.is
  //  }

  notFound {
    halt(404, <h1>Not found.</h1>)
  }

  // rewritten to:
  //
  //  doNotFound = (() => {
  //    class cls$macro$5 extends org.scalatra.StableResult {
  //      def <init>() = {
  //        super.<init>();
  //        ()
  //      };
  //      val is = StableResultServlet.this.halt[scala.xml.Elem](scala.this.Predef.int2Integer(404), {
  //        {
  //          new scala.xml.Elem(null, "h1", scala.xml.Null, scala.this.xml.TopScope, false, ({
  //            val $buf = new scala.xml.NodeBuffer();
  //            $buf.&+(new scala.xml.Text("Not found."));
  //            $buf
  //          }: _*))
  //        }
  //      }, StableResultServlet.this.halt$default$3[Nothing], StableResultServlet.this.halt$default$4[Nothing])(reflect.this.ManifestFactory.classType[scala.xml.Elem](classOf[scala.xml.Elem]))
  //    };
  //    val res$macro$6 = new cls$macro$5();
  //    res$macro$6.is
  //  })
  //
  // make sure that ScalatraBase methods are invoked
  //  class X {
  //    val routes: Int = 100
  //    val addRoute: Int = 100
  //    val addStatusRoute: Int = 100
  //    val doNotFound: Int = 100
  //    val doMethodNotAllowed: Int = 100
  //    val errorHandler: Int = 100
  //    val asynchronously: Int = 100
  //
  //    before("/*") {
  //      contentType = "text/html"
  //    }
  //
  //    after("/*") {
  //      contentType = "text/html"
  //    }
  //
  //    get("/foo") {
  //
  //    }
  //
  //    asyncGet("/foo") {
  //
  //    }
  //
  //    error {
  //      case e =>
  //    }
  //
  //    methodNotAllowed {
  //      case methods =>
  //    }
  //
  //    trap(100) {
  //
  //    }
  //
  //    notFound {
  //
  //    }
  //  }

  // NOTE(review): appears unused by the tests below — presumably kept to check
  // that a user-defined Route does not clash with the macro machinery; confirm.
  case class Route(x: Int)

  // here are some more issues which will be addressed in the future:
  //
  //  var futureEffect = false
  //
  //  // here the request will be invalidated by the servlet container and will lead to an IllegalStateException when accessing it from the Future
  //  get("/future-as-sideeffect") {
  //    Future {
  //
  //      // wait for Jetty to invalidate the request/response objects
  //      Thread.sleep(1000)
  //
  //      // try to access the session
  //      println(request.getSession)
  //      // java.lang.IllegalStateException: No SessionManager
  //      //   at org.eclipse.jetty.server.Request.getSession(Request.java:1402)
  //      //   at org.eclipse.jetty.server.Request.getSession(Request.java:1377)
  //
  //      futureEffect = true
  //
  //    } recover {
  //      case t => println(t.getMessage); t.printStackTrace()
  //    }
  //
  //    Ok(123)
  //  }
  //
  //  get("/future-effect") {
  //    futureEffect
  //  }
  //
  //  def doFooUnsafe = Future {
  //    request
  //  }
  //
  //  def doFooSafe(implicit request: HttpServletRequest) = Future {
  //    request
  //  }
  //
  //  // here the doFooUnsafe does close over the ThreadLocal-backed request from ScalatraBase
  //  get("/future-unsafe") {
  //    doFooUnsafe
  //  }
  //
  //  // this is the correct way at the moment, doFooSafe uses the request from StableResult
  //  get("/future-safe") {
  //    doFooSafe
  //  }

}

/** End-to-end checks that StableResult actions render and that Futures can safely use request/response. */
class StableResultSpec extends MutableScalatraSpec {

  mount(classOf[StableResultServlet], "/*")

  "An ActionResult is rendered" in {
    get("/ok") {
      status must beEqualTo(200)
      body must beEqualTo("123")
    }
  }

  "A Future can be returned as a result and there one can safely close over request/response" in {
    get("/future-as-result") {
      status must beEqualTo(200)
      body must beEqualTo("123")
    }
  }

  //  "It is safe to close over request/response in a Future which is not returned as a result" in {
  //    get("/future-as-sideeffect") {
  //      status must beEqualTo(200)
  //      body must beEqualTo("123")
  //    }
  //
  //    Thread.sleep(5000)
  //
  //    get("/future-effect") {
  //      status must beEqualTo(200)
  //      body must beEqualTo("true")
  //    }
  //  }

}
lightvector/scalatra
core/src/test/scala/org/scalatra/StableResultSpec.scala
Scala
bsd-2-clause
4,930
/*
 * Accio is a platform to launch computer science experiments.
 * Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
 *
 * Accio is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Accio is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Accio.  If not, see <http://www.gnu.org/licenses/>.
 */

package fr.cnrs.liris.lumos.cli

import com.twitter.util.Future
import fr.cnrs.liris.infra.cli.app.{Environment, ExitCode}
import fr.cnrs.liris.lumos.domain.thrift.{ExecState, ThriftAdapter}
import fr.cnrs.liris.lumos.server.ListJobsRequest
import fr.cnrs.liris.util.StringUtils.padTo
import org.joda.time.Instant
import org.joda.time.format.DateTimeFormat

/**
 * CLI `get` command: lists jobs from the Lumos server as a fixed-width table
 * (ID, creation time, state, labels). By default only active jobs are shown.
 */
final class GetCommand extends LumosCommand {
  private[this] val allFlag = flag("all", false, "Show all resources, including those inactive")
  private[this] val labelsFlag = flag[String]("labels", "Show only resources including one of given labels")
  private[this] val ownerFlag = flag[String]("owner", "Show only resources belonging to a given owner")
  private[this] val limitFlag = flag[Int]("n", "Limit the number of shown resources")

  override def name = "get"

  override def execute(residue: Seq[String], env: Environment): Future[ExitCode] = {
    // Active states are always included; -all widens the filter to terminal states too.
    var states = Set[ExecState](ExecState.Pending, ExecState.Scheduled, ExecState.Running)
    if (allFlag()) {
      states ++= Set(ExecState.Failed, ExecState.Successful, ExecState.Canceled, ExecState.Lost)
    }
    val req = ListJobsRequest(
      owner = ownerFlag.get,
      labels = labelsFlag.get,
      state = Some(states),
      limit = limitFlag.get)
    val client = createLumosClient(env)
    client.listJobs(req).map { resp =>
      // Column name and display width for the fixed-width table.
      val columns = Seq(("ID", 32), ("CREATED", 15), ("STATUS", 9), ("LABELS", 30))
      // Header row.
      columns.zipWithIndex.foreach { case ((name, width), idx) =>
        if (idx > 0) {
          env.reporter.outErr.printOut(" ")
        }
        env.reporter.outErr.printOut(padTo(name.toUpperCase, width))
      }
      env.reporter.outErr.printOutLn()
      // One row of cells per job, converted from thrift to the domain model.
      val rows = resp.jobs.map(ThriftAdapter.toDomain).map { job =>
        Seq(job.name, humanize(job.createTime), job.status.state.name, job.labels.map { case (k, v) => s"$k=$v" }.mkString(", "))
      }
      rows.foreach { row =>
        columns.zipWithIndex.foreach { case ((_, width), idx) =>
          if (idx > 0) {
            env.reporter.outErr.printOut(" ")
          }
          env.reporter.outErr.printOut(padTo(row(idx), width))
        }
        env.reporter.outErr.printOutLn()
      }
      // Tell the user when the server truncated the listing.
      val moreJobs = if (resp.totalCount > resp.jobs.size) resp.totalCount - resp.jobs.size else 0
      if (moreJobs > 0) {
        env.reporter.outErr.printOutLn(s"$moreJobs more...")
      }
      ExitCode.Success
    }
  }

  // NOTE(review): Joda pattern uses 'Y' (year of era) rather than 'y' (year);
  // identical output for CE dates, but 'y' would be the conventional choice.
  private[this] val timeFormat = DateTimeFormat.forPattern("YYYY-MM-dd HH:mm")

  /** Formats an instant for the CREATED column. */
  private def humanize(time: Instant) = timeFormat.print(time)
}
privamov/accio
accio/java/fr/cnrs/liris/lumos/cli/GetCommand.scala
Scala
gpl-3.0
3,348
package uk.gov.gds.ier.transaction.crown.waysToVote

import uk.gov.gds.ier.test.FormTestSuite
import uk.gov.gds.ier.model.{WaysToVote, WaysToVoteType}

/**
 * Binding tests for the crown "ways to vote" form: an empty payload and an
 * unknown way type must be rejected, and each known way type must bind to
 * its corresponding [[WaysToVoteType]].
 */
class WaysToVoteFormTests extends FormTestSuite with WaysToVoteForms {

  /** Binds a single `waysToVote.wayType` value and asserts it parses to `expected`. */
  private def bindingYields(wayType: String, expected: WaysToVoteType): Unit = {
    val payload = Json.toJson(Map("waysToVote.wayType" -> wayType))
    waysToVoteForm.bind(payload).fold(
      invalidForm => fail(serialiser.toJson(invalidForm.prettyPrint)),
      validForm => {
        validForm.waysToVote.isDefined should be(true)
        validForm.waysToVote should be(Some(WaysToVote(expected)))
      }
    )
  }

  it should "error out on empty input" in {
    waysToVoteForm.bind(Map.empty[String, String]).fold(
      invalidForm => {
        invalidForm.errors("waysToVote").head.message should be ("Please answer this question")
        invalidForm.globalErrorMessages should be (Seq("Please answer this question"))
      },
      _ => fail("Should have thrown an error")
    )
  }

  it should "bind successfully on in person going to polling station option" in {
    bindingYields("in-person", WaysToVoteType.InPerson)
  }

  it should "bind successfully on by post option" in {
    bindingYields("by-post", WaysToVoteType.ByPost)
  }

  it should "bind successfully on by proxy option" in {
    bindingYields("by-proxy", WaysToVoteType.ByProxy)
  }

  it should "error out on incorrect way to vote type" in {
    waysToVoteForm.bind(Json.toJson(Map("waysToVote.wayType" -> "foofoo"))).fold(
      invalidForm => {
        invalidForm.errors("waysToVote.wayType").head.message should be ("Unknown type")
        invalidForm.globalErrorMessages should be (Seq("Unknown type"))
      },
      _ => fail("Should have thrown an error")
    )
  }
}
michaeldfallen/ier-frontend
test/uk/gov/gds/ier/transaction/crown/waysToVote/WaysToVoteFormTests.scala
Scala
mit
2,551
package scheduler.kafka.manager

import kafka.manager.ActorModel._
import kafka.manager._
import org.apache.curator.framework.CuratorFramework

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

/** Configuration for [[KafkaSchedulerCommandActor]]. */
case class KafkaSchedulerCommandActorConfig(schedulerConfig: SchedulerConfig,
                                            curator: CuratorFramework,
                                            longRunningPoolConfig: LongRunningPoolConfig,
                                            askTimeoutMillis: Long = 400,
                                            version: KafkaVersion)

/**
 * Command actor that translates broker/topic command requests into REST calls
 * against the Kafka scheduler API, running each call on the long-running pool.
 */
class KafkaSchedulerCommandActor(kafkaCommandActorConfig: KafkaSchedulerCommandActorConfig) extends BaseCommandActor with LongRunningPoolActor {

  // REST client for the scheduler API; uses Play's default execution context.
  val schedulerRestClient = new SchedulerRestClient(kafkaCommandActorConfig.schedulerConfig.apiUrl)(play.api.libs.concurrent.Execution.Implicits.defaultContext)

  @scala.throws[Exception](classOf[Exception])
  override def preStart() = {
    log.info("Started actor %s".format(self.path))
  }

  @scala.throws[Exception](classOf[Exception])
  override def preRestart(reason: Throwable, message: Option[Any]) {
    log.error(reason, "Restarting due to [{}] when processing [{}]", reason.getMessage, message.getOrElse(""))
    super.preRestart(reason, message)
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    super.postStop()
  }

  override protected def longRunningPoolConfig: LongRunningPoolConfig = kafkaCommandActorConfig.longRunningPoolConfig

  // When the long-running pool is saturated, reply with a failed command result
  // instead of queueing more work.
  override protected def longRunningQueueFull(): Unit = {
    sender ! KCCommandResult(Try(throw new UnsupportedOperationException("Long running executor blocking queue is full!")))
  }

  // This actor expects no ActorResponse messages; anything received is logged.
  override def processActorResponse(response: ActorResponse): Unit = {
    response match {
      case any: Any => log.warning("ksca : processActorResponse : Received unknown message: {}", any)
    }
  }

  /**
   * Folds a REST-call future into a KCCommandResult: any successful value is
   * discarded and reported as Success(()), any throwable becomes a Failure.
   */
  def futureToKCCommandResult[T](future: Future[T])(implicit ec: ExecutionContext): Future[KCCommandResult] = {
    future.map {
      _ => KCCommandResult(Success(()))
    }.recover {
      case e: Throwable => KCCommandResult(Failure(e))
    }
  }

  // Dispatches each command request to the matching scheduler REST endpoint,
  // executed via longRunning on the dedicated pool.
  override def processCommandRequest(request: CommandRequest): Unit = {
    implicit val ec = longRunningExecutionContext
    request match {
      case KSCAddBroker(id, cpus, mem, heap, port, bindAddress, constraints, options, log4jOptions, jvmOptions, stickinessPeriod, failover) =>
        longRunning {
          futureToKCCommandResult(schedulerRestClient.addBroker(id, cpus, mem, heap, port, bindAddress, constraints, options, log4jOptions, jvmOptions, stickinessPeriod, failover))
        }
      case KSCUpdateBroker(id, cpus, mem, heap, port, bindAddress, constraints, options, log4jOptions, jvmOptions, stickinessPeriod, failover) =>
        longRunning {
          futureToKCCommandResult(schedulerRestClient.updateBroker(id, cpus, mem, heap, port, bindAddress, constraints, options, log4jOptions, jvmOptions, stickinessPeriod, failover))
        }
      case KSCStartBroker(id) =>
        longRunning {
          futureToKCCommandResult(schedulerRestClient.startBroker(id))
        }
      case KSCStopBroker(id) =>
        longRunning {
          futureToKCCommandResult(schedulerRestClient.stopBroker(id))
        }
      case KSCRemoveBroker(id) =>
        longRunning {
          futureToKCCommandResult(schedulerRestClient.removeBroker(id))
        }
      case KSCRebalanceTopics(ids, topics) =>
        longRunning {
          futureToKCCommandResult(schedulerRestClient.rebalanceTopics(ids, topics))
        }
      case any: Any => log.warning("ksca : processCommandRequest : Received unknown message: {}", any)
    }
  }
}
stealthly/kafka-manager
app/scheduler/kafka/manager/KafkaSchedulerCommandActor.scala
Scala
apache-2.0
3,761
// Pattern-match exhaustivity test: the match in Test.foo is deliberately
// non-exhaustive, and the compiler is expected to warn about the missing case.
sealed abstract class TA
sealed abstract class TB extends TA
case object B extends TB
case object B2 extends TB

case class CC(i: Int, tb: TB)

object Test {
  // Should warn that CC(_, B2) isn't matched
  def foo: CC => Unit = {
    case CC(_, B) => ()
  }
}
som-snytt/dotty
tests/patmat/t9398.scala
Scala
apache-2.0
259
package baskingcat.act.gameplay

import baskingcat.act._

/**
 * A movable gameplay object that can also walk. Self-typed to GameplayObject,
 * so it can only be mixed into game objects.
 *
 * @tparam A the object's status
 * @tparam B the object's form
 * @tparam C the direction it currently faces
 */
trait Walkable[A <: Status, B <: Form, C <: Direction] extends Movable[A, B] { obj: GameplayObject =>

  // Walking speed in world units; concrete objects must supply it.
  val speed: Float

  // Produces the object walking in direction D; the Manifest bound suggests the
  // implementation needs runtime type information for D — confirm in subclasses.
  def walk[D <: Direction: Manifest]: Walkable[_ <: Status, B, D]
}
halcat0x15a/act
src/main/scala/baskingcat/act/gameplay/Walkable.scala
Scala
bsd-3-clause
250
package dw.job.playview

import scala.math.random
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.HashSet
import scala.concurrent.Future
import scala.util.control.Breaks._
import java.io.File
import dw.job.click.LogUtil

//

/**
 * Local Spark batch job that loads tab-separated play-view log records into a
 * DataFrame and runs exploratory SQL over them.
 */
object PlayviewDataJob {

  // Local-mode Spark setup; context and SQL context are created eagerly at object init.
  val conf = new SparkConf()
    .setMaster("local")
    .setAppName("PlayViewBatchAnalytics")
    .set("spark.executor.memory", "1g")
  val sc = new SparkContext(conf)
  val sqlContext = new org.apache.spark.sql.SQLContext(sc)
  // this is used to implicitly convert an RDD to a DataFrame.
  import sqlContext.implicits._

  val counter = new AtomicInteger()

  /** Prints every line of a log file plus a few ad-hoc counts, then uncaches it. */
  def processLogFile(logFile: String) {
    val logData = sc.textFile(logFile, 2).cache()
    val words = logData.foreach({ line =>
      {
        //val toks = line.split(";")
        println(line)
      }
    })
    println("------------------------------------------------------------")
    val numAs = logData.filter(line => line.contains("www.yan.vn")).count()
    println("Lines with a: %s".format(numAs))
    println("line count %s".format(logData.count()))
    println("first line: %s".format(logData.first()))
    logData.unpersist(true)
  }

  // One tab-separated play-view log record; field order must match the log columns.
  case class Playview(loggedTime: Int, placementId: Int, ip: String, uuid: String, url: String, urlReferer: String, locCity: String, locCountry: String, platformId: Int, deviceType: String, deviceOs: String)

  def main(args: Array[String]) {
    // Hard-coded local input path for one hour of raw play-view logs.
    var path = "/home/trieu/data/raw_logs/playview-fptlay/day-2015-06-30/hour-20/"

    //    val files = LogUtil.recursiveListFiles(new File(path))
    //    def handler(f: File) {
    //      println(f.getAbsolutePath)
    //      processLogFile(f.getAbsolutePath)
    //      Thread.sleep(500)
    //    }
    //    files.foreach { handler }

    val file = path + "3.log"

    // Create a custom class to represent the Customer
    //    sc.textFile(file, 2).cache().foreach({ line =>
    //      {
    //        val toks = line.split("\\t")
    //        var i = counter.incrementAndGet()
    //        println(i +" "+toks(8))
    //      }
    //    })

    // Create a DataFrame of Playview objects from the dataset text file.
    val dfPlayviews = sc.textFile(file).cache().map(_.split("\\t")).map(p => Playview(
      p(0).trim.toInt, p(1).trim.toInt, p(2), p(3), p(4),
      p(5), p(6), p(7), p(8).trim.toInt, p(9), p(10))).toDF()

    // Register DataFrame as a table.
    dfPlayviews.registerTempTable("playviews")

    // Display the content of DataFrame
    //dfPlayviews.show()

    // SQL statements can be run by using the sql methods provided by sqlContext.
    val results = sqlContext.sql("SELECT DISTINCT url FROM playviews WHERE locCity = 'Ho Chi Minh City' AND platformId = 3")

    //playviewsByCity.show()

    // Strip any '#fragment' from each URL before printing.
    results.map(t => "url: " + String.valueOf(t(0)).split("#").apply(0) ).collect().foreach(println)

    //sc.stop()
  }
}
trieu/learn-spark
data-analytics-spark/src/main/scala/dw/job/playview/PlayviewDataJob.scala
Scala
apache-2.0
3,140
package com.eevolution.context.dictionary.infrastructure.repository

import com.eevolution.context.dictionary.domain.model.WorkflowProcessor
import com.eevolution.context.dictionary.infrastructure.db.DbContext._

/**
  * Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, either version 3 of the License, or
  * (at your option) any later version.
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  * Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
  * Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 31/10/17.
  */

/**
  * Workflow Processor Mapping
  *
  * Quill schema mapping between the WorkflowProcessor domain model and the
  * AD_WorkflowProcessor table; each pair maps a case-class field to its column.
  */
trait WorkflowProcessorMapping {
  val queryWorkflowProcessor = quote {
    querySchema[WorkflowProcessor]("AD_WorkflowProcessor",
      _.workflowProcessorId-> "AD_WorkflowProcessor_ID",
      _.tenantId-> "AD_Client_ID",
      _.organizationId -> "AD_Org_ID" ,
      _.isActive-> "IsActive",
      _.created-> "Created",
      _.createdBy-> "CreatedBy",
      _.updated-> "Updated",
      _.updatedBy-> "UpdatedBy",
      _.name-> "Name",
      _.description-> "Description",
      _.frequencyType-> "FrequencyType",
      _.frequency-> "Frequency",
      _.dateLastRun-> "DateLastRun",
      _.dateNextRun-> "DateNextRun",
      _.supervisorId-> "Supervisor_ID",
      _.keepLogDays-> "KeepLogDays",
      _.processing-> "Processing",
      _.inactivityAlertDays-> "InactivityAlertDays",
      _.remindDays-> "RemindDays",
      _.alertOverPriority-> "AlertOverPriority",
      _.uuid-> "UUID")
  }
}
adempiere/ADReactiveSystem
dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/WorkflowProcessorMapping.scala
Scala
gpl-3.0
2,092
/***********************************************************************
 * Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.apache.spark.sql.jts

import com.vividsolutions.jts.geom._
import org.apache.spark.sql._
import org.apache.spark.sql.types._

/**
 * Registry of Spark SQL user-defined types (UDTs) for the JTS geometry
 * hierarchy, plus integer type codes and a Class -> UDT lookup table.
 */
object JTSTypes {

  // Shared singleton instances of each geometry UDT.
  val GeometryTypeInstance = new GeometryUDT
  val PointTypeInstance = new PointUDT
  val LineStringTypeInstance = new LineStringUDT
  val PolygonTypeInstance = new PolygonUDT
  val MultiPointTypeInstance = new MultiPointUDT
  val MultiLineStringTypeInstance = new MultiLineStringUDT
  val MultipolygonTypeInstance = new MultiPolygonUDT
  val GeometryCollectionTypeInstance = new GeometryCollectionUDT

  // these constant values conform to WKB values
  val GeometryType = 0
  val PointType = 1
  val LineStringType = 2
  val PolygonType = 3
  val MultiPointType = 4
  val MultiLineStringType = 5
  val MultiPolygonType = 6
  val GeometryCollectionType = 7

  // Maps each JTS geometry class to the UDT class that serializes it.
  val typeMap: Map[Class[_], Class[_ <: UserDefinedType[_]]] = Map(
    classOf[Geometry]           -> classOf[GeometryUDT],
    classOf[Point]              -> classOf[PointUDT],
    classOf[LineString]         -> classOf[LineStringUDT],
    classOf[Polygon]            -> classOf[PolygonUDT],
    classOf[MultiPoint]         -> classOf[MultiPointUDT],
    classOf[MultiLineString]    -> classOf[MultiLineStringUDT],
    classOf[MultiPolygon]       -> classOf[MultiPolygonUDT],
    classOf[GeometryCollection] -> classOf[GeometryCollectionUDT]
  )
}

private [spark] class PointUDT extends AbstractGeometryUDT[Point]("point")
object PointUDT extends PointUDT

private [spark] class MultiPointUDT extends AbstractGeometryUDT[MultiPoint]("multipoint")
object MultiPointUDT extends MultiPointUDT

private [spark] class LineStringUDT extends AbstractGeometryUDT[LineString]("linestring")
object LineStringUDT extends LineStringUDT

private [spark] class MultiLineStringUDT extends AbstractGeometryUDT[MultiLineString]("multilinestring")
object MultiLineStringUDT extends MultiLineStringUDT

private [spark] class PolygonUDT extends AbstractGeometryUDT[Polygon]("polygon")
object PolygonUDT extends PolygonUDT

private [spark] class MultiPolygonUDT extends AbstractGeometryUDT[MultiPolygon]("multipolygon")
object MultiPolygonUDT extends MultiPolygonUDT

private [spark] class GeometryUDT extends AbstractGeometryUDT[Geometry]("geometry") {
  // The generic geometry UDT accepts any of the concrete geometry UDTs, so a
  // column of mixed geometry subtypes can be typed as plain "geometry".
  private[sql] override def acceptsType(dataType: DataType): Boolean = {
    super.acceptsType(dataType) ||
      dataType.getClass == JTSTypes.GeometryTypeInstance.getClass ||
      dataType.getClass == JTSTypes.PointTypeInstance.getClass ||
      dataType.getClass == JTSTypes.LineStringTypeInstance.getClass ||
      dataType.getClass == JTSTypes.PolygonTypeInstance.getClass ||
      dataType.getClass == JTSTypes.MultiLineStringTypeInstance.getClass ||
      dataType.getClass == JTSTypes.MultiPointTypeInstance.getClass ||
      dataType.getClass == JTSTypes.MultipolygonTypeInstance.getClass ||
      dataType.getClass == JTSTypes.GeometryCollectionTypeInstance.getClass
  }
}
case object GeometryUDT extends GeometryUDT

private [spark] class GeometryCollectionUDT
    extends AbstractGeometryUDT[GeometryCollection]("geometrycollection")
object GeometryCollectionUDT extends GeometryCollectionUDT
ddseapy/geomesa
geomesa-spark/geomesa-spark-jts/src/main/scala/org/apache/spark/sql/jts/JTSTypes.scala
Scala
apache-2.0
3,748
package com.schmitztech.hadoop

import collection.JavaConversions._
import com.schmitztech.hadoop.Implicits._
import org.apache.hadoop.mapreduce.lib.input._
import org.apache.hadoop.mapreduce.lib.output._
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.io._

/** Common Reducers */
object Reducers {

  /** Emits every (key, value) pair unchanged. */
  class IdentityReducer[K, V] extends Reducer[K, V, K, V] {
    override def reduce(key: K, values: java.lang.Iterable[V], context:Reducer[K,V,K,V]#Context) {
      for (v <- values) {
        context write (key, v)
      }
    }
  }

  /** Alternative to org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer */
  class IntSumReducer[T] extends Reducer[T, IntWritable, T, IntWritable] {
    override def reduce(key: T, values: java.lang.Iterable[IntWritable], context:Reducer[T,IntWritable,T,IntWritable]#Context) {
      // `_+_` on IntWritable presumably comes from the project's Implicits
      // import — confirm it returns a fresh IntWritable (Hadoop reuses the
      // iterator's value instance between elements).
      val sum = values.reduceRight(_+_)
      context write (key, sum)
    }
  }

  /** Alternative to org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer */
  class LongSumReducer[T] extends Reducer[T, LongWritable, T, LongWritable] {
    override def reduce(key: T, values: java.lang.Iterable[LongWritable], context:Reducer[T,LongWritable,T,LongWritable]#Context) {
      // Same note as IntSumReducer: addition on LongWritable is supplied by
      // Implicits; reduceRight folds the values into a single sum.
      val sum = values.reduceRight(_+_)
      context write (key, sum)
    }
  }
}
schmmd/Hadoop-Scala-Commons
src/main/scala/com/schmitztech/hadoop/Reducers.scala
Scala
apache-2.0
1,338
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// scalastyle:off println
package org.apache.spark.examples.mllib

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.{SparkConf, SparkContext}
import scopt.OptionParser

/**
 * An example app for summarizing multivariate data from a file. Run with
 * {{{
 * bin/run-example org.apache.spark.examples.mllib.MultivariateSummarizer
 * }}}
 * By default, this loads a synthetic dataset from `data/mllib/sample_linear_regression_data.txt`.
 * If you use it as a template to create your own app, please use `spark-submit` to submit your app.
 */
object MultivariateSummarizer {

  // Command-line parameters; `input` is the only option.
  case class Params(input: String = "data/mllib/sample_linear_regression_data.txt")
    extends AbstractParams[Params]

  def main(args: Array[String]) {
    val defaultParams = Params()

    // scopt parser for the --input option, with usage notes.
    val parser = new OptionParser[Params]("MultivariateSummarizer") {
      head("MultivariateSummarizer: an example app for MultivariateOnlineSummarizer")
      opt[String]("input")
        .text(s"Input path to labeled examples in LIBSVM format, default: ${defaultParams.input}")
        .action((x, c) => c.copy(input = x))
      note(
        """
        |For example, the following command runs this app on a synthetic dataset:
        |
        | bin/spark-submit --class org.apache.spark.examples.mllib.MultivariateSummarizer \
        |  examples/target/scala-*/spark-examples-*.jar \
        |  --input data/mllib/sample_linear_regression_data.txt
        """.stripMargin)
    }

    parser.parse(args, defaultParams) match {
      case Some(params) => run(params)
      case _ => sys.exit(1)
    }
  }

  /** Loads the LIBSVM file and prints summary statistics for labels and features. */
  def run(params: Params): Unit = {
    val conf = new SparkConf().setAppName(s"MultivariateSummarizer with $params")
    val sc = new SparkContext(conf)

    val examples = MLUtils.loadLibSVMFile(sc, params.input).cache()

    println(s"Summary of data file: ${params.input}")
    println(s"${examples.count()} data points")

    // Summarize labels
    val labelSummary = examples.aggregate(new MultivariateOnlineSummarizer())(
      (summary, lp) => summary.add(Vectors.dense(lp.label)),
      (sum1, sum2) => sum1.merge(sum2))

    // Summarize features
    val featureSummary = examples.aggregate(new MultivariateOnlineSummarizer())(
      (summary, lp) => summary.add(lp.features),
      (sum1, sum2) => sum1.merge(sum2))

    println()
    println(s"Summary statistics")
    println(s"\tLabel\tFeatures")
    println(s"mean\t${labelSummary.mean(0)}\t${featureSummary.mean.toArray.mkString("\t")}")
    println(s"var\t${labelSummary.variance(0)}\t${featureSummary.variance.toArray.mkString("\t")}")
    println(
      s"nnz\t${labelSummary.numNonzeros(0)}\t${featureSummary.numNonzeros.toArray.mkString("\t")}")
    println(s"max\t${labelSummary.max(0)}\t${featureSummary.max.toArray.mkString("\t")}")
    println(s"min\t${labelSummary.min(0)}\t${featureSummary.min.toArray.mkString("\t")}")
    println()

    sc.stop()
  }
}
// scalastyle:on println
chgm1006/spark-app
src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
Scala
apache-2.0
4,121
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.recommendation import java.io.File import java.util.Random import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import scala.collection.JavaConverters._ import scala.language.existentials import com.github.fommil.netlib.BLAS.{getInstance => blas} import org.apache.commons.io.FileUtils import org.apache.commons.io.filefilter.TrueFileFilter import org.apache.spark._ import org.apache.spark.internal.Logging import org.apache.spark.ml.linalg.Vectors import org.apache.spark.ml.recommendation.ALS._ import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils} import org.apache.spark.ml.util.TestingUtils._ import org.apache.spark.mllib.util.MLlibTestSparkContext import org.apache.spark.rdd.RDD import org.apache.spark.scheduler.{SparkListener, SparkListenerStageCompleted} import org.apache.spark.sql.{DataFrame, Row, SparkSession} import org.apache.spark.sql.types.{FloatType, IntegerType} import org.apache.spark.storage.StorageLevel import org.apache.spark.util.Utils class ALSSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest with Logging { override def beforeAll(): Unit = { super.beforeAll() 
sc.setCheckpointDir(tempDir.getAbsolutePath) } override def afterAll(): Unit = { super.afterAll() } test("LocalIndexEncoder") { val random = new Random for (numBlocks <- Seq(1, 2, 5, 10, 20, 50, 100)) { val encoder = new LocalIndexEncoder(numBlocks) val maxLocalIndex = Int.MaxValue / numBlocks val tests = Seq.fill(5)((random.nextInt(numBlocks), random.nextInt(maxLocalIndex))) ++ Seq((0, 0), (numBlocks - 1, maxLocalIndex)) tests.foreach { case (blockId, localIndex) => val err = s"Failed with numBlocks=$numBlocks, blockId=$blockId, and localIndex=$localIndex." val encoded = encoder.encode(blockId, localIndex) assert(encoder.blockId(encoded) === blockId, err) assert(encoder.localIndex(encoded) === localIndex, err) } } } test("normal equation construction") { val k = 2 val ne0 = new NormalEquation(k) .add(Array(1.0f, 2.0f), 3.0) .add(Array(4.0f, 5.0f), 6.0, 2.0) // weighted assert(ne0.k === k) assert(ne0.triK === k * (k + 1) / 2) // NumPy code that computes the expected values: // A = np.matrix("1 2; 4 5") // b = np.matrix("3; 6") // C = np.matrix(np.diag([1, 2])) // ata = A.transpose() * C * A // atb = A.transpose() * C * b assert(Vectors.dense(ne0.ata) ~== Vectors.dense(33.0, 42.0, 54.0) relTol 1e-8) assert(Vectors.dense(ne0.atb) ~== Vectors.dense(51.0, 66.0) relTol 1e-8) val ne1 = new NormalEquation(2) .add(Array(7.0f, 8.0f), 9.0) ne0.merge(ne1) // NumPy code that computes the expected values: // A = np.matrix("1 2; 4 5; 7 8") // b = np.matrix("3; 6; 9") // C = np.matrix(np.diag([1, 2, 1])) // ata = A.transpose() * C * A // atb = A.transpose() * C * b assert(Vectors.dense(ne0.ata) ~== Vectors.dense(82.0, 98.0, 118.0) relTol 1e-8) assert(Vectors.dense(ne0.atb) ~== Vectors.dense(114.0, 138.0) relTol 1e-8) intercept[IllegalArgumentException] { ne0.add(Array(1.0f), 2.0) } intercept[IllegalArgumentException] { ne0.add(Array(1.0f, 2.0f, 3.0f), 4.0) } intercept[IllegalArgumentException] { ne0.add(Array(1.0f, 2.0f), 0.0, -1.0) } intercept[IllegalArgumentException] { val ne2 
= new NormalEquation(3) ne0.merge(ne2) } ne0.reset() assert(ne0.ata.forall(_ == 0.0)) assert(ne0.atb.forall(_ == 0.0)) } test("CholeskySolver") { val k = 2 val ne0 = new NormalEquation(k) .add(Array(1.0f, 2.0f), 4.0) .add(Array(1.0f, 3.0f), 9.0) .add(Array(1.0f, 4.0f), 16.0) val ne1 = new NormalEquation(k) .merge(ne0) val chol = new CholeskySolver val x0 = chol.solve(ne0, 0.0).map(_.toDouble) // NumPy code that computes the expected solution: // A = np.matrix("1 2; 1 3; 1 4") // b = b = np.matrix("3; 6") // x0 = np.linalg.lstsq(A, b)[0] assert(Vectors.dense(x0) ~== Vectors.dense(-8.333333, 6.0) relTol 1e-6) assert(ne0.ata.forall(_ == 0.0)) assert(ne0.atb.forall(_ == 0.0)) val x1 = chol.solve(ne1, 1.5).map(_.toDouble) // NumPy code that computes the expected solution, where lambda is scaled by n: // x0 = np.linalg.solve(A.transpose() * A + 1.5 * np.eye(2), A.transpose() * b) assert(Vectors.dense(x1) ~== Vectors.dense(-0.1155556, 3.28) relTol 1e-6) } test("RatingBlockBuilder") { val emptyBuilder = new RatingBlockBuilder[Int]() assert(emptyBuilder.size === 0) val emptyBlock = emptyBuilder.build() assert(emptyBlock.srcIds.isEmpty) assert(emptyBlock.dstIds.isEmpty) assert(emptyBlock.ratings.isEmpty) val builder0 = new RatingBlockBuilder() .add(Rating(0, 1, 2.0f)) .add(Rating(3, 4, 5.0f)) assert(builder0.size === 2) val builder1 = new RatingBlockBuilder() .add(Rating(6, 7, 8.0f)) .merge(builder0.build()) assert(builder1.size === 3) val block = builder1.build() val ratings = Seq.tabulate(block.size) { i => (block.srcIds(i), block.dstIds(i), block.ratings(i)) }.toSet assert(ratings === Set((0, 1, 2.0f), (3, 4, 5.0f), (6, 7, 8.0f))) } test("UncompressedInBlock") { val encoder = new LocalIndexEncoder(10) val uncompressed = new UncompressedInBlockBuilder[Int](encoder) .add(0, Array(1, 0, 2), Array(0, 1, 4), Array(1.0f, 2.0f, 3.0f)) .add(1, Array(3, 0), Array(2, 5), Array(4.0f, 5.0f)) .build() assert(uncompressed.length === 5) val records = Seq.tabulate(uncompressed.length) { 
i => val dstEncodedIndex = uncompressed.dstEncodedIndices(i) val dstBlockId = encoder.blockId(dstEncodedIndex) val dstLocalIndex = encoder.localIndex(dstEncodedIndex) (uncompressed.srcIds(i), dstBlockId, dstLocalIndex, uncompressed.ratings(i)) }.toSet val expected = Set((1, 0, 0, 1.0f), (0, 0, 1, 2.0f), (2, 0, 4, 3.0f), (3, 1, 2, 4.0f), (0, 1, 5, 5.0f)) assert(records === expected) val compressed = uncompressed.compress() assert(compressed.size === 5) assert(compressed.srcIds.toSeq === Seq(0, 1, 2, 3)) assert(compressed.dstPtrs.toSeq === Seq(0, 2, 3, 4, 5)) var decompressed = ArrayBuffer.empty[(Int, Int, Int, Float)] var i = 0 while (i < compressed.srcIds.length) { var j = compressed.dstPtrs(i) while (j < compressed.dstPtrs(i + 1)) { val dstEncodedIndex = compressed.dstEncodedIndices(j) val dstBlockId = encoder.blockId(dstEncodedIndex) val dstLocalIndex = encoder.localIndex(dstEncodedIndex) decompressed += ((compressed.srcIds(i), dstBlockId, dstLocalIndex, compressed.ratings(j))) j += 1 } i += 1 } assert(decompressed.toSet === expected) } /** * Generates an explicit feedback dataset for testing ALS. 
* @param numUsers number of users * @param numItems number of items * @param rank rank * @param noiseStd the standard deviation of additive Gaussian noise on training data * @param seed random seed * @return (training, test) */ def genExplicitTestData( numUsers: Int, numItems: Int, rank: Int, noiseStd: Double = 0.0, seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = { val trainingFraction = 0.6 val testFraction = 0.3 val totalFraction = trainingFraction + testFraction val random = new Random(seed) val userFactors = genFactors(numUsers, rank, random) val itemFactors = genFactors(numItems, rank, random) val training = ArrayBuffer.empty[Rating[Int]] val test = ArrayBuffer.empty[Rating[Int]] for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) { val x = random.nextDouble() if (x < totalFraction) { val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1) if (x < trainingFraction) { val noise = noiseStd * random.nextGaussian() training += Rating(userId, itemId, rating + noise.toFloat) } else { test += Rating(userId, itemId, rating) } } } logInfo(s"Generated an explicit feedback dataset with ${training.size} ratings for training " + s"and ${test.size} for test.") (sc.parallelize(training, 2), sc.parallelize(test, 2)) } /** * Generates an implicit feedback dataset for testing ALS. * @param numUsers number of users * @param numItems number of items * @param rank rank * @param noiseStd the standard deviation of additive Gaussian noise on training data * @param seed random seed * @return (training, test) */ def genImplicitTestData( numUsers: Int, numItems: Int, rank: Int, noiseStd: Double = 0.0, seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = { ALSSuite.genImplicitTestData(sc, numUsers, numItems, rank, noiseStd, seed) } /** * Generates random user/item factors, with i.i.d. values drawn from U(a, b). 
* @param size number of users/items * @param rank number of features * @param random random number generator * @param a min value of the support (default: -1) * @param b max value of the support (default: 1) * @return a sequence of (ID, factors) pairs */ private def genFactors( size: Int, rank: Int, random: Random, a: Float = -1.0f, b: Float = 1.0f): Seq[(Int, Array[Float])] = { ALSSuite.genFactors(size, rank, random, a, b) } /** * Test ALS using the given training/test splits and parameters. * @param training training dataset * @param test test dataset * @param rank rank of the matrix factorization * @param maxIter max number of iterations * @param regParam regularization constant * @param implicitPrefs whether to use implicit preference * @param numUserBlocks number of user blocks * @param numItemBlocks number of item blocks * @param targetRMSE target test RMSE */ def testALS( training: RDD[Rating[Int]], test: RDD[Rating[Int]], rank: Int, maxIter: Int, regParam: Double, implicitPrefs: Boolean = false, numUserBlocks: Int = 2, numItemBlocks: Int = 3, targetRMSE: Double = 0.05): Unit = { val spark = this.spark import spark.implicits._ val als = new ALS() .setRank(rank) .setRegParam(regParam) .setImplicitPrefs(implicitPrefs) .setNumUserBlocks(numUserBlocks) .setNumItemBlocks(numItemBlocks) .setSeed(0) val alpha = als.getAlpha val model = als.fit(training.toDF()) val predictions = model.transform(test.toDF()).select("rating", "prediction").rdd.map { case Row(rating: Float, prediction: Float) => (rating.toDouble, prediction.toDouble) } val rmse = if (implicitPrefs) { // TODO: Use a better (rank-based?) evaluation metric for implicit feedback. // We limit the ratings and the predictions to interval [0, 1] and compute the weighted RMSE // with the confidence scores as weights. 
val (totalWeight, weightedSumSq) = predictions.map { case (rating, prediction) => val confidence = 1.0 + alpha * math.abs(rating) val rating01 = math.max(math.min(rating, 1.0), 0.0) val prediction01 = math.max(math.min(prediction, 1.0), 0.0) val err = prediction01 - rating01 (confidence, confidence * err * err) }.reduce { case ((c0, e0), (c1, e1)) => (c0 + c1, e0 + e1) } math.sqrt(weightedSumSq / totalWeight) } else { val mse = predictions.map { case (rating, prediction) => val err = rating - prediction err * err }.mean() math.sqrt(mse) } logInfo(s"Test RMSE is $rmse.") assert(rmse < targetRMSE) // copied model must have the same parent. MLTestingUtils.checkCopy(model) } test("exact rank-1 matrix") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 1) testALS(training, test, maxIter = 1, rank = 1, regParam = 1e-5, targetRMSE = 0.001) testALS(training, test, maxIter = 1, rank = 2, regParam = 1e-5, targetRMSE = 0.001) } test("approximate rank-1 matrix") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 1, noiseStd = 0.01) testALS(training, test, maxIter = 2, rank = 1, regParam = 0.01, targetRMSE = 0.02) testALS(training, test, maxIter = 2, rank = 2, regParam = 0.01, targetRMSE = 0.02) } test("approximate rank-2 matrix") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, targetRMSE = 0.03) testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03) } test("different block settings") { val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) for ((numUserBlocks, numItemBlocks) <- Seq((1, 1), (1, 2), (2, 1), (2, 2))) { testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03, numUserBlocks = numUserBlocks, numItemBlocks = numItemBlocks) } } test("more blocks than ratings") { val (training, test) = 
genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) testALS(training, test, maxIter = 2, rank = 1, regParam = 1e-4, targetRMSE = 0.002, numItemBlocks = 5, numUserBlocks = 5) } test("implicit feedback") { val (training, test) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, implicitPrefs = true, targetRMSE = 0.3) } test("using generic ID types") { val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) val longRatings = ratings.map(r => Rating(r.user.toLong, r.item.toLong, r.rating)) val (longUserFactors, _) = ALS.train(longRatings, rank = 2, maxIter = 4, seed = 0) assert(longUserFactors.first()._1.getClass === classOf[Long]) val strRatings = ratings.map(r => Rating(r.user.toString, r.item.toString, r.rating)) val (strUserFactors, _) = ALS.train(strRatings, rank = 2, maxIter = 4, seed = 0) assert(strUserFactors.first()._1.getClass === classOf[String]) } test("nonnegative constraint") { val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) val (userFactors, itemFactors) = ALS.train(ratings, rank = 2, maxIter = 4, nonnegative = true, seed = 0) def isNonnegative(factors: RDD[(Int, Array[Float])]): Boolean = { factors.values.map { _.forall(_ >= 0.0) }.reduce(_ && _) } assert(isNonnegative(userFactors)) assert(isNonnegative(itemFactors)) // TODO: Validate the solution. 
} test("als partitioner is a projection") { for (p <- Seq(1, 10, 100, 1000)) { val part = new ALSPartitioner(p) var k = 0 while (k < p) { assert(k === part.getPartition(k)) assert(k === part.getPartition(k.toLong)) k += 1 } } } test("partitioner in returned factors") { val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01) val (userFactors, itemFactors) = ALS.train( ratings, rank = 2, maxIter = 4, numUserBlocks = 3, numItemBlocks = 4, seed = 0) for ((tpe, factors) <- Seq(("User", userFactors), ("Item", itemFactors))) { assert(userFactors.partitioner.isDefined, s"$tpe factors should have partitioner.") val part = userFactors.partitioner.get userFactors.mapPartitionsWithIndex { (idx, items) => items.foreach { case (id, _) => if (part.getPartition(id) != idx) { throw new SparkException(s"$tpe with ID $id should not be in partition $idx.") } } Iterator.empty }.count() } } test("als with large number of iterations") { val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2, seed = 0) ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2, implicitPrefs = true, seed = 0) } test("read/write") { import ALSSuite._ val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1) val als = new ALS() allEstimatorParamSettings.foreach { case (p, v) => als.set(als.getParam(p), v) } val spark = this.spark import spark.implicits._ val model = als.fit(ratings.toDF()) // Test Estimator save/load val als2 = testDefaultReadWrite(als) allEstimatorParamSettings.foreach { case (p, v) => val param = als.getParam(p) assert(als.get(param).get === als2.get(param).get) } // Test Model save/load val model2 = testDefaultReadWrite(model) allModelParamSettings.foreach { case (p, v) => val param = model.getParam(p) assert(model.get(param).get === model2.get(param).get) } assert(model.rank === model2.rank) def 
getFactors(df: DataFrame): Set[(Int, Array[Float])] = { df.select("id", "features").collect().map { case r => (r.getInt(0), r.getAs[Array[Float]](1)) }.toSet } assert(getFactors(model.userFactors) === getFactors(model2.userFactors)) assert(getFactors(model.itemFactors) === getFactors(model2.itemFactors)) } test("input type validation") { val spark = this.spark import spark.implicits._ // check that ALS can handle all numeric types for rating column // and user/item columns (when the user/item ids are within Int range) val als = new ALS().setMaxIter(1).setRank(1) Seq(("user", IntegerType), ("item", IntegerType), ("rating", FloatType)).foreach { case (colName, sqlType) => MLTestingUtils.checkNumericTypesALS(als, spark, colName, sqlType) { (ex, act) => ex.userFactors.first().getSeq[Float](1) === act.userFactors.first.getSeq[Float](1) } { (ex, act, _) => ex.transform(_: DataFrame).select("prediction").first.getFloat(0) ~== act.transform(_: DataFrame).select("prediction").first.getFloat(0) absTol 1e-6 } } // check user/item ids falling outside of Int range val big = Int.MaxValue.toLong + 1 val small = Int.MinValue.toDouble - 1 val df = Seq( (0, 0L, 0d, 1, 1L, 1d, 3.0), (0, big, small, 0, big, small, 2.0), (1, 1L, 1d, 0, 0L, 0d, 5.0) ).toDF("user", "user_big", "user_small", "item", "item_big", "item_small", "rating") withClue("fit should fail when ids exceed integer range. 
") { assert(intercept[SparkException] { als.fit(df.select(df("user_big").as("user"), df("item"), df("rating"))) }.getCause.getMessage.contains("was out of Integer range")) assert(intercept[SparkException] { als.fit(df.select(df("user_small").as("user"), df("item"), df("rating"))) }.getCause.getMessage.contains("was out of Integer range")) assert(intercept[SparkException] { als.fit(df.select(df("item_big").as("item"), df("user"), df("rating"))) }.getCause.getMessage.contains("was out of Integer range")) assert(intercept[SparkException] { als.fit(df.select(df("item_small").as("item"), df("user"), df("rating"))) }.getCause.getMessage.contains("was out of Integer range")) } withClue("transform should fail when ids exceed integer range. ") { val model = als.fit(df) assert(intercept[SparkException] { model.transform(df.select(df("user_big").as("user"), df("item"))).first }.getMessage.contains("was out of Integer range")) assert(intercept[SparkException] { model.transform(df.select(df("user_small").as("user"), df("item"))).first }.getMessage.contains("was out of Integer range")) assert(intercept[SparkException] { model.transform(df.select(df("item_big").as("item"), df("user"))).first }.getMessage.contains("was out of Integer range")) assert(intercept[SparkException] { model.transform(df.select(df("item_small").as("item"), df("user"))).first }.getMessage.contains("was out of Integer range")) } } } class ALSCleanerSuite extends SparkFunSuite { test("ALS shuffle cleanup standalone") { val conf = new SparkConf() val localDir = Utils.createTempDir() val checkpointDir = Utils.createTempDir() def getAllFiles: Set[File] = FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet try { conf.set("spark.local.dir", localDir.getAbsolutePath) val sc = new SparkContext("local[2]", "test", conf) try { sc.setCheckpointDir(checkpointDir.getAbsolutePath) // Test checkpoint and clean parents val input = sc.parallelize(1 to 1000) val keyed = input.map(x => 
(x % 20, 1)) val shuffled = keyed.reduceByKey(_ + _) val keysOnly = shuffled.keys val deps = keysOnly.dependencies keysOnly.count() ALS.cleanShuffleDependencies(sc, deps, true) val resultingFiles = getAllFiles assert(resultingFiles === Set()) // Ensure running count again works fine even if we kill the shuffle files. keysOnly.count() } finally { sc.stop() } } finally { Utils.deleteRecursively(localDir) Utils.deleteRecursively(checkpointDir) } } test("ALS shuffle cleanup in algorithm") { val conf = new SparkConf() val localDir = Utils.createTempDir() val checkpointDir = Utils.createTempDir() def getAllFiles: Set[File] = FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet try { conf.set("spark.local.dir", localDir.getAbsolutePath) val sc = new SparkContext("local[2]", "test", conf) try { sc.setCheckpointDir(checkpointDir.getAbsolutePath) // Generate test data val (training, _) = ALSSuite.genImplicitTestData(sc, 20, 5, 1, 0.2, 0) // Implicitly test the cleaning of parents during ALS training val spark = SparkSession.builder .master("local[2]") .appName("ALSCleanerSuite") .sparkContext(sc) .getOrCreate() import spark.implicits._ val als = new ALS() .setRank(1) .setRegParam(1e-5) .setSeed(0) .setCheckpointInterval(1) .setMaxIter(7) val model = als.fit(training.toDF()) val resultingFiles = getAllFiles // We expect the last shuffles files, block ratings, user factors, and item factors to be // around but no more. 
val pattern = "shuffle_(\\\\d+)_.+\\\\.data".r val rddIds = resultingFiles.flatMap { f => pattern.findAllIn(f.getName()).matchData.map { _.group(1) } } assert(rddIds.size === 4) } finally { sc.stop() } } finally { Utils.deleteRecursively(localDir) Utils.deleteRecursively(checkpointDir) } } } class ALSStorageSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest with Logging { test("invalid storage params") { intercept[IllegalArgumentException] { new ALS().setIntermediateStorageLevel("foo") } intercept[IllegalArgumentException] { new ALS().setIntermediateStorageLevel("NONE") } intercept[IllegalArgumentException] { new ALS().setFinalStorageLevel("foo") } } test("default and non-default storage params set correct RDD StorageLevels") { val spark = this.spark import spark.implicits._ val data = Seq( (0, 0, 1.0), (0, 1, 2.0), (1, 2, 3.0), (1, 0, 2.0) ).toDF("user", "item", "rating") val als = new ALS().setMaxIter(1).setRank(1) // add listener to check intermediate RDD default storage levels val defaultListener = new IntermediateRDDStorageListener sc.addSparkListener(defaultListener) val model = als.fit(data) // check final factor RDD default storage levels val defaultFactorRDDs = sc.getPersistentRDDs.collect { case (id, rdd) if rdd.name == "userFactors" || rdd.name == "itemFactors" => rdd.name -> (id, rdd.getStorageLevel) }.toMap defaultFactorRDDs.foreach { case (_, (id, level)) => assert(level == StorageLevel.MEMORY_AND_DISK) } defaultListener.storageLevels.foreach(level => assert(level == StorageLevel.MEMORY_AND_DISK)) // add listener to check intermediate RDD non-default storage levels val nonDefaultListener = new IntermediateRDDStorageListener sc.addSparkListener(nonDefaultListener) val nonDefaultModel = als .setFinalStorageLevel("MEMORY_ONLY") .setIntermediateStorageLevel("DISK_ONLY") .fit(data) // check final factor RDD non-default storage levels val levels = sc.getPersistentRDDs.collect { case (id, rdd) if rdd.name == "userFactors" && 
rdd.id != defaultFactorRDDs("userFactors")._1 || rdd.name == "itemFactors" && rdd.id != defaultFactorRDDs("itemFactors")._1 => rdd.getStorageLevel } levels.foreach(level => assert(level == StorageLevel.MEMORY_ONLY)) nonDefaultListener.storageLevels.foreach(level => assert(level == StorageLevel.DISK_ONLY)) } } private class IntermediateRDDStorageListener extends SparkListener { val storageLevels: mutable.ArrayBuffer[StorageLevel] = mutable.ArrayBuffer() override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { val stageLevels = stageCompleted.stageInfo.rddInfos.collect { case info if info.name.contains("Blocks") || info.name.contains("Factors-") => info.storageLevel } storageLevels ++= stageLevels } } object ALSSuite extends Logging { /** * Mapping from all Params to valid settings which differ from the defaults. * This is useful for tests which need to exercise all Params, such as save/load. * This excludes input columns to simplify some tests. */ val allModelParamSettings: Map[String, Any] = Map( "predictionCol" -> "myPredictionCol" ) /** * Mapping from all Params to valid settings which differ from the defaults. * This is useful for tests which need to exercise all Params, such as save/load. * This excludes input columns to simplify some tests. */ val allEstimatorParamSettings: Map[String, Any] = allModelParamSettings ++ Map( "maxIter" -> 1, "rank" -> 1, "regParam" -> 0.01, "numUserBlocks" -> 2, "numItemBlocks" -> 2, "implicitPrefs" -> true, "alpha" -> 0.9, "nonnegative" -> true, "checkpointInterval" -> 20, "intermediateStorageLevel" -> "MEMORY_ONLY", "finalStorageLevel" -> "MEMORY_AND_DISK_SER" ) // Helper functions to generate test data we share between ALS test suites /** * Generates random user/item factors, with i.i.d. values drawn from U(a, b). 
* @param size number of users/items * @param rank number of features * @param random random number generator * @param a min value of the support (default: -1) * @param b max value of the support (default: 1) * @return a sequence of (ID, factors) pairs */ private def genFactors( size: Int, rank: Int, random: Random, a: Float = -1.0f, b: Float = 1.0f): Seq[(Int, Array[Float])] = { require(size > 0 && size < Int.MaxValue / 3) require(b > a) val ids = mutable.Set.empty[Int] while (ids.size < size) { ids += random.nextInt() } val width = b - a ids.toSeq.sorted.map(id => (id, Array.fill(rank)(a + random.nextFloat() * width))) } /** * Generates an implicit feedback dataset for testing ALS. * * @param sc SparkContext * @param numUsers number of users * @param numItems number of items * @param rank rank * @param noiseStd the standard deviation of additive Gaussian noise on training data * @param seed random seed * @return (training, test) */ def genImplicitTestData( sc: SparkContext, numUsers: Int, numItems: Int, rank: Int, noiseStd: Double = 0.0, seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = { // The assumption of the implicit feedback model is that unobserved ratings are more likely to // be negatives. 
val positiveFraction = 0.8 val negativeFraction = 1.0 - positiveFraction val trainingFraction = 0.6 val testFraction = 0.3 val totalFraction = trainingFraction + testFraction val random = new Random(seed) val userFactors = genFactors(numUsers, rank, random) val itemFactors = genFactors(numItems, rank, random) val training = ArrayBuffer.empty[Rating[Int]] val test = ArrayBuffer.empty[Rating[Int]] for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) { val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1) val threshold = if (rating > 0) positiveFraction else negativeFraction val observed = random.nextDouble() < threshold if (observed) { val x = random.nextDouble() if (x < totalFraction) { if (x < trainingFraction) { val noise = noiseStd * random.nextGaussian() training += Rating(userId, itemId, rating + noise.toFloat) } else { test += Rating(userId, itemId, rating) } } } } logInfo(s"Generated an implicit feedback dataset with ${training.size} ratings for training " + s"and ${test.size} for test.") (sc.parallelize(training, 2), sc.parallelize(test, 2)) } }
u2009cf/spark-radar
mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala
Scala
apache-2.0
30,684
// Copyright (C) 2019 MapRoulette contributors (see CONTRIBUTORS.md). // Licensed under the Apache License, Version 2.0 (see LICENSE). package org.maproulette.jobs import com.google.inject.AbstractModule import play.api.libs.concurrent.AkkaGuiceSupport /** * @author cuthbertm */ class JobModule extends AbstractModule with AkkaGuiceSupport { override def configure(): Unit = { bindActor[SchedulerActor]("scheduler-actor") bind(classOf[Scheduler]).asEagerSingleton() bind(classOf[Bootstrap]).asEagerSingleton() } }
mvexel/maproulette2
app/org/maproulette/jobs/JobModule.scala
Scala
apache-2.0
537
package mesosphere.marathon
package stream

import java.util
import java.util.stream.{ DoubleStream, IntStream, LongStream, Stream, StreamSupport }
import java.util.{ Spliterator, Spliterators }

import mesosphere.marathon.functional._
import mesosphere.marathon.stream.StreamConversions._

import scala.collection.immutable
import scala.collection.immutable.Seq
import scala.compat.java8.OptionConverters._
import scala.compat.java8.StreamConverters

/**
 * Enriches a Java Stream to appear as if its a scala collection that can be traversed once.
 *
 * NOTE(review): `isEmpty`/`hasDefiniteSize` are hard-coded to `false` — a Java
 * stream cannot be inspected without consuming it, so even an empty stream
 * reports non-empty here. Confirm callers never rely on `isEmpty`.
 */
class RichStream[T](val stream: Stream[T]) extends AnyVal with TraversableOnce[T] {
  // Scala functions are adapted to the Java functional interfaces via the
  // implicit conversions imported from `mesosphere.marathon.functional`.
  override def foreach[U](f: (T) => U): Unit = stream.forEach(f)
  override def isEmpty: Boolean = false
  override def hasDefiniteSize: Boolean = false
  override def seq: Seq[T] = toTraversable
  override def forall(p: (T) => Boolean): Boolean = stream.allMatch(p)
  override def exists(p: (T) => Boolean): Boolean = stream.anyMatch(p)
  override def find(p: (T) => Boolean): Option[T] = toScala(stream.filter(p).findFirst())
  override def copyToArray[B >: T](xs: Array[B], start: Int, len: Int): Unit = toStream.copyToArray(xs, start, len)
  // `Collectors` is presumably a same-package helper providing a Seq collector
  // — it is not imported explicitly here; TODO confirm.
  override def toTraversable: Seq[T] = stream.collect(Collectors.seq[T])
  override def isTraversableAgain: Boolean = false
  override def toStream: immutable.Stream[T] = StreamConverters.RichStream(stream).toScala[immutable.Stream]
  override def toIterator: Iterator[T] = StreamConverters.RichStream(stream).toScala[Iterator]
  // The results below are re-wrapped via the implicit conversions from
  // `StreamConversions._`.
  def distinct(): RichStream[T] = stream.distinct()
  def drop(l: Long): RichStream[T] = stream.skip(l)
  def take(l: Long): RichStream[T] = stream.limit(l)
  // Empty-stream max/min become None via the Optional -> Option conversion.
  def max()(implicit order: Ordering[T]): Option[T] = toScala(stream.max(order))
  def min()(implicit order: Ordering[T]): Option[T] = toScala(stream.min(order))
}

/**
 * Enriches a Java Stream to appear as if its a scala collection that can be traversed once.
 */
class RichDoubleStream(val stream: DoubleStream) extends AnyVal with TraversableOnce[Double] {
  override def foreach[U](f: (Double) => U): Unit = stream.forEach(f)
  // Hard-coded: a primitive stream cannot be inspected without consuming it,
  // so even an empty stream reports non-empty — NOTE(review): confirm callers
  // never rely on `isEmpty`.
  override def isEmpty: Boolean = false
  override def hasDefiniteSize: Boolean = false
  override def seq: Seq[Double] = toTraversable
  override def forall(p: (Double) => Boolean): Boolean = stream.allMatch(p)
  override def exists(p: (Double) => Boolean): Boolean = stream.anyMatch(p)
  override def find(p: (Double) => Boolean): Option[Double] = toScala(stream.filter(p).findFirst())
  override def copyToArray[B >: Double](xs: Array[B], start: Int, len: Int): Unit = toStream.copyToArray(xs, start, len)
  // Mutable-free collect: accumulate into an immutable Vector (the "combiner"
  // concatenates partial results when the stream is parallel).
  override def toTraversable: Seq[Double] = {
    val supplier = () => Vector.empty[Double]
    val accumulator = (buf: Vector[Double], v: Double) => buf :+ v
    val collector = (b1: Vector[Double], b2: Vector[Double]) => b1 ++ b2
    stream.collect(supplier, accumulator, collector)
  }
  override def isTraversableAgain: Boolean = false
  override def toStream: immutable.Stream[Double] = StreamConverters.RichDoubleStream(stream).toScala[immutable.Stream]
  override def toIterator: Iterator[Double] = StreamConverters.RichDoubleStream(stream).toScala[Iterator]
  def distinct(): RichDoubleStream = stream.distinct()
  def drop(l: Long): RichDoubleStream = stream.skip(l)
  def take(l: Long): RichDoubleStream = stream.limit(l)
  def map[R](f: Double => R): RichStream[R] = stream.map(f)
  // Unlike RichStream, max/min return a bare Double — NOTE(review): behavior
  // on an empty stream is not visible here; confirm before relying on it.
  def max()(implicit order: Ordering[Double]): Double = stream.max(order)
  def min()(implicit order: Ordering[Double]): Double = stream.min(order)
}

/**
 * Enriches a Java Stream to appear as if its a scala collection that can be traversed once.
 */
class RichIntStream(val stream: IntStream) extends AnyVal with TraversableOnce[Int] {
  override def foreach[U](f: (Int) => U): Unit = stream.forEach(f)
  // Hard-coded: a primitive stream cannot be inspected without consuming it.
  override def isEmpty: Boolean = false
  override def hasDefiniteSize: Boolean = false
  override def seq: Seq[Int] = toTraversable
  override def forall(p: (Int) => Boolean): Boolean = stream.allMatch(p)
  override def exists(p: (Int) => Boolean): Boolean = stream.anyMatch(p)
  override def find(p: (Int) => Boolean): Option[Int] = toScala(stream.filter(p).findFirst())
  override def copyToArray[B >: Int](xs: Array[B], start: Int, len: Int): Unit = toStream.copyToArray(xs, start, len)
  // Mutable-free collect into an immutable Vector (combiner handles the
  // parallel-stream case).
  override def toTraversable: Seq[Int] = {
    val supplier = () => Vector.empty[Int]
    val accumulator = (buf: Vector[Int], v: Int) => buf :+ v
    val collector = (b1: Vector[Int], b2: Vector[Int]) => b1 ++ b2
    stream.collect(supplier, accumulator, collector)
  }
  override def isTraversableAgain: Boolean = false
  override def toStream: immutable.Stream[Int] = StreamConverters.RichIntStream(stream).toScala[immutable.Stream]
  override def toIterator: Iterator[Int] = StreamConverters.RichIntStream(stream).toScala[Iterator]
  def distinct(): RichIntStream = stream.distinct()
  def drop(l: Long): RichIntStream = stream.skip(l)
  def take(l: Long): RichIntStream = stream.limit(l)
  // NOTE(review): inconsistent with the Double/Long siblings, which return
  // RichStream[R]; this returns RichIntStream regardless of R. Changing it
  // would alter the public interface, so it is only flagged here — confirm
  // whether RichStream[R] was intended.
  def map[R](f: Int => R): RichIntStream = stream.map(f)
  def max()(implicit order: Ordering[Int]): Int = stream.max(order)
  def min()(implicit order: Ordering[Int]): Int = stream.min(order)
}

/**
 * Enriches a Java Stream to appear as if its a scala collection that can be traversed once.
*/ class RichLongStream(val stream: LongStream) extends AnyVal with TraversableOnce[Long] { override def foreach[U](f: (Long) => U): Unit = stream.forEach(f) override def isEmpty: Boolean = false override def hasDefiniteSize: Boolean = false override def seq: Seq[Long] = toTraversable override def forall(p: (Long) => Boolean): Boolean = stream.anyMatch(p) override def exists(p: (Long) => Boolean): Boolean = stream.allMatch(p) override def find(p: (Long) => Boolean): Option[Long] = toScala(stream.filter(p).findFirst()) override def copyToArray[B >: Long](xs: Array[B], start: Int, len: Int): Unit = toStream.copyToArray(xs, start, len) override def toTraversable: Seq[Long] = { val supplier = () => Vector.empty[Long] val accumulator = (buf: Vector[Long], v: Long) => buf :+ v val collector = (b1: Vector[Long], b2: Vector[Long]) => b1 ++ b2 stream.collect(supplier, accumulator, collector) } override def isTraversableAgain: Boolean = false override def toStream: immutable.Stream[Long] = StreamConverters.RichLongStream(stream).toScala[immutable.Stream] override def toIterator: Iterator[Long] = StreamConverters.RichLongStream(stream).toScala[Iterator] def distinct(): RichLongStream = stream.distinct() def drop(l: Long): RichLongStream = stream.skip(l) def take(l: Long): RichLongStream = stream.limit(l) def map[R](f: Long => R): RichStream[R] = stream.map(f) def max()(implicit order: Ordering[Long]): Long = stream.max(order) def min()(implicit order: Ordering[Long]): Long = stream.min(order) } /** * Enriches a Enumerator by using the stream API to appear as if its a scala collection that can be traversed once. 
*/ class RichEnumeration[T](enum: util.Enumeration[T]) extends TraversableOnce[T] { val stream = StreamSupport.stream( Spliterators.spliteratorUnknownSize(new util.Iterator[T] { override def hasNext: Boolean = enum.hasMoreElements override def next(): T = enum.nextElement() }, Spliterator.ORDERED), false) override def foreach[U](f: (T) => U): Unit = stream.foreach(f) override def isEmpty: Boolean = enum.hasMoreElements override def hasDefiniteSize: Boolean = false override def seq: Seq[T] = stream.seq override def forall(p: (T) => Boolean): Boolean = stream.forall(p) override def exists(p: (T) => Boolean): Boolean = stream.exists(p) override def find(p: (T) => Boolean): Option[T] = stream.find(p) override def copyToArray[B >: T](xs: Array[B], start: Int, len: Int): Unit = stream.copyToArray(xs, start, len) override def toTraversable: Seq[T] = stream.toTraversable override def isTraversableAgain: Boolean = false override def toStream: immutable.Stream[T] = stream.toStream // linter:ignore TypeToType override def toIterator: Iterator[T] = stream.toIterator def distinct(): RichStream[T] = stream.distinct() def drop(l: Long): RichStream[T] = stream.skip(l) def take(l: Long): RichStream[T] = stream.limit(l) def max()(implicit order: Ordering[T]): Option[T] = toScala(stream.max(order)) def min()(implicit order: Ordering[T]): Option[T] = toScala(stream.min(order)) }
meln1k/marathon
src/main/scala/mesosphere/marathon/stream/RichStream.scala
Scala
apache-2.0
8,511
// Negative Scala.js compiler test: exported *member* names containing a
// double underscore are rejected, while *top-level* exports may use them.
// The trailing markers on each annotated definition are checked by the test
// framework — do not move, add, or reword them.
import scala.scalajs.js
import scala.scalajs.js.annotation.*

class A {
  @JSExport(name = "__") // error
  def foo: Int = 1

  @JSExport // error
  def bar__(x: Int): Int = x
}

object B {
  @JSExportTopLevel(name = "__") // ok
  val foo: Int = 1

  @JSExportTopLevel("bar__") // ok
  def bar(x: Int): Int = x
}
dotty-staging/dotty
tests/neg-scalajs/jsexport-double-underscore.scala
Scala
apache-2.0
313
package name.abhijitsarkar.user.repository

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.mongodb.casbah.commons.MongoDBObject
import de.flapdoodle.embed.mongo.MongodStarter
import de.flapdoodle.embed.mongo.config.MongodConfigBuilder
import de.flapdoodle.embed.mongo.distribution.Version
import name.abhijitsarkar.user.repository.MongoDBCollectionFactory.newCollection
import org.bson.types.ObjectId
import org.scalatest.BeforeAndAfterAll

/**
 * Runs the shared UserRepositorySpec suite against a MongoDB-backed
 * repository, using an embedded (flapdoodle) MongoDB instance that is
 * started during construction and torn down in afterAll().
 */
class MongoDBUserRepositorySpec extends UserRepositorySpec with BeforeAndAfterAll {
  implicit val system = ActorSystem("user-service")
  implicit def executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  // Embedded mongod is configured and started eagerly as part of class
  // construction; a free port is chosen by the builder.
  val mongodConfig = new MongodConfigBuilder()
    .version(Version.Main.PRODUCTION)
    .build

  val starter = MongodStarter.getDefaultInstance
  val mongodExecutable = starter.prepare(mongodConfig)
  val mongod = mongodExecutable.start
  val host = mongodConfig.net().getServerAddress.getHostName
  val port = mongodConfig.net().getPort

  // "test" is the collection name used for the whole suite.
  private val collection = newCollection("test", host, port)
  override protected val userRepository = MongoDBUserRepository(collection)(materializer)

  // Teardown order matters: drop the collection, stop the daemon, then the
  // executable wrapper.
  override def afterAll() {
    println("Cleaning up")
    collection.drop
    mongod.stop
    mongodExecutable.stop
  }

  // Debug helper: dump every document's key/value map to stdout.
  override protected def dumpAllUsers = {
    println("Printing all users")
    collection.find().foreach { dbObj =>
      println(dbObj.toMap)
    }
  }

  // Empty query object removes every document in the collection.
  override protected def deleteAllUsers() = {
    collection.remove(MongoDBObject.empty)
  }

  // Fresh BSON ObjectId, guaranteed not to collide with stored users.
  override protected def someUserId = {
    new ObjectId().toString()
  }
}
asarkar/akka
user-service/src/test/scala/name/abhijitsarkar/user/repository/MongoDBUserRepositorySpec.scala
Scala
gpl-3.0
1,636
package semverfi

import org.specs._

/**
 * Specs for Show: rendering semantic versions back to their canonical
 * string form (normal, pre-release with "-", build metadata with "+",
 * and the invalid fallback).
 */
object ShowSpec extends Specification {
  "showing versions" should {
    "show normal versions" in {
      Show(NormalVersion(1, 2, 3)) must_== "1.2.3"
    }
    // Pre-release identifiers are joined with "." and prefixed by "-".
    "show pre-release versions" in {
      Show(PreReleaseVersion(1, 2, 3, Seq("alpha", "1"))) must_== "1.2.3-alpha.1"
    }
    // Build identifiers are joined with "." and prefixed by "+".
    "show build versions" in {
      Show(BuildVersion(1,2,3, Seq(), Seq("build", "1"))) must_== "1.2.3+build.1"
    }
    // Unparseable input is echoed back with an "invalid: " prefix.
    "show invalid versions" in {
      Show(Invalid("asdfasdf")) must_== "invalid: asdfasdf"
    }
  }
}
softprops/semverfi
src/test/scala/show.scala
Scala
mit
546
package julienrf.json.derived

import play.api.libs.json.{Reads, Json, OWrites, __}

/**
 * Strategy to serialize a tagged type (used to discriminate sum types).
 *
 * Built-in instances live in the [[TypeTagOWrites$ companion object]].
 */
trait TypeTagOWrites {
  /**
   * @param typeName Type name
   * @param owrites Base serializer
   * @return A serializer that encodes an `A` value along with its type tag
   */
  def owrites[A](typeName: String, owrites: OWrites[A]): OWrites[A]
}

object TypeTagOWrites {
  /**
   * Encodes a tagged type by creating a JSON object wrapping the actual `A` JSON representation. This wrapper
   * is an object with just one field whose name is the type tag.
   *
   * For instance, consider the following type definition:
   *
   * {{{
   *   sealed trait Foo
   *   case class Bar(s: String, i: Int) extends Foo
   *   case object Baz extends Foo
   * }}}
   *
   * The JSON representation of `Bar("quux", 42)` is the following JSON object:
   *
   * {{{
   *   {
   *     "Bar": {
   *       "s": "quux",
   *       "i": 42
   *     }
   *   }
   * }}}
   */
  val nested: TypeTagOWrites = new TypeTagOWrites {
    // Single-field wrapper object: { typeName: <base representation> }.
    def owrites[A](typeName: String, owrites: OWrites[A]): OWrites[A] =
      OWrites[A](a => Json.obj(typeName -> owrites.writes(a)))
  }

  /**
   * Encodes a tagged type by adding an extra field to the base `A` JSON representation.
   *
   * For instance, consider the following type definition:
   *
   * {{{
   *   sealed trait Foo
   *   case class Bar(s: String, i: Int) extends Foo
   *   case object Baz extends Foo
   * }}}
   *
   * And also:
   *
   * {{{
   *   implicit val fooOWrites: OWrites[Foo] = derived.flat.owrites((__ \\ "type").write)
   * }}}
   *
   * The JSON representation of `Bar("quux", 42)` is then the following JSON object:
   *
   * {{{
   *   {
   *     "type": "Bar",
   *     "s": "quux",
   *     "i": 42
   *   }
   * }}}
   *
   * @param tagOwrites A way to encode the type tag as a JSON object (whose fields will be merged with the base JSON representation)
   */
  def flat(tagOwrites: OWrites[String]): TypeTagOWrites = new TypeTagOWrites {
    // Merge the tag object's fields with the base representation's fields.
    def owrites[A](typeName: String, owrites: OWrites[A]): OWrites[A] =
      OWrites[A](a => tagOwrites.writes(typeName) ++ owrites.writes(a))
  }
}

/**
 * Strategy to deserialize a tagged type (used to discriminate sum types).
 *
 * Built-in instances live in the [[TypeTagReads$ companion object]].
 */
trait TypeTagReads {
  /**
   * @param typeName Type name
   * @param reads Base deserializer
   * @return A deserializer that decodes a subtype of `A` based on the given `typeName` discriminator.
   */
  def reads[A](typeName: String, reads: Reads[A]): Reads[A]
}

object TypeTagReads {
  /**
   * Decodes a JSON value encoded with [[TypeTagOWrites.nested]].
   */
  val nested: TypeTagReads = new TypeTagReads {
    // Descend into the wrapper field named after the type tag.
    def reads[A](typeName: String, reads: Reads[A]): Reads[A] = (__ \\ typeName).read(reads)
  }

  /**
   * Decodes a JSON value encoded with [[TypeTagOWrites.flat]].
   *
   * @param tagReads A way to decode the type tag value.
   */
  def flat(tagReads: Reads[String]): TypeTagReads = new TypeTagReads {
    // Only proceed with the base deserializer when the decoded tag equals
    // `typeName`; otherwise this Reads fails, letting alternatives be tried.
    def reads[A](typeName: String, reads: Reads[A]): Reads[A] =
      tagReads.filter(_ == typeName).flatMap(_ => reads)
  }
}
julienrf/play-json-variants
library/src/main/scala/julienrf/json/derived/typetags.scala
Scala
mit
3,411
/*
 * Copyright (C) 2010-2014 GRNET S.A.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

package gr.grnet.pithosj.core.command

import com.twitter.finagle.httpx.Method.Head
import com.twitter.finagle.httpx.{Response, Status}
import gr.grnet.pithosj.core.ServiceInfo

/**
 * A lightweight liveness check against the Pithos+ service: an HTTP HEAD
 * request on the account root, considered successful on a 204 response.
 */
case class PingCommand(serviceInfo: ServiceInfo) extends PithosCommandSkeleton[Unit] {
  // HEAD: no body is transferred; we only care about the status code.
  val httpMethod = Head

  // A ping succeeds only on "204 No Content".
  val successStatuses = Set(Status.fromCode(204))

  /**
   * Computes that URL path parts that will follow the Pithos+ server URL
   * in the HTTP call.
   */
  val serverRootPathElements = Seq(serviceInfo.rootPath, serviceInfo.uuid)

  // A ping carries no payload, so there is no result data to extract.
  def buildResultData(response: Response, startMillis: Long, stopMillis: Long): Unit = {}
}
grnet/pithos-j
src/main/scala/gr/grnet/pithosj/core/command/PingCommand.scala
Scala
gpl-3.0
1,317
/* * Copyright 2011 Red Hat, Inc. and/or its affiliates. * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA */ package org.infinispan.server.hotrod /* * Copyright 2011 Red Hat, Inc. and/or its affiliates. * * This is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

import org.infinispan.remoting.transport.Address
import java.nio.charset.Charset
import java.util.Arrays
import org.infinispan.marshall.AbstractExternalizer
import java.io.{ObjectInput, ObjectOutput}
import scala.collection.JavaConversions._

/**
 * A Hot Rod server address
 *
 * @author Galder Zamarreño
 * @since 5.1
 */
class ServerAddress(val host: String, val port: Int) extends Address {

   import ServerAddress._

//   // IMPORTANT NOTE: Hot Rod protocol agrees to this calculation for a node
//   // address hash code calculation, so any changes to the implementation
//   // require modification of the protocol.
//   override def hashCode() = Arrays.hashCode(
//      "%s:%d".format(host, port).getBytes(UTF8))

   // NOTE(review): the commented-out implementation above claims the Hot Rod
   // protocol depends on the address hash; the active formula below differs
   // from it — confirm the protocol was updated to match this calculation.
   override def hashCode() = (31 * host.hashCode()) + port

   // Structural equality on (host, port) only, consistent with hashCode.
   override def equals(obj: Any): Boolean = {
      obj match {
         case s: ServerAddress => s.host == host && s.port == port
         case _ => false
      }
   }

   override def toString = "%s:%d".format(host, port)

   // Order by host first, then by port. Non-ServerAddress arguments compare
   // as equal (0) — NOTE(review): that makes the ordering inconsistent with
   // equals for mixed Address types; confirm callers never mix them.
   def compareTo(o: Address) : Int = {
      o match {
         case oa : ServerAddress => {
            var cmp = host.compareTo(oa.host)
            if (cmp == 0) {
               cmp = port - oa.port
            }
            cmp
         }
         case _ => 0
      }
   }
}

object ServerAddress {
//   val UTF8 = Charset.forName("UTF-8")

   class Externalizer extends AbstractExternalizer[ServerAddress] {
      // Port is written as a short and read back unsigned, which covers the
      // full 0-65535 TCP port range.
      def writeObject(out: ObjectOutput, obj: ServerAddress) {
         out.writeObject(obj.host)
         out.writeShort(obj.port)
      }

      def readObject(in: ObjectInput): ServerAddress = {
         val host = in.readObject.asInstanceOf[String]
         val port = in.readUnsignedShort()
         new ServerAddress(host, port)
      }

      def getTypeClasses = setAsJavaSet(
         Set[java.lang.Class[_ <: ServerAddress]](classOf[ServerAddress]))
   }
}
nmldiegues/stibt
infinispan/server/hotrod/src/main/scala/org/infinispan/server/hotrod/ServerAddress.scala
Scala
apache-2.0
3,535
/**
 * Interface to the Vampire first-order theorem prover.
 */
package at.logic.gapt.provers.vampire

import at.logic.gapt.proofs.lk.base.FSequent
import at.logic.gapt.formats.tptp.TPTPFOLExporter
import java.io._
import at.logic.gapt.utils.logging.Logger
import scala.io.Source
import java.nio.file.{ Paths, Files }

/** Signals a failure to locate or run the Vampire binary, or to interpret its output. */
class VampireException( msg: String ) extends Exception( msg )

object Vampire extends Logger {

  /** Exports the named sequents as a TPTP problem and writes it to `file_name`. */
  def writeProblem( named_sequents: List[Tuple2[String, FSequent]], file_name: String ) = {
    val tptp = TPTPFOLExporter.tptp_problem_named( named_sequents )
    val writer = new FileWriter( file_name )
    // FIX: the writer was previously only flushed, never closed, leaking the
    // file handle; close() both flushes and releases it, even on failure.
    try writer.write( tptp )
    finally writer.close()
  }

  // TODO: this does not really belong here, refactor?
  // Executes the vampire binary and feeds the contents of the file at
  // path `in` to its standard input.
  //
  // @return (exit value, captured standard output)
  // @throws VampireException if no binary name is known for this OS
  private def exec( in: String ): ( Int, String ) = {
    if ( !vampireBinaryName.isDefined )
      throw new VampireException( "Unable to determine vampire's binary name for your OS!" )
    val prog = vampireBinaryName.get
    val p = Runtime.getRuntime.exec( prog )
    val out = new OutputStreamWriter( p.getOutputStream )
    // FIX: close the input source as well as the process' stdin, and do so
    // even if writing fails (previously only stdin was closed, and only on
    // the success path).
    val input = Source.fromInputStream( new FileInputStream( in ) )
    try out.write( input.mkString )
    finally {
      input.close
      out.close
    }
    // Read vampire's entire stdout before waiting for termination.
    val str = Source.fromInputStream( p.getInputStream ).mkString
    p.waitFor
    ( p.exitValue, str )
  }

  /** Writes `str` to `file`, closing the writer even on failure. */
  def writeToFile( str: String, file: String ) = {
    val out = new FileWriter( file )
    try out.write( str )
    finally out.close()
  }

  /**
   * Runs vampire on `input_file`, mirroring its stdout to `output_file`.
   *
   * @return (exit value, vampire's standard output)
   */
  def refute( input_file: String, output_file: String ): ( Int, String ) = {
    val ret = exec( input_file )
    writeToFile( ret._2, output_file )
    ret
  }

  /**
   * Writes the named sequents as a TPTP problem to `input_file` and asks
   * vampire to refute it.
   *
   * @return true if a refutation was found, false if the problem is satisfiable
   * @throws VampireException on any other exit status or output
   */
  def refuteNamed( named_sequents: List[Tuple2[String, FSequent]], input_file: String, output_file: String ): Boolean = {
    writeProblem( named_sequents, input_file )
    val ret = refute( input_file, output_file )
    ret._1 match {
      case 0 if ret._2.startsWith( "Refutation found" ) => true
      case 0 if ret._2.startsWith( "Satisfiable" )      => false
      case _ =>
        throw new VampireException( "There was a problem executing vampire!" )
    }
  }

  /** As [[refuteNamed]], generating sequent names "sequent0", "sequent1", ... */
  def refute( sequents: List[FSequent], input_file: String, output_file: String ): Boolean =
    refuteNamed( sequents.zipWithIndex.map( p => ( "sequent" + p._2, p._1 ) ), input_file, output_file )

  /** As [[refuteNamed]], using (and afterwards deleting) temporary files. */
  def refute( sequents: List[FSequent] ): Boolean = {
    val in_file = File.createTempFile( "gapt-vampire", ".tptp", null )
    val out_file = File.createTempFile( "gapt-vampire", "vampire", null )
    // deleteOnExit is a safety net in case refute throws before the explicit
    // deletes below.
    in_file.deleteOnExit()
    out_file.deleteOnExit()
    val ret = refute( sequents, in_file.getAbsolutePath, out_file.getAbsolutePath )
    in_file.delete
    out_file.delete
    ret
  }

  // Platform-specific binary name; None on unsupported OS/architecture
  // combinations.
  val vampireBinaryName: Option[String] = {
    val osName = System.getProperty( "os.name" ).toLowerCase()
    val osArch = System.getProperty( "os.arch" )
    osName match {
      case osName if osName.contains( "mac" )                               => Some( "vampire_mac" )
      case osName if osName.contains( "linux" ) && osArch.contains( "64" )  => Some( "vampire_lin64" )
      case osName if osName.contains( "linux" ) && osArch.contains( "32" )  => Some( "vampire_lin32" )
      case _                                                                => None
    }
  }

  /**
   * Probes whether vampire can actually be executed by refuting an empty
   * problem.
   *
   * FIX: previously an unsupported OS (VampireException from exec) escaped
   * this method instead of yielding false, and the control flow used
   * non-idiomatic `return` statements.
   */
  def isInstalled(): Boolean =
    try {
      Vampire.refute( List() )
      true
    } catch {
      case e: IOException =>
        warn( e.getMessage )
        false
      case e: VampireException =>
        warn( e.getMessage )
        false
    }
}
gisellemnr/gapt
src/main/scala/at/logic/gapt/provers/vampire/vampire.scala
Scala
gpl-3.0
3,421
package com.typesafe.slick.testkit.tests

import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest}

/**
 * Testkit suite covering Slick's join operators: implicit and explicit inner
 * joins, left/right/full outer joins (with primitive and whole-row lifting
 * into Option), zip joins, joins without conditions, mixed query/join
 * composition, and outer-join discriminator handling.
 */
class JoinTest extends AsyncTest[RelationalTestDB] {
  import tdb.profile.api._

  // Inner joins: implicit (cross + filter), explicit `join ... on`, a raw
  // cross join, and Rep.None projection.
  def testJoin = {
    class Categories(tag: Tag) extends Table[(Int, String)](tag, "cat_j") {
      def id = column[Int]("id")
      def name = column[String]("name")
      def * = (id, name)
    }
    val categories = TableQuery[Categories]

    class Posts(tag: Tag) extends Table[(Int, String, Int)](tag, "posts_j") {
      def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
      def title = column[String]("title")
      def category = column[Int]("category")
      def withCategory = Query(this) join categories
      def * = (id, title, category)
    }
    val posts = TableQuery[Posts]

    for {
      _ <- (categories.schema ++ posts.schema).create
      _ <- categories ++= Seq(
        (1, "Scala"),
        (2, "ScalaQuery"),
        (3, "Windows"),
        (4, "Software")
      )
      _ <- posts.map(p => (p.title, p.category)) ++= Seq(
        ("Test Post", -1),
        ("Formal Language Processing in Scala, Part 5", 1),
        ("Efficient Parameterized Queries in ScalaQuery", 2),
        ("Removing Libraries and HomeGroup icons from the Windows 7 desktop", 3),
        ("A ScalaQuery Update", 2)
      )
      // Implicit join
      q1 = (for {
        c <- categories
        p <- posts if c.id === p.category
      } yield (p.id, c.id, c.name, p.title)).sortBy(_._1)
      _ <- mark("q1", q1.map(p => (p._1, p._2)).result).map(_ shouldBe List((2,1), (3,2), (4,3), (5,2)))
      // Explicit inner join
      q2 = (for {
        (c,p) <- categories join posts on (_.id === _.category)
      } yield (p.id, c.id, c.name, p.title)).sortBy(_._1)
      _ <- q2.map(p => (p._1, p._2)).result.map(_ shouldBe List((2,1), (3,2), (4,3), (5,2)))
      // Cross join via withCategory: 5 posts x 4 categories = 20 rows.
      q3 = posts.flatMap(_.withCategory)
      _ <- mark("q3", q3.result).map(_ should (_.length == 20))
      q4 = (for {
        a1 <- categories
        a2 <- categories
        a3 <- categories
        a4 <- categories if a1.id === a4.id
      } yield a1.id).to[Set]
      _ <- mark("q4", q4.result).map(_ shouldBe Set(1, 2, 3, 4))
      // Rep.None projects a literal NULL column alongside each row.
      q5 = (for {
        c <- categories
      } yield (c, Rep.None[Int])).sortBy(_._1.id)
      _ <- mark("q5", q5.result.map(_.map(_._1._1))).map(_ shouldBe List(1,2,3,4))
    } yield ()
  }

  // Outer joins where the non-matching side is lifted into Option, both for
  // single columns ("primitive") and whole rows ("non-primitive"), including
  // nested outer joins.
  def testOptionExtendedJoin = {
    class Data(name: String)(tag: Tag) extends Table[(Int, String)](tag, name) {
      def a = column[Int]("a")
      def b = column[String]("b")
      def * = (a, b)
    }
    val xs = TableQuery(new Data("xs_jo")(_))
    val ys = TableQuery(new Data("ys_jo")(_))

    for {
      _ <- (xs.schema ++ ys.schema).create
      _ <- xs ++= Seq((1, "a"), (2, "b"), (3, "b"), (4, "c"), (5, "c"))
      _ <- ys ++= Seq((1, "a"), (2, "b"), (3, "b"), (4, "d"), (5, "d"))
      // Left outer, lift primitive value
      q1 = (xs.map(_.b) joinLeft ys.map(_.b) on (_ === _)).to[Set]
      r1 <- mark("q1", q1.result)
      r1t: Set[(String, Option[String])] = r1
      _ = r1 shouldBe Set(("a",Some("a")), ("b",Some("b")), ("c",None))
      // Nested left outer, lift primitive value
      q2 = ((xs.map(_.b) joinLeft ys.map(_.b) on (_ === _)) joinLeft ys.map(_.b) on (_._1 === _)).to[Set]
      r2 <- mark("q2", q2.result)
      r2t: Set[((String, Option[String]), Option[String])] = r2
      _ = r2 shouldBe Set((("a",Some("a")),Some("a")), (("b",Some("b")),Some("b")), (("c",None),None))
      // Left outer, lift non-primitive value
      q3 = (xs joinLeft ys on (_.b === _.b)).to[Set]
      r3 <- mark("q3", q3.result)
      r3t: Set[((Int, String), Option[(Int, String)])] = r3
      _ = r3 shouldBe Set(((3,"b"),Some((3,"b"))), ((3,"b"),Some((2,"b"))), ((5,"c"),None), ((1,"a"),Some((1,"a"))), ((4,"c"),None), ((2,"b"),Some((3,"b"))), ((2,"b"),Some((2,"b"))))
      // Left outer, lift non-primitive value, then map to primitive
      q4 = (xs joinLeft ys on (_.b === _.b)).map { case (x, yo) => (x.a, yo.map(_.a)) }.to[Set]
      r4 <- mark("q4", q4.result)
      r4t: Set[(Int, Option[Int])] = r4
      _ = r4 shouldBe Set((4,None), (3,Some(2)), (2,Some(3)), (2,Some(2)), (3,Some(3)), (1,Some(1)), (5,None))
      // Nested left outer, lift non-primitive value
      q5 = ((xs joinLeft ys on (_.b === _.b)) joinLeft ys on (_._1.b === _.b)).to[Set]
      r5 <- mark("q5", q5.result)
      r5t: Set[(((Int, String), Option[(Int, String)]), Option[(Int, String)])] = r5
      _ = r5 shouldBe Set(
        (((1,"a"),Some((1,"a"))),Some((1,"a"))),
        (((2,"b"),Some((2,"b"))),Some((2,"b"))),
        (((2,"b"),Some((2,"b"))),Some((3,"b"))),
        (((2,"b"),Some((3,"b"))),Some((2,"b"))),
        (((2,"b"),Some((3,"b"))),Some((3,"b"))),
        (((3,"b"),Some((2,"b"))),Some((2,"b"))),
        (((3,"b"),Some((2,"b"))),Some((3,"b"))),
        (((3,"b"),Some((3,"b"))),Some((2,"b"))),
        (((3,"b"),Some((3,"b"))),Some((3,"b"))),
        (((4,"c"),None),None),
        (((5,"c"),None),None)
      )
      // Right outer, lift primitive value
      q6 = (ys.map(_.b) joinRight xs.map(_.b) on (_ === _)).to[Set]
      r6 <- mark("q6", q6.result)
      r6t: Set[(Option[String], String)] = r6
      _ = r6 shouldBe Set((Some("a"),"a"), (Some("b"),"b"), (None,"c"))
      // Nested right outer, lift primitive value
      // (left-associative; not symmetrical to the nested left outer case)
      q7 = ((ys.map(_.b) joinRight xs.map(_.b) on (_ === _)) joinRight xs.map(_.b) on (_._2 === _)).to[Set]
      r7 <- mark("q7", q7.result)
      rt: Set[(Option[(Option[String], String)], String)] = r7
      _ = r7 shouldBe Set((Some((Some("a"),"a")),"a"), (Some((Some("b"),"b")),"b"), (Some((None,"c")),"c"))
      // Right outer, lift non-primitive value
      q8 = (ys joinRight xs on (_.b === _.b)).to[Set]
      r8 <- mark("q8", q8.result)
      r8t: Set[(Option[(Int, String)], (Int, String))] = r8
      _ = r8 shouldBe Set(
        (Some((1,"a")), (1,"a")),
        (Some((2,"b")), (2,"b")),
        (Some((3,"b")), (2,"b")),
        (Some((2,"b")), (3,"b")),
        (Some((3,"b")), (3,"b")),
        (None, (4,"c")),
        (None, (5,"c"))
      )
      // Right outer, lift non-primitive value, then map to primitive
      q9 = (ys joinRight xs on (_.b === _.b)).map { case (yo, x) => (yo.map(_.a), x.a) }.to[Set]
      r9 <- mark("q9", q9.result)
      r9t: Set[(Option[Int], Int)] = r9
      _ = r9 shouldBe Set((None,4), (Some(2),3), (Some(3),2), (Some(2),2), (Some(3),3), (Some(1),1), (None,5))
      // Nested right outer, lift non-primitive value
      // (left-associative; not symmetrical to the nested left outer case)
      q10 = ((ys joinRight xs on (_.b === _.b)) joinRight xs on (_._1.map(_.b) === _.b)).to[Set]
      r10 <- mark("q10", q10.result)
      r10t: Set[(Option[(Option[(Int, String)], (Int, String))], (Int, String))] = r10
      _ = r10 shouldBe Set(
        (Some((Some((1,"a")),(1,"a"))),(1,"a")),
        (Some((Some((2,"b")),(2,"b"))),(2,"b")),
        (Some((Some((2,"b")),(2,"b"))),(3,"b")),
        (Some((Some((2,"b")),(3,"b"))),(2,"b")),
        (Some((Some((2,"b")),(3,"b"))),(3,"b")),
        (Some((Some((3,"b")),(2,"b"))),(2,"b")),
        (Some((Some((3,"b")),(2,"b"))),(3,"b")),
        (Some((Some((3,"b")),(3,"b"))),(2,"b")),
        (Some((Some((3,"b")),(3,"b"))),(3,"b")),
        (None,(4,"c")),
        (None,(5,"c"))
      )
      // Full outer, lift primitive values
      q11 = (xs.map(_.b) joinFull ys.map(_.b) on (_ === _)).to[Set]
      r11 <- mark("q11", q11.result)
      r11t: Set[(Option[String], Option[String])] = r11
      _ = r11 shouldBe Set((Some("a"),Some("a")), (Some("b"),Some("b")), (Some("c"),None), (None,Some("d")))
      // Full outer, lift non-primitive values
      q12 = (xs joinFull ys on (_.b === _.b)).to[Set]
      r12 <- mark("q12", q12.result)
      r12t: Set[(Option[(Int, String)], Option[(Int, String)])] = r12
      _ = r12 shouldBe Set(
        (Some((1,"a")),Some((1,"a"))),
        (Some((2,"b")),Some((2,"b"))),
        (Some((2,"b")),Some((3,"b"))),
        (Some((3,"b")),Some((2,"b"))),
        (Some((3,"b")),Some((3,"b"))),
        (Some((4,"c")),None),
        (Some((5,"c")),None),
        (None,Some((4,"d"))),
        (None,Some((5,"d")))
      )
    } yield ()
  }

  // A computed (non-column) expression in the default projection must survive
  // lifting into Option on the right side of a left join.
  def testComputedStarProjection = {
    class X(tag: Tag) extends Table[(Int, Int)](tag, "x_star") {
      def a = column[Int]("a")
      def b = column[Int]("b", O.Default(2))
      def * = (a, b * 10)
    }
    val xs = TableQuery[X]

    for {
      _ <- xs.schema.create
      _ <- xs.map(_.a) ++= Seq(1)
      q1 = xs joinLeft xs
      _ <- q1.result.map(_ shouldBe Vector(((1, 20), Some((1, 20)))))
    } yield ()
  }

  // Row-number based zip joins (zip / zipWith / zipWithIndex); only run on
  // profiles with the `zip` capability.
  def testZip = ifCap(rcap.zip) {
    class Categories(tag: Tag) extends Table[(Int, String)](tag, "cat_z") {
      def id = column[Int]("id")
      def name = column[String]("name")
      def * = (id, name)
    }
    val categories = TableQuery[Categories]

    class Posts(tag: Tag) extends Table[(Int, String, Int)](tag, "posts_z") {
      def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
      def title = column[String]("title")
      def category = column[Int]("category")
      def * = (id, title, category)
    }
    val posts = TableQuery[Posts]

    for {
      _ <- (categories.schema ++ posts.schema).create
      _ <- categories ++= Seq(
        (1, "Scala"),
        (3, "Windows"),
        (2, "ScalaQuery"),
        (4, "Software")
      )
      _ <- posts.map(p => (p.title, p.category)) ++= Seq(
        ("Test Post", -1),
        ("Formal Language Processing in Scala, Part 5", 1),
        ("Efficient Parameterized Queries in ScalaQuery", 2),
        ("Removing Libraries and HomeGroup icons from the Windows 7 desktop", 3),
        ("A ScalaQuery Update", 2)
      )
      q1 = for {
        (c, i) <- categories.sortBy(_.id).zipWithIndex
      } yield (c.id, i)
      _ <- mark("q1", q1.result).map(_ shouldBe List((1,0), (2,1), (3,2), (4,3)))
      q2 = for {
        (c, p) <- categories.sortBy(_.id) zip posts.sortBy(_.category)
      } yield (c.id, p.category)
      _ <- mark("q2", q2.result).map(_ shouldBe List((1,-1), (2,1), (3,2), (4,2)))
      q3 = for {
        (c, p) <- categories.sortBy(_.id) zip posts.sortBy(_.id)
      } yield (c.id, p.category)
      _ <- mark("q3", q3.result).map(_ shouldBe List((1, -1), (2, 1), (3, 2), (4, 3)))
      q4 = for {
        res <- categories.sortBy(_.id).zipWith(posts.sortBy(_.id), (c: Categories, p: Posts) => (c.id, p.category))
      } yield res
      _ <- mark("q4", q4.result).map(_ shouldBe List((1, -1), (2, 1), (3, 2), (4, 3)))
      q5 = for {
        (c, i) <- categories.sortBy(_.id).zipWithIndex
      } yield (c.id, i)
      _ <- mark("q5", q5.result).map(_ shouldBe List((1,0), (2,1), (3,2), (4,3)))
      // Without an explicit ordering, only the SET of indexes is guaranteed.
      q5b = for {
        (c, i) <- categories.zipWithIndex
      } yield (c.id, i)
      _ <- mark("q5b", q5b.result).map(_.map(_._2).toSet shouldBe Set(0L, 1L, 2L, 3L))
      q6 = for {
        ((c, p), i) <- (categories.sortBy(_.id) zip posts.sortBy(_.id)).zipWithIndex
      } yield (c.id, p.category, i)
      _ <- mark("q6", q6.result).map(_ shouldBe List((1, -1, 0), (2, 1, 1), (3, 2, 2), (4, 3, 3)))
    } yield ()
  }

  // Joins without an `on` condition must compile and run (results unchecked).
  def testNoJoinCondition = {
    class T(tag: Tag) extends Table[Int](tag, "t_nojoincondition") {
      def id = column[Int]("id")
      def * = id
    }
    lazy val ts = TableQuery[T]
    for {
      _ <- ts.schema.create
      q1 = ts joinLeft ts
      _ <- q1.result
      q2 = ts joinRight ts
      _ <- q2.result
      q3 = ts join ts
      _ <- q3.result
    } yield ()
  }

  // An outer join followed by an implicit join must flatten correctly,
  // whether composed from a reusable query (q2) or inline (q3).
  def testMixedJoin = {
    class A(tag: Tag) extends Table[Int](tag, "a_mixedjoin") {
      def id = column[Int]("id")
      def * = id
    }
    lazy val as = TableQuery[A]
    class B(tag: Tag) extends Table[Int](tag, "b_mixedjoin") {
      def foreignId = column[Int]("foreignId")
      def * = foreignId
    }
    lazy val bs = TableQuery[B]
    class C(tag: Tag) extends Table[Int](tag, "c_mixedjoin") {
      def foreignId = column[Int]("foreignId")
      def * = foreignId
    }
    lazy val cs = TableQuery[C]

    def q1 = for {
      (a, b) <- as joinLeft bs on (_.id === _.foreignId)
    } yield (a, b)
    def q2 = for {
      (a, b) <- q1
      c <- cs if c.foreignId === a.id
    } yield (a, c)
    def q3 = for {
      (a, b) <- as joinLeft bs on (_.id === _.foreignId)
      c <- cs if c.foreignId === a.id
    } yield (a, c)

    DBIO.seq(
      (as.schema ++ bs.schema ++ cs.schema).create,
      as ++= Seq(1,2,3),
      bs ++= Seq(1,2,4,5),
      cs ++= Seq(1,2,4,6),
      q1.result.named("q1").map(_.toSet shouldBe Set((1, Some(1)), (2, Some(2)), (3, None))),
      q2.result.named("q2").map(_.toSet shouldBe Set((1,1), (2,2))),
      q3.result.named("q3").map(_.toSet shouldBe Set((1,1), (2,2)))
    )
  }

  // Filtering on `isEmpty` of the Option side of an outer join must produce
  // the correct anti-join semantics, including when the joined column is
  // itself an Option (bs.id).
  def testDiscriminatorCheck = {
    class A(tag: Tag) extends Table[Int](tag, "a_joinfiltering") {
      def id = column[Int]("id")
      def * = id
    }
    lazy val as = TableQuery[A]
    class B(tag: Tag) extends Table[Option[Int]](tag, "b_joinfiltering") {
      def id = column[Option[Int]]("id")
      def * = id
    }
    lazy val bs = TableQuery[B]
    val q1 = for {
      (a, b) <- as joinLeft bs on (_.id.? === _.id) if (b.isEmpty)
    } yield (a.id)
    val q2 = bs.joinLeft(as).on(_.id === _.id).filter(_._2.isEmpty).map(_._1.id)
    DBIO.seq(
      (as.schema ++ bs.schema).create,
      as ++= Seq(1,2,3),
      bs ++= Seq(1,2,4,5).map(Some.apply _),
      q1.result.map(_.toSet shouldBe Set(3)),
      q2.result.map(_.toSet shouldBe Set(Some(4), Some(5)))
    )
  }
}
nafg/slick
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/JoinTest.scala
Scala
bsd-2-clause
13,644
package org.sisioh.aws4s.sns.model import com.amazonaws.services.sns.model.{ ListTopicsResult, Topic } import org.sisioh.aws4s.PimpedType import scala.collection.JavaConverters._ object ListTopicsResultFactory { def create(): ListTopicsResult = new ListTopicsResult() } class RichListTopicsResult(val underlying: ListTopicsResult) extends AnyVal with PimpedType[ListTopicsResult] { def nextTokenOpt: Option[String] = Option(underlying.getNextToken) def nextTokenOpt_=(value: Option[String]): Unit = underlying.setNextToken(value.orNull) def withNextTokenOpt(value: Option[String]): ListTopicsResult = underlying.withNextToken(value.orNull) def topics: Seq[Topic] = underlying.getTopics.asScala.toVector def topics_=(value: Seq[Topic]): Unit = underlying.setTopics(value.asJava) def withTopics(value: Seq[Topic]): ListTopicsResult = underlying.withTopics(value.asJava) }
sisioh/aws4s
aws4s-sns/src/main/scala/org/sisioh/aws4s/sns/model/RichListTopicsResult.scala
Scala
mit
921
/* Generated File */ package services.film import com.kyleu.projectile.models.result.data.DataField import com.kyleu.projectile.models.result.filter.Filter import com.kyleu.projectile.models.result.orderBy.OrderBy import com.kyleu.projectile.services.ModelServiceHelper import com.kyleu.projectile.services.database.JdbcDatabase import com.kyleu.projectile.util.{Credentials, CsvUtils} import com.kyleu.projectile.util.tracing.{TraceData, TracingService} import java.sql.Connection import java.time.ZonedDateTime import models.film.CategoryRow import models.queries.film.CategoryRowQueries import scala.concurrent.{ExecutionContext, Future} @javax.inject.Singleton class CategoryRowService @javax.inject.Inject() (val db: JdbcDatabase, override val tracing: TracingService)(implicit ec: ExecutionContext) extends ModelServiceHelper[CategoryRow]("categoryRow", "film" -> "CategoryRow") { def getByPrimaryKey(creds: Credentials, categoryId: Int, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("get.by.primary.key")(td => db.queryF(CategoryRowQueries.getByPrimaryKey(categoryId), conn)(td)) } def getByPrimaryKeyRequired(creds: Credentials, categoryId: Int, conn: Option[Connection] = None)(implicit trace: TraceData) = getByPrimaryKey(creds, categoryId, conn).map { opt => opt.getOrElse(throw new IllegalStateException(s"Cannot load categoryRow with categoryId [$categoryId]")) } def getByPrimaryKeySeq(creds: Credentials, categoryIdSeq: Seq[Int], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { if (categoryIdSeq.isEmpty) { Future.successful(Nil) } else { traceF("get.by.primary.key.seq")(td => db.queryF(CategoryRowQueries.getByPrimaryKeySeq(categoryIdSeq), conn)(td)) } } override def countAll(creds: Credentials, filters: Seq[Filter] = Nil, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("get.all.count")(td => db.queryF(CategoryRowQueries.countAll(filters), 
conn)(td)) } override def getAll(creds: Credentials, filters: Seq[Filter] = Nil, orderBys: Seq[OrderBy] = Nil, limit: Option[Int] = None, offset: Option[Int] = None, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("get.all")(td => db.queryF(CategoryRowQueries.getAll(filters, orderBys, limit, offset), conn)(td)) } // Search override def searchCount(creds: Credentials, q: Option[String], filters: Seq[Filter] = Nil, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("search.count")(td => db.queryF(CategoryRowQueries.searchCount(q, filters), conn)(td)) } override def search( creds: Credentials, q: Option[String], filters: Seq[Filter] = Nil, orderBys: Seq[OrderBy] = Nil, limit: Option[Int] = None, offset: Option[Int] = None, conn: Option[Connection] = None )(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("search")(td => db.queryF(CategoryRowQueries.search(q, filters, orderBys, limit, offset), conn)(td)) } def searchExact( creds: Credentials, q: String, orderBys: Seq[OrderBy] = Nil, limit: Option[Int] = None, offset: Option[Int] = None, conn: Option[Connection] = None )(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("search.exact")(td => db.queryF(CategoryRowQueries.searchExact(q, orderBys, limit, offset), conn)(td)) } def countByCategoryId(creds: Credentials, categoryId: Int, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("count.by.categoryId")(td => db.queryF(CategoryRowQueries.CountByCategoryId(categoryId), conn)(td)) } def getByCategoryId(creds: Credentials, categoryId: Int, orderBys: Seq[OrderBy] = Nil, limit: Option[Int] = None, offset: Option[Int] = None, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("get.by.categoryId")(td => db.queryF(CategoryRowQueries.GetByCategoryId(categoryId, orderBys, limit, offset), conn)(td)) } def 
getByCategoryIdSeq(creds: Credentials, categoryIdSeq: Seq[Int], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { if (categoryIdSeq.isEmpty) { Future.successful(Nil) } else { traceF("get.by.categoryId.seq") { td => db.queryF(CategoryRowQueries.GetByCategoryIdSeq(categoryIdSeq), conn)(td) } } } def countByLastUpdate(creds: Credentials, lastUpdate: ZonedDateTime, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("count.by.lastUpdate")(td => db.queryF(CategoryRowQueries.CountByLastUpdate(lastUpdate), conn)(td)) } def getByLastUpdate(creds: Credentials, lastUpdate: ZonedDateTime, orderBys: Seq[OrderBy] = Nil, limit: Option[Int] = None, offset: Option[Int] = None, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("get.by.lastUpdate")(td => db.queryF(CategoryRowQueries.GetByLastUpdate(lastUpdate, orderBys, limit, offset), conn)(td)) } def getByLastUpdateSeq(creds: Credentials, lastUpdateSeq: Seq[ZonedDateTime], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { if (lastUpdateSeq.isEmpty) { Future.successful(Nil) } else { traceF("get.by.lastUpdate.seq") { td => db.queryF(CategoryRowQueries.GetByLastUpdateSeq(lastUpdateSeq), conn)(td) } } } def countByName(creds: Credentials, name: String, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("count.by.name")(td => db.queryF(CategoryRowQueries.CountByName(name), conn)(td)) } def getByName(creds: Credentials, name: String, orderBys: Seq[OrderBy] = Nil, limit: Option[Int] = None, offset: Option[Int] = None, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "view") { traceF("get.by.name")(td => db.queryF(CategoryRowQueries.GetByName(name, orderBys, limit, offset), conn)(td)) } def getByNameSeq(creds: Credentials, nameSeq: Seq[String], conn: Option[Connection] = None)(implicit trace: 
TraceData) = checkPerm(creds, "view") { if (nameSeq.isEmpty) { Future.successful(Nil) } else { traceF("get.by.name.seq") { td => db.queryF(CategoryRowQueries.GetByNameSeq(nameSeq), conn)(td) } } } // Mutations def insert(creds: Credentials, model: CategoryRow, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "edit") { traceF("insert")(td => db.executeF(CategoryRowQueries.insert(model), conn)(td).flatMap { case 1 => getByPrimaryKey(creds, model.categoryId, conn)(td) case _ => throw new IllegalStateException("Unable to find newly-inserted Category") }) } def insertBatch(creds: Credentials, models: Seq[CategoryRow], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "edit") { traceF("insertBatch")(td => if (models.isEmpty) { Future.successful(0) } else { db.executeF(CategoryRowQueries.insertBatch(models), conn)(td) }) } def create(creds: Credentials, fields: Seq[DataField], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "edit") { traceF("create")(td => db.executeF(CategoryRowQueries.create(fields), conn)(td).flatMap { _ => getByPrimaryKey(creds, fieldVal(fields, "categoryId").toInt, conn) }) } def remove(creds: Credentials, categoryId: Int, conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "edit") { traceF("remove")(td => getByPrimaryKey(creds, categoryId, conn)(td).flatMap { case Some(current) => db.executeF(CategoryRowQueries.removeByPrimaryKey(categoryId), conn)(td).map(_ => current) case None => throw new IllegalStateException(s"Cannot find CategoryRow matching [$categoryId]") }) } def update(creds: Credentials, categoryId: Int, fields: Seq[DataField], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "edit") { traceF("update")(td => getByPrimaryKey(creds, categoryId, conn)(td).flatMap { case Some(current) if fields.isEmpty => Future.successful(current -> s"No changes required for Category [$categoryId]") case Some(_) 
=> db.executeF(CategoryRowQueries.update(categoryId, fields), conn)(td).flatMap { _ => getByPrimaryKey(creds, fields.find(_.k == "categoryId").flatMap(_.v).map(s => s.toInt).getOrElse(categoryId), conn)(td).map { case Some(newModel) => newModel -> s"Updated [${fields.size}] fields of Category [$categoryId]" case None => throw new IllegalStateException(s"Cannot find CategoryRow matching [$categoryId]") } } case None => throw new IllegalStateException(s"Cannot find CategoryRow matching [$categoryId]") }) } def updateBulk(creds: Credentials, pks: Seq[Int], fields: Seq[DataField], conn: Option[Connection] = None)(implicit trace: TraceData) = checkPerm(creds, "edit") { Future.sequence(pks.map(pk => update(creds, pk, fields, conn))).map { x => s"Updated [${fields.size}] fields for [${x.size} of ${pks.size}] CategoryRow" } } def csvFor(totalCount: Int, rows: Seq[CategoryRow])(implicit trace: TraceData) = { traceB("export.csv")(td => CsvUtils.csvFor(Some(key), totalCount, rows, CategoryRowQueries.fields)(td)) } }
KyleU/boilerplay
app/services/film/CategoryRowService.scala
Scala
cc0-1.0
9,491
/* * Copyright 2017 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.play.microservice.config import java.security.cert.X509Certificate import play.api.mvc.{Headers, RequestHeader} import play.api.test.FakeHeaders class DummyRequestHeader extends RequestHeader { override def remoteAddress: String = ??? override def headers: Headers = FakeHeaders(Seq.empty) override def queryString: Map[String, Seq[String]] = ??? override def version: String = ??? override def method: String = "GET" override def path: String = "/" override def uri: String = "/" override def tags: Map[String, String] = ??? override def id: Long = ??? override def secure: Boolean = false override def clientCertificateChain: Option[Seq[X509Certificate]] = ??? }
iteratoruk/microservice-bootstrap
src/test/scala/uk/gov/hmrc/play/microservice/config/DummyRequestHeader.scala
Scala
apache-2.0
1,327
package calc import java.util.concurrent.atomic.AtomicInteger import scala.util.Try /** * What is the largest number of these tetrominoes​ which can fit on a 7x7 grid without any overlap? * The pieces can be rotated and reflected. However, they cannot overlap and go off the grid. */ object Calc31 extends App { type Point = (Int, Int) type Grid = Array[Array[Boolean]] val shapes = Seq( Seq((1, 0), (1, 1), (2, 1)), Seq((0, 1), (-1, 1), (-1, 2)), Seq((-1, 0), (-1, -1), (-2, -1)), Seq((0, -1), (1, -1), (1, -2)), Seq((1, 0), (1, -1), (2, -1)), Seq((0, 1), (1, 1), (1, 2)), Seq((-1, 0), (-1, 1), (-2, 1)), Seq((0, -1), (-1, -1), (-1, -2)) ) def test(pt: Point, idx: Int, grid: Grid): Boolean = { val x = pt._1 val y = pt._2 Try { !grid(y)(x) && !grid(y + shapes(idx).head._2)(x + shapes(idx).head._1) && !grid(y + shapes(idx)(1)._2)(x + shapes(idx)(1)._1) && !grid(y + shapes(idx)(2)._2)(x + shapes(idx)(2)._1) } getOrElse false } def set(pt: Point, idx: Int, grid: Grid): Grid = { val x = pt._1 val y = pt._2 grid(y)(x) = true grid(y + shapes(idx).head._2)(x + shapes(idx).head._1) = true grid(y + shapes(idx)(1)._2)(x + shapes(idx)(1)._1) = true grid(y + shapes(idx)(2)._2)(x + shapes(idx)(2)._1) = true grid } val counter = new AtomicInteger(0) implicit class ArrayOps(val array: Grid) extends AnyRef { def dup: Grid = { val count = counter.incrementAndGet() if ((count % 10000) == 0) println(count) val a = new Grid(array.length) a.indices.foreach(y => a(y) = array(y).clone()) a } def free: Int = array.map(_.count(_ == false)).sum def print: String = { array.map(row => row.map(a => if (a) "x" else " ").mkString(" ")).mkString("\\n") } } def recurse(count: Int, grid: Grid): Seq[(Int, Grid)] = { if (grid.free < 10) Seq((count, grid)) // cheating to try to get to terminate early enough to complete else { val newGrids = for { y <- grid.indices.par x <- grid.indices if !grid(y)(x) i <- shapes.indices if test((x, y), i, grid) } yield set((x, y), i, grid.dup) newGrids.seq.flatMap(g => 
recurse(count + 1, g)) } } def mkGrid(): Grid = { val grid = new Grid(7) grid.indices.foreach(y => grid(y) = new Array[Boolean](7)) grid } val grid = mkGrid() val result = recurse(0, grid) val best = result.maxBy(_._1) println(best) }
ebowman/calc
src/main/scala/calc/Calc31.scala
Scala
unlicense
2,517
package parku30.mapslt import org.scalatest.FlatSpec class MapsltMapSearchServerTest extends FlatSpec { "MapSearchServer" should "construct empty search query if no params" in { assert("" === MapsltMapSearchServer.constructSearchQuery()) } it should "construct SaugomaTeritorija query" in { assert("SaugomaTeritorija = '1'" === MapsltMapSearchServer.constructSearchQuery(saugomaTeritorija = Some("1"))) } it should "construct full query for all params" in { val query = MapsltMapSearchServer.constructSearchQuery(saugomaTeritorija = Some("1"), savivaldybe = Some("2"), kategorija = Some("3")) assert("SaugomaTeritorija = '1' AND Savivaldybe = '2' AND Kategorija = '3'" == query) } it should "call Maps.lt and parse response" in { val result = MapsltMapSearchServer.query(saugomaTeritorija = Some("0700000000025")) val mapsltFeature = result.features.head assert("Velnio duobė" === mapsltFeature.attributes.Pavadinimas) assert(203 === mapsltFeature.attributes.Kategorija) } }
pukomuko/mapslt-to-kml
src/test/scala/parku30/mapslt/MapsltMapSearchServerTest.scala
Scala
mit
1,044
package tanukkii.akkahttp.aws import com.amazonaws.AmazonServiceException import com.amazonaws.http.HttpResponseHandler trait AWSServiceContext[S <: AWSService] { val service: S val errorResponseHandler: HttpResponseHandler[AmazonServiceException] }
TanUkkii007/akka-http-aws
akka-http-aws-core/src/main/scala/tanukkii/akkahttp/aws/AWSServiceContext.scala
Scala
mit
256
/* * Copyright 2019 ACINQ SAS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package fr.acinq.eclair.transactions import fr.acinq.bitcoin.SatoshiLong import fr.acinq.eclair.MilliSatoshi import fr.acinq.eclair.blockchain.fee.FeeratePerKw import fr.acinq.eclair.transactions.Transactions.{CommitmentFormat, ZeroFeeHtlcTxAnchorOutputsCommitmentFormat} import fr.acinq.eclair.wire.protocol._ /** * Created by PM on 07/12/2016. */ sealed trait CommitmentOutput object CommitmentOutput { case object ToLocal extends CommitmentOutput case object ToRemote extends CommitmentOutput case object ToLocalAnchor extends CommitmentOutput case object ToRemoteAnchor extends CommitmentOutput case class InHtlc(incomingHtlc: IncomingHtlc) extends CommitmentOutput case class OutHtlc(outgoingHtlc: OutgoingHtlc) extends CommitmentOutput } sealed trait DirectedHtlc { val add: UpdateAddHtlc def opposite: DirectedHtlc = this match { case IncomingHtlc(_) => OutgoingHtlc(add) case OutgoingHtlc(_) => IncomingHtlc(add) } def direction: String = this match { case IncomingHtlc(_) => "IN" case OutgoingHtlc(_) => "OUT" } } object DirectedHtlc { def incoming: PartialFunction[DirectedHtlc, UpdateAddHtlc] = { case h: IncomingHtlc => h.add } def outgoing: PartialFunction[DirectedHtlc, UpdateAddHtlc] = { case h: OutgoingHtlc => h.add } } case class IncomingHtlc(add: UpdateAddHtlc) extends DirectedHtlc case class OutgoingHtlc(add: UpdateAddHtlc) extends DirectedHtlc final case class CommitmentSpec(htlcs: Set[DirectedHtlc], 
commitTxFeerate: FeeratePerKw, toLocal: MilliSatoshi, toRemote: MilliSatoshi) { def htlcTxFeerate(commitmentFormat: CommitmentFormat): FeeratePerKw = commitmentFormat match { case ZeroFeeHtlcTxAnchorOutputsCommitmentFormat => FeeratePerKw(0 sat) case _ => commitTxFeerate } def findIncomingHtlcById(id: Long): Option[IncomingHtlc] = htlcs.collectFirst { case htlc: IncomingHtlc if htlc.add.id == id => htlc } def findOutgoingHtlcById(id: Long): Option[OutgoingHtlc] = htlcs.collectFirst { case htlc: OutgoingHtlc if htlc.add.id == id => htlc } } object CommitmentSpec { def removeHtlc(changes: List[UpdateMessage], id: Long): List[UpdateMessage] = changes.filterNot { case u: UpdateAddHtlc => u.id == id case _ => false } def addHtlc(spec: CommitmentSpec, directedHtlc: DirectedHtlc): CommitmentSpec = { directedHtlc match { case OutgoingHtlc(add) => spec.copy(toLocal = spec.toLocal - add.amountMsat, htlcs = spec.htlcs + directedHtlc) case IncomingHtlc(add) => spec.copy(toRemote = spec.toRemote - add.amountMsat, htlcs = spec.htlcs + directedHtlc) } } def fulfillIncomingHtlc(spec: CommitmentSpec, htlcId: Long): CommitmentSpec = { spec.findIncomingHtlcById(htlcId) match { case Some(htlc) => spec.copy(toLocal = spec.toLocal + htlc.add.amountMsat, htlcs = spec.htlcs - htlc) case None => throw new RuntimeException(s"cannot find htlc id=$htlcId") } } def fulfillOutgoingHtlc(spec: CommitmentSpec, htlcId: Long): CommitmentSpec = { spec.findOutgoingHtlcById(htlcId) match { case Some(htlc) => spec.copy(toRemote = spec.toRemote + htlc.add.amountMsat, htlcs = spec.htlcs - htlc) case None => throw new RuntimeException(s"cannot find htlc id=$htlcId") } } def failIncomingHtlc(spec: CommitmentSpec, htlcId: Long): CommitmentSpec = { spec.findIncomingHtlcById(htlcId) match { case Some(htlc) => spec.copy(toRemote = spec.toRemote + htlc.add.amountMsat, htlcs = spec.htlcs - htlc) case None => throw new RuntimeException(s"cannot find htlc id=$htlcId") } } def failOutgoingHtlc(spec: CommitmentSpec, 
htlcId: Long): CommitmentSpec = { spec.findOutgoingHtlcById(htlcId) match { case Some(htlc) => spec.copy(toLocal = spec.toLocal + htlc.add.amountMsat, htlcs = spec.htlcs - htlc) case None => throw new RuntimeException(s"cannot find htlc id=$htlcId") } } def reduce(localCommitSpec: CommitmentSpec, localChanges: List[UpdateMessage], remoteChanges: List[UpdateMessage]): CommitmentSpec = { val spec1 = localChanges.foldLeft(localCommitSpec) { case (spec, u: UpdateAddHtlc) => addHtlc(spec, OutgoingHtlc(u)) case (spec, _) => spec } val spec2 = remoteChanges.foldLeft(spec1) { case (spec, u: UpdateAddHtlc) => addHtlc(spec, IncomingHtlc(u)) case (spec, _) => spec } val spec3 = localChanges.foldLeft(spec2) { case (spec, u: UpdateFulfillHtlc) => fulfillIncomingHtlc(spec, u.id) case (spec, u: UpdateFailHtlc) => failIncomingHtlc(spec, u.id) case (spec, u: UpdateFailMalformedHtlc) => failIncomingHtlc(spec, u.id) case (spec, _) => spec } val spec4 = remoteChanges.foldLeft(spec3) { case (spec, u: UpdateFulfillHtlc) => fulfillOutgoingHtlc(spec, u.id) case (spec, u: UpdateFailHtlc) => failOutgoingHtlc(spec, u.id) case (spec, u: UpdateFailMalformedHtlc) => failOutgoingHtlc(spec, u.id) case (spec, _) => spec } val spec5 = (localChanges ++ remoteChanges).foldLeft(spec4) { case (spec, u: UpdateFee) => spec.copy(commitTxFeerate = u.feeratePerKw) case (spec, _) => spec } spec5 } }
ACINQ/eclair
eclair-core/src/main/scala/fr/acinq/eclair/transactions/CommitmentSpec.scala
Scala
apache-2.0
5,764
package com.atomist.tree.marshal import com.atomist.parse.java.ParsingTargets import com.atomist.rug.TestUtils import com.atomist.rug.kind.DefaultTypeRegistry import com.atomist.rug.kind.core.{FixedBranchRepoResolver, FixedShaRepoResolver, ProjectMutableView} import com.atomist.rug.runtime.js.{SimpleContainerGraphNode, SimpleExecutionContext} import com.atomist.tree.TreeNode import com.atomist.tree.pathexpression.{PathExpression, PathExpressionEngine} import org.scalatest.{FlatSpec, Matchers} class LinkedJsonGraphDeserializerPathExpressionTest extends FlatSpec with Matchers { import com.atomist.tree.pathexpression.PathExpressionParser.parseString val pe = new PathExpressionEngine "deserialized JSON: path expressions" should "work against simple tree" in { val node = LinkedJsonGraphDeserializer.fromJson(TestUtils.contentOf(this, "simple.json")) pe.evaluate(SimpleContainerGraphNode("root", node), "/Issue()") match { case Right(nodes) => assert(nodes.size === 1) case x => fail(s"Unexpected: $x") } } it should "work against tree of n depth" in { val withLinks = TestUtils.contentOf(this, "withLinks.json") val node = LinkedJsonGraphDeserializer.fromJson(withLinks) pe.evaluate(SimpleContainerGraphNode("root", node), "/Build()[@status='Passed']/ON::Repo()/CHANNEL::ChatChannel()") match { case Right(nodes) => assert(nodes.size === 1) case x => fail(s"Unexpected: $x") } assert(node.nodeTags === Set("Build", TreeNode.Dynamic)) assert(node.relatedNodesNamed("status").head.asInstanceOf[TreeNode].value === "Passed") val repo = node.relatedNodesNamed("ON").head assert(repo.relatedNodesNamed("owner").size === 1) val chatChannel = repo.relatedNodesNamed("CHANNEL").head assert(chatChannel.relatedNodesNamed("name").size === 1) assert(chatChannel.relatedNodesNamed("id").head.asInstanceOf[TreeNode].value === "channel-id") } it should "handle an empty result set" in { val node = LinkedJsonGraphDeserializer.fromJson("[]") pe.evaluate(SimpleContainerGraphNode("root", node), "/*") match { 
case Right(nodes) => nodes.size should be (1) assert(nodes.head.nodeTags === Set.empty) case x => fail(s"Unexpected: $x") } } it should "handle unresolvable in Repo -> Project using master" in { val as = ParsingTargets.NewStartSpringIoProject val ec = SimpleExecutionContext(DefaultTypeRegistry, Some(FixedBranchRepoResolver("owner", "repo-name", "master", as))) val json = TestUtils.contentOf(this, "withLinksAndUnresolvable.json") val node = LinkedJsonGraphDeserializer.fromJson(json) val pex: PathExpression = "/Build()/ON::Repo()/master::Project()" pe.evaluate(SimpleContainerGraphNode("root", node), pex, ec) match { case Right(nodes) => nodes.size should be (1) val proj = nodes.head.asInstanceOf[ProjectMutableView] assert(proj.totalFileCount === as.totalFileCount) case x => fail(s"Unexpected: $x") } } it should "handle unresolvable in Repo -> Project using sha" in { val sha = "d6cd1e2bd19e03a81132a23b2025920577f84e37" val as = ParsingTargets.NewStartSpringIoProject val ec = SimpleExecutionContext(DefaultTypeRegistry, Some(FixedShaRepoResolver("owner", "repo-name", sha, as))) val json = TestUtils.contentOf(this, "withLinksAndUnresolvable.json") val node = LinkedJsonGraphDeserializer.fromJson(json) val pex: PathExpression = s"/Build()/ON::Repo()/$sha::Project()" pe.evaluate(SimpleContainerGraphNode("root", node), pex, ec) match { case Right(nodes) => nodes.size should be (1) val proj = nodes.head.asInstanceOf[ProjectMutableView] assert(proj.totalFileCount === as.totalFileCount) case x => fail(s"Unexpected: $x") } } it should "produce appropriate error message against real Push data missing commit" in { val sha = "d6cd1e2bd19e03a81132a23b2025920577f84e37" val as = ParsingTargets.NewStartSpringIoProject val ec = SimpleExecutionContext(DefaultTypeRegistry, Some(FixedShaRepoResolver("owner", "repo-name", sha, as))) val json = TestUtils.contentOf(this, "realPushWithoutCommitToRepo.json") val node = LinkedJsonGraphDeserializer.fromJson(json) val pex: PathExpression = 
s"/Push()/after::Commit()/source::Project()" try { pe.evaluate(SimpleContainerGraphNode("root", node), pex, ec) fail() } catch { case ex: IllegalArgumentException => assert(ex.getMessage.contains("Commit") && ex.getMessage.contains("Repo")) } } }
atomist/rug
src/test/scala/com/atomist/tree/marshal/LinkedJsonGraphDeserializerPathExpressionTest.scala
Scala
gpl-3.0
4,630
package controllers import org.intracer.finance.{Expenditure, User} import org.intracer.finance.slick._ import org.specs2.mock.Mockito import play.api.db.DBApi import play.api.db.slick.{SlickApi, SlickModule} import play.api.inject.bind import play.api.inject.guice.GuiceApplicationBuilder import play.api.libs.Codecs import play.api.test.TestBrowser trait WebSpecUtil extends Mockito { val defaultEmail = "dev@dot.com" val defaultPassword = "1234" val defaultUserId = 12 val defaultUser = User(Some(defaultUserId), "Dev", defaultEmail, password = Some(defaultPassword)) val accountDao = mock[AccountDao] val categoryDao = mock[CategoryDao] val grantDao = mock[GrantDao] val grantItemsDao = mock[GrantItemsDao] val projectDao = mock[ProjectDao] val userDao = mockUserDao() val expenditureDao = mockExpenditureDao() def noDbApp = new GuiceApplicationBuilder() .disable(classOf[SlickModule]) .disable(classOf[play.api.db.evolutions.EvolutionsModule]) .disable(classOf[play.api.db.slick.evolutions.EvolutionsModule]) .bindings(bind(classOf[DBApi]).to(mock[DBApi])) .bindings(bind(classOf[play.api.db.Database]).to(mock[play.api.db.Database])) .bindings(bind(classOf[play.db.Database]).to(mock[play.db.Database])) .bindings(bind(classOf[SlickApi]).to(mock[SlickApi])) .bindings(bind[AccountDao].to(accountDao)) .bindings(bind[CategoryDao].to(categoryDao)) .bindings(bind[ExpenditureDao].to(expenditureDao)) .bindings(bind[GrantDao].to(grantDao)) .bindings(bind[GrantItemsDao].to(grantItemsDao)) .bindings(bind[ProjectDao].to(projectDao)) .bindings(bind[UserDao].to(userDao)) def login(browser: TestBrowser, user: User = defaultUser) = browser.goTo("/") .fill("#login").`with`(user.email) .fill("#password").`with`(user.password.get) .submit("#submit") def waitForUrl(url: String, browser: TestBrowser) = browser.waitUntil(browser.url() == url) def mockUserDao(user: User = defaultUser): UserDao = { val userDao = mock[UserDao] userDao.count returns 1 userDao.login(user.email, user.password.get) returns 
Some(withSha1(user)) userDao.byEmail(user.email) returns Some(withSha1(user)) userDao } def withSha1(user: User) = user.copy(password = user.password.map(s => Codecs.sha1(s.getBytes))) def mockExpenditureDao(list: Seq[Expenditure] = Nil): ExpenditureDao = { val expenditureDao = mock[ExpenditureDao] expenditureDao.list returns list expenditureDao } }
intracer/wmua-finance
test/controllers/WebSpecUtil.scala
Scala
apache-2.0
2,519
package com.raquo.domtypes.generic.defs.styles.keywords

/**
  * Keyword values for the CSS `font-style` property.
  *
  * `T` is the output type produced by the inherited `buildStringValue` builder
  * (e.g. a style setter or keyword wrapper, depending on the concrete trait mix-in).
  * The `normal` keyword is presumably inherited from [[NormalStyle]] — not visible here.
  *
  * @see https://developer.mozilla.org/en-US/docs/Web/CSS/font-style
  */
trait FontStyleStyle[T] extends NormalStyle[T] {

  /**
    * Selects a font that is labeled italic, if that is not available,
    * one labeled oblique  --MDN
    */
  lazy val italic: T = buildStringValue("italic")

  /** Selects a font that is labeled oblique  --MDN */
  lazy val oblique: T = buildStringValue("oblique")
}
raquo/scala-dom-types
shared/src/main/scala/com/raquo/domtypes/generic/defs/styles/keywords/FontStyleStyle.scala
Scala
mit
456
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.tools

import joptsimple.OptionParser
import java.util.Properties
import java.util.Random
import java.io._
import kafka.consumer._
import kafka.serializer._
import kafka.utils._
import kafka.log.FileMessageSet
import kafka.log.Log
import org.apache.kafka.clients.producer.{ProducerRecord, KafkaProducer, ProducerConfig}

/**
 * This is a torture test that runs against an existing broker. Here is how it works:
 *
 * It produces a series of specially formatted messages to one or more partitions. Each message it produces
 * it logs out to a text file. The messages have a limited set of keys, so there is duplication in the key space.
 *
 * The broker will clean its log as the test runs.
 *
 * When the specified number of messages have been produced we create a consumer and consume all the messages in the topic
 * and write that out to another text file.
 *
 * Using a stable unix sort we sort both the producer log of what was sent and the consumer log of what was retrieved by the message key.
 * Then we compare the final message in both logs for each key. If this final message is not the same for all keys we
 * print an error and exit with exit code 1, otherwise we print the size reduction and exit with exit code 0.
 */
object TestLogCleaning {

  /**
   * Entry point. Parses CLI options, produces keyed messages (with optional delete
   * tombstones), optionally sleeps to let the broker's log cleaner run, consumes the
   * topic back, and validates that the last value seen per key matches on both sides.
   */
  def main(args: Array[String]) {
    val parser = new OptionParser
    val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume.")
                               .withRequiredArg
                               .describedAs("count")
                               .ofType(classOf[java.lang.Long])
                               .defaultsTo(Long.MaxValue)
    val numDupsOpt = parser.accepts("duplicates", "The number of duplicates for each key.")
                           .withRequiredArg
                           .describedAs("count")
                           .ofType(classOf[java.lang.Integer])
                           .defaultsTo(5)
    val brokerOpt = parser.accepts("broker", "Url to connect to.")
                          .withRequiredArg
                          .describedAs("url")
                          .ofType(classOf[String])
    val topicsOpt = parser.accepts("topics", "The number of topics to test.")
                          .withRequiredArg
                          .describedAs("count")
                          .ofType(classOf[java.lang.Integer])
                          .defaultsTo(1)
    val percentDeletesOpt = parser.accepts("percent-deletes", "The percentage of updates that are deletes.")
                                  .withRequiredArg
                                  .describedAs("percent")
                                  .ofType(classOf[java.lang.Integer])
                                  .defaultsTo(0)
    val zkConnectOpt = parser.accepts("zk", "Zk url.")
                             .withRequiredArg
                             .describedAs("url")
                             .ofType(classOf[String])
    val sleepSecsOpt = parser.accepts("sleep", "Time to sleep between production and consumption.")
                             .withRequiredArg
                             .describedAs("ms")
                             .ofType(classOf[java.lang.Integer])
                             .defaultsTo(0)
    val dumpOpt = parser.accepts("dump", "Dump the message contents of a topic partition that contains test data from this test to standard out.")
                        .withRequiredArg
                        .describedAs("directory")
                        .ofType(classOf[String])

    val options = parser.parse(args:_*)

    if(args.length == 0)
      CommandLineUtils.printUsageAndDie(parser, "An integration test for log cleaning.")

    // --dump mode: print an on-disk log segment and exit, without producing/consuming.
    if(options.has(dumpOpt)) {
      dumpLog(new File(options.valueOf(dumpOpt)))
      System.exit(0)
    }

    CommandLineUtils.checkRequiredArgs(parser, options, brokerOpt, zkConnectOpt, numMessagesOpt)

    // parse options
    val messages = options.valueOf(numMessagesOpt).longValue
    val percentDeletes = options.valueOf(percentDeletesOpt).intValue
    val dups = options.valueOf(numDupsOpt).intValue
    val brokerUrl = options.valueOf(brokerOpt)
    val topicCount = options.valueOf(topicsOpt).intValue
    val zkUrl = options.valueOf(zkConnectOpt)
    val sleepSecs = options.valueOf(sleepSecsOpt).intValue

    // Random test id keeps topic names unique across runs against the same broker.
    val testId = new Random().nextInt(Int.MaxValue)
    val topics = (0 until topicCount).map("log-cleaner-test-" + testId + "-" + _).toArray

    println("Producing %d messages...".format(messages))
    val producedDataFile = produceMessages(brokerUrl, topics, messages, dups, percentDeletes)
    println("Sleeping for %d seconds...".format(sleepSecs))
    // NOTE(review): Int multiplication — a sleepSecs larger than ~24 days would overflow; harmless in practice.
    Thread.sleep(sleepSecs * 1000)
    println("Consuming messages...")
    val consumedDataFile = consumeMessages(zkUrl, topics)

    val producedLines = lineCount(producedDataFile)
    val consumedLines = lineCount(consumedDataFile)
    // Fraction of rows removed between what was produced and what survived cleaning.
    val reduction = 1.0 - consumedLines.toDouble/producedLines.toDouble
    println("%d rows of data produced, %d rows of data consumed (%.1f%% reduction).".format(producedLines, consumedLines, 100 * reduction))

    println("De-duplicating and validating output files...")
    validateOutput(producedDataFile, consumedDataFile)
    producedDataFile.delete()
    consumedDataFile.delete()
  }

  /**
   * Prints offset, key and value of every message in each `.log` segment file found
   * (non-recursively) in the given directory. A null payload is printed as "null".
   */
  def dumpLog(dir: File) {
    require(dir.exists, "Non-existent directory: " + dir.getAbsolutePath)
    for(file <- dir.list.sorted; if file.endsWith(Log.LogFileSuffix)) {
      val ms = new FileMessageSet(new File(dir, file))
      for(entry <- ms) {
        val key = Utils.readString(entry.message.key)
        val content =
          if(entry.message.isNull)
            null
          else
            Utils.readString(entry.message.payload)
        println("offset = %s, key = %s, content = %s".format(entry.offset, key, content))
      }
    }
  }

  /** Number of text lines in the file (reads the whole file). */
  def lineCount(file: File): Int = io.Source.fromFile(file).getLines.size

  /**
   * Externally sorts both log files, collapses each to the final surviving record per
   * (topic, key), writes the de-duplicated rows to ".deduped" side files, and requires
   * that producer and consumer agree row-for-row. Any mismatch or leftover rows on
   * either side fails the run via `require`.
   */
  def validateOutput(producedDataFile: File, consumedDataFile: File) {
    val producedReader = externalSort(producedDataFile)
    val consumedReader = externalSort(consumedDataFile)
    val produced = valuesIterator(producedReader)
    val consumed = valuesIterator(consumedReader)
    val producedDedupedFile = new File(producedDataFile.getAbsolutePath + ".deduped")
    val producedDeduped = new BufferedWriter(new FileWriter(producedDedupedFile), 1024*1024)
    val consumedDedupedFile = new File(consumedDataFile.getAbsolutePath + ".deduped")
    val consumedDeduped = new BufferedWriter(new FileWriter(consumedDedupedFile), 1024*1024)
    var total = 0
    var mismatched = 0
    while(produced.hasNext && consumed.hasNext) {
      val p = produced.next()
      producedDeduped.write(p.toString)
      producedDeduped.newLine()
      val c = consumed.next()
      consumedDeduped.write(c.toString)
      consumedDeduped.newLine()
      if(p != c)
        mismatched += 1
      total += 1
    }
    producedDeduped.close()
    consumedDeduped.close()
    require(!produced.hasNext, "Additional values produced not found in consumer log.")
    require(!consumed.hasNext, "Additional values consumed not found in producer log.")
    println("Validated " + total + " values, " + mismatched + " mismatches.")
    require(mismatched == 0, "Non-zero number of row mismatches.")
    // if all the checks worked out we can delete the deduped files
    producedDedupedFile.delete()
    consumedDedupedFile.delete()
  }

  /**
   * Iterator over the final (de-duplicated) record per (topic, key), skipping records
   * whose last action was a delete. Ends (`allDone`) when the reader is exhausted.
   */
  def valuesIterator(reader: BufferedReader) = {
    new IteratorTemplate[TestRecord] {
      def makeNext(): TestRecord = {
        var next = readNext(reader)
        while(next != null && next.delete)
          next = readNext(reader)
        if(next == null)
          allDone()
        else
          next
      }
    }
  }

  /**
   * Reads the run of consecutive lines sharing one (topic, key) — the input is sorted,
   * so duplicates are adjacent — and returns only the LAST record of the run.
   * Returns null at end of input.
   */
  def readNext(reader: BufferedReader): TestRecord = {
    var line = reader.readLine()
    if(line == null)
      return null
    var curr = new TestRecord(line)
    while(true) {
      line = peekLine(reader)
      if(line == null)
        return curr
      val next = new TestRecord(line)
      // NOTE(review): `next == null` can never be true (`new` never yields null); dead check.
      if(next == null || next.topicAndKey != curr.topicAndKey)
        return curr
      curr = next
      reader.readLine()
    }
    // Unreachable: the while(true) loop always returns. Present only to satisfy the type checker.
    null
  }

  /**
   * Returns the next line without consuming it, using mark/reset.
   * NOTE(review): the mark read-ahead limit is 4096 chars — a longer line would
   * invalidate the mark; record lines here are short, so this presumably holds.
   */
  def peekLine(reader: BufferedReader) = {
    reader.mark(4096)
    val line = reader.readLine
    reader.reset()
    line
  }

  /**
   * Pipes the file through the unix `sort` command (stable, keyed on the first two
   * tab-separated fields: topic and key) and returns a reader over sorted output.
   * A daemon-ish helper thread drains/reports stderr if the process fails.
   */
  def externalSort(file: File): BufferedReader = {
    val builder = new ProcessBuilder("sort", "--key=1,2", "--stable", "--buffer-size=20%",
      "--temporary-directory=" + System.getProperty("java.io.tmpdir"), file.getAbsolutePath)
    val process = builder.start()
    new Thread() {
      override def run() {
        val exitCode = process.waitFor()
        if(exitCode != 0) {
          System.err.println("Process exited abnormally.")
          while(process.getErrorStream.available > 0) {
            System.err.write(process.getErrorStream().read())
          }
        }
      }
    }.start()
    new BufferedReader(new InputStreamReader(process.getInputStream()), 10*1024*1024)
  }

  /**
   * Produces `messages * topics.length` records round-robin across topics. Keys are
   * drawn from a fixed-size key space (`messages / dups` keys) so the log cleaner has
   * duplicates to collapse; `percentDeletes` percent of records are null-value
   * tombstones. Every send is also appended to a temp text file, which is returned.
   */
  def produceMessages(brokerUrl: String,
                      topics: Array[String],
                      messages: Long,
                      dups: Int,
                      percentDeletes: Int): File = {
    val producerProps = new Properties
    producerProps.setProperty(ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, "true")
    producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl)
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
    val producer = new KafkaProducer[Array[Byte],Array[Byte]](producerProps)
    // Fixed seed: the key sequence is reproducible across runs.
    val rand = new Random(1)
    val keyCount = (messages / dups).toInt
    val producedFile = File.createTempFile("kafka-log-cleaner-produced-", ".txt")
    println("Logging produce requests to " + producedFile.getAbsolutePath)
    val producedWriter = new BufferedWriter(new FileWriter(producedFile), 1024*1024)
    for(i <- 0L until (messages * topics.length)) {
      val topic = topics((i % topics.length).toInt)
      val key = rand.nextInt(keyCount)
      val delete = i % 100 < percentDeletes
      val msg =
        if(delete)
          new ProducerRecord[Array[Byte],Array[Byte]](topic, key.toString.getBytes(), null)
        else
          new ProducerRecord[Array[Byte],Array[Byte]](topic, key.toString.getBytes(), i.toString.getBytes())
      producer.send(msg)
      producedWriter.write(TestRecord(topic, key, i, delete).toString)
      producedWriter.newLine()
    }
    producedWriter.close()
    producer.close()
    producedFile
  }

  /**
   * Builds a ZooKeeper-based consumer with a unique group id, reading from the earliest
   * offset and timing out after 20s of inactivity (used below to detect end of stream).
   */
  def makeConsumer(zkUrl: String, topics: Array[String]): ZookeeperConsumerConnector = {
    val consumerProps = new Properties
    consumerProps.setProperty("group.id", "log-cleaner-test-" + new Random().nextInt(Int.MaxValue))
    consumerProps.setProperty("zookeeper.connect", zkUrl)
    consumerProps.setProperty("consumer.timeout.ms", (20*1000).toString)
    consumerProps.setProperty("auto.offset.reset", "smallest")
    new ZookeeperConsumerConnector(new ConsumerConfig(consumerProps))
  }

  /**
   * Consumes each topic until a ConsumerTimeoutException signals no more data, writing
   * one TestRecord line per message to a temp file (value -1 for tombstones), and
   * returns that file.
   */
  def consumeMessages(zkUrl: String, topics: Array[String]): File = {
    val connector = makeConsumer(zkUrl, topics)
    val streams = connector.createMessageStreams(topics.map(topic => (topic, 1)).toMap, new StringDecoder, new StringDecoder)
    val consumedFile = File.createTempFile("kafka-log-cleaner-consumed-", ".txt")
    println("Logging consumed messages to " + consumedFile.getAbsolutePath)
    val consumedWriter = new BufferedWriter(new FileWriter(consumedFile))
    for(topic <- topics) {
      val stream = streams(topic).head
      try {
        for(item <- stream) {
          val delete = item.message == null
          val value = if(delete) -1L else item.message.toLong
          consumedWriter.write(TestRecord(topic, item.key.toInt, value, delete).toString)
          consumedWriter.newLine()
        }
      } catch {
        // Expected: the 20s consumer timeout is how we detect the end of the topic.
        case e: ConsumerTimeoutException =>
      }
    }
    consumedWriter.close()
    connector.shutdown()
    consumedFile
  }

}

/**
 * One produced or consumed message, serialized as a tab-separated line:
 * topic, key, value, and "d" (delete) or "u" (update).
 */
case class TestRecord(val topic: String, val key: Int, val value: Long, val delete: Boolean) {
  // Parse from the tab-separated fields of a serialized line.
  def this(pieces: Array[String]) = this(pieces(0), pieces(1).toInt, pieces(2).toLong, pieces(3) == "d")
  def this(line: String) = this(line.split("\t"))
  override def toString() = topic + "\t" +  key + "\t" + value + "\t" + (if(delete) "d" else "u")
  // Grouping key used to detect runs of duplicates in the sorted files.
  def topicAndKey = topic + key
}
cran/rkafkajars
java/kafka/tools/TestLogCleaning.scala
Scala
apache-2.0
13,457
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.util.collection

import java.lang.{Float => JFloat}
import java.util.{Arrays, Comparator}

import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.Logging
import org.apache.spark.util.random.XORShiftRandom

/**
 * Correctness tests and (normally-ignored) micro-benchmarks for [[Sorter]],
 * using `java.util.Arrays.sort` as the reference implementation.
 */
class SorterSuite extends SparkFunSuite with Logging {

  // Sorter (with and without key reuse) must order ints exactly like Arrays.sort.
  test("equivalent to Arrays.sort") {
    val rand = new XORShiftRandom(123)
    val data0 = Array.tabulate[Int](10000) { i => rand.nextInt() }
    val data1 = data0.clone()
    val data2 = data0.clone()

    Arrays.sort(data0)
    new Sorter(new IntArraySortDataFormat).sort(data1, 0, data1.length, Ordering.Int)
    new Sorter(new KeyReuseIntArraySortDataFormat)
      .sort(data2, 0, data2.length, Ordering[IntWrapper])

    assert(data0.view === data1.view)
    assert(data0.view === data2.view)
  }

  // Sorting a flat [key, value, key, value, ...] array must keep each key paired
  // with its original value and order keys like Arrays.sort.
  test("KVArraySorter") {
    val rand = new XORShiftRandom(456)

    // Construct an array of keys (to Java sort) and an array where the keys and values
    // alternate. Keys are random doubles, values are ordinals from 0 to length.
    val keys = Array.tabulate[Double](5000) { i => rand.nextDouble() }
    val keyValueArray = Array.tabulate[Number](10000) { i =>
      if (i % 2 == 0) keys(i / 2) else Integer.valueOf(i / 2)
    }

    // Map from generated keys to values, to verify correctness later
    val kvMap = keyValueArray.grouped(2).map {
      case Array(k, v) => k.doubleValue() -> v.intValue()
    }.toMap

    Arrays.sort(keys)
    new Sorter(new KVArraySortDataFormat[Double, Number])
      .sort(keyValueArray, 0, keys.length, Ordering.Double)

    keys.zipWithIndex.foreach { case (k, i) =>
      assert(k === keyValueArray(2 * i))
      assert(kvMap(k) === keyValueArray(2 * i + 1))
    }
  }

  // Regression test: an adversarial input that triggered the original TimSort
  // invariant bug must still come out fully sorted.
  // http://www.envisage-project.eu/timsort-specification-and-verification/
  test("SPARK-5984 TimSort bug") {
    val data = TestTimSort.getTimSortBugTestSet(67108864)
    new Sorter(new IntArraySortDataFormat).sort(data, 0, data.length, Ordering.Int)
    (0 to data.length - 2).foreach(i => assert(data(i) <= data(i + 1)))
  }

  /**
   * Runs an experiment several times: one warm-up timing, a GC, then ten timed
   * runs whose individual and average durations are logged. `prepare` resets the
   * input before each run so every timing sorts identical data.
   */
  def runExperiment(name: String, skip: Boolean = false)(f: => Unit, prepare: () => Unit): Unit = {
    if (skip) {
      logInfo(s"Skipped experiment $name.")
      return
    }

    val firstTry = org.apache.spark.util.Utils.timeIt(1)(f, Some(prepare))
    System.gc()

    var i = 0
    var next10: Long = 0
    while (i < 10) {
      val time = org.apache.spark.util.Utils.timeIt(1)(f, Some(prepare))
      next10 += time
      logInfo(s"$name: Took $time ms")
      i += 1
    }

    logInfo(s"$name: ($firstTry ms first try, ${next10 / 10} ms average)")
  }

  /**
   * This provides a simple benchmark for comparing the Sorter with Java internal sorting.
   * Ideally these would be executed one at a time, each in their own JVM, so their listing
   * here is mainly to have the code. Running multiple tests within the same JVM session would
   * prevent JIT inlining overridden methods and hence hurt the performance.
   *
   * The goal of this code is to sort an array of key-value pairs, where the array physically
   * has the keys and values alternating. The basic Java sorts work only on the keys, so the
   * real Java solution is to make Tuple2s to store the keys and values and sort an array of
   * those, while the Sorter approach can work directly on the input data format.
   */
  ignore("Sorter benchmark for key-value pairs") {
    val numElements = 25000000 // 25 mil
    val rand = new XORShiftRandom(123)

    // Test our key-value pairs where each element is a Tuple2[Float, Integer].
    val kvTuples = Array.tabulate(numElements) { i =>
      (JFloat.valueOf(rand.nextFloat()), Integer.valueOf(i))
    }

    val kvTupleArray = new Array[AnyRef](numElements)
    val prepareKvTupleArray = () => {
      System.arraycopy(kvTuples, 0, kvTupleArray, 0, numElements)
    }
    runExperiment("Tuple-sort using Arrays.sort()")({
      Arrays.sort(kvTupleArray, new Comparator[AnyRef] {
        override def compare(x: AnyRef, y: AnyRef): Int =
          x.asInstanceOf[(JFloat, _)]._1.compareTo(y.asInstanceOf[(JFloat, _)]._1)
      })
    }, prepareKvTupleArray)

    // Test our Sorter where each element alternates between Float and Integer, non-primitive
    val keyValues = {
      val data = new Array[AnyRef](numElements * 2)
      var i = 0
      while (i < numElements) {
        data(2 * i) = kvTuples(i)._1
        data(2 * i + 1) = kvTuples(i)._2
        i += 1
      }
      data
    }

    val keyValueArray = new Array[AnyRef](numElements * 2)
    val prepareKeyValueArray = () => {
      System.arraycopy(keyValues, 0, keyValueArray, 0, numElements * 2)
    }

    val sorter = new Sorter(new KVArraySortDataFormat[JFloat, AnyRef])
    runExperiment("KV-sort using Sorter")({
      sorter.sort(keyValueArray, 0, numElements, new Comparator[JFloat] {
        override def compare(x: JFloat, y: JFloat): Int = x.compareTo(y)
      })
    }, prepareKeyValueArray)
  }

  /**
   * Tests for sorting with primitive keys with/without key reuse. Java's Arrays.sort is used as
   * reference, which is expected to be faster but it can only sort a single array. Sorter can be
   * used to sort parallel arrays.
   *
   * Ideally these would be executed one at a time, each in their own JVM, so their listing
   * here is mainly to have the code. Running multiple tests within the same JVM session would
   * prevent JIT inlining overridden methods and hence hurt the performance.
   */
  ignore("Sorter benchmark for primitive int array") {
    val numElements = 25000000 // 25 mil
    val rand = new XORShiftRandom(123)

    val ints = Array.fill(numElements)(rand.nextInt())
    val intObjects = {
      val data = new Array[Integer](numElements)
      var i = 0
      while (i < numElements) {
        data(i) = Integer.valueOf(ints(i))
        i += 1
      }
      data
    }

    val intObjectArray = new Array[Integer](numElements)
    val prepareIntObjectArray = () => {
      System.arraycopy(intObjects, 0, intObjectArray, 0, numElements)
    }

    runExperiment("Java Arrays.sort() on non-primitive int array")({
      Arrays.sort(intObjectArray, new Comparator[Integer] {
        override def compare(x: Integer, y: Integer): Int = x.compareTo(y)
      })
    }, prepareIntObjectArray)

    val intPrimitiveArray = new Array[Int](numElements)
    val prepareIntPrimitiveArray = () => {
      System.arraycopy(ints, 0, intPrimitiveArray, 0, numElements)
    }

    runExperiment("Java Arrays.sort() on primitive int array")({
      Arrays.sort(intPrimitiveArray)
    }, prepareIntPrimitiveArray)

    val sorterWithoutKeyReuse = new Sorter(new IntArraySortDataFormat)
    runExperiment("Sorter without key reuse on primitive int array")({
      sorterWithoutKeyReuse.sort(intPrimitiveArray, 0, numElements, Ordering[Int])
    }, prepareIntPrimitiveArray)

    val sorterWithKeyReuse = new Sorter(new KeyReuseIntArraySortDataFormat)
    runExperiment("Sorter with key reuse on primitive int array")({
      sorterWithKeyReuse.sort(intPrimitiveArray, 0, numElements, Ordering[IntWrapper])
    }, prepareIntPrimitiveArray)
  }
}

/**
 * Shared [[SortDataFormat]] plumbing for sorting an Array[Int] in place;
 * subclasses decide how the key of type `K` is extracted from a position.
 */
abstract class AbstractIntArraySortDataFormat[K] extends SortDataFormat[K, Array[Int]] {

  override def swap(data: Array[Int], pos0: Int, pos1: Int): Unit = {
    val tmp = data(pos0)
    data(pos0) = data(pos1)
    data(pos1) = tmp
  }

  override def copyElement(src: Array[Int], srcPos: Int, dst: Array[Int], dstPos: Int) {
    dst(dstPos) = src(srcPos)
  }

  /** Copy a range of elements starting at src(srcPos) to dest, starting at destPos. */
  override def copyRange(src: Array[Int], srcPos: Int, dst: Array[Int], dstPos: Int, length: Int) {
    System.arraycopy(src, srcPos, dst, dstPos, length)
  }

  /** Allocates a new structure that can hold up to 'length' elements. */
  override def allocate(length: Int): Array[Int] = {
    new Array[Int](length)
  }
}

/** Format to sort a simple Array[Int]. Could be easily generified and specialized. */
class IntArraySortDataFormat extends AbstractIntArraySortDataFormat[Int] {
  override protected def getKey(data: Array[Int], pos: Int): Int = {
    data(pos)
  }
}

/** Wrapper of Int for key reuse: a single mutable boxed key avoids per-comparison allocation. */
class IntWrapper(var key: Int = 0) extends Ordered[IntWrapper] {
  override def compare(that: IntWrapper): Int = {
    Ordering.Int.compare(key, that.key)
  }
}

/** SortDataFormat for Array[Int] with reused keys. */
class KeyReuseIntArraySortDataFormat extends AbstractIntArraySortDataFormat[IntWrapper] {

  override def newKey(): IntWrapper = {
    new IntWrapper()
  }

  // Mutates and returns `reuse` when one is supplied; allocates only when it is null.
  override def getKey(data: Array[Int], pos: Int, reuse: IntWrapper): IntWrapper = {
    if (reuse == null) {
      new IntWrapper(data(pos))
    } else {
      reuse.key = data(pos)
      reuse
    }
  }

  override protected def getKey(data: Array[Int], pos: Int): IntWrapper = {
    getKey(data, pos, null)
  }
}
hhbyyh/spark
core/src/test/scala/org/apache/spark/util/collection/SorterSuite.scala
Scala
apache-2.0
9,717
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions.codegen import java.io.ByteArrayInputStream import java.util.{Map => JavaMap} import scala.collection.JavaConverters._ import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import scala.language.existentials import scala.util.control.NonFatal import com.google.common.cache.{CacheBuilder, CacheLoader} import com.google.common.util.concurrent.{ExecutionError, UncheckedExecutionException} import org.codehaus.commons.compiler.CompileException import org.codehaus.janino.{ByteArrayClassLoader, ClassBodyEvaluator, InternalCompilerException, SimpleCompiler} import org.codehaus.janino.util.ClassFile import org.apache.spark.{SparkEnv, TaskContext, TaskKilledException} import org.apache.spark.executor.InputMetrics import org.apache.spark.internal.Logging import org.apache.spark.metrics.source.CodegenMetrics import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.util.{ArrayData, MapData} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ import org.apache.spark.unsafe.Platform import org.apache.spark.unsafe.types._ import 
org.apache.spark.util.{ParentClassLoader, Utils} /** * Java source for evaluating an [[Expression]] given a [[InternalRow]] of input. * * @param code The sequence of statements required to evaluate the expression. * It should be empty string, if `isNull` and `value` are already existed, or no code * needed to evaluate them (literals). * @param isNull A term that holds a boolean value representing whether the expression evaluated * to null. * @param value A term for a (possibly primitive) value of the result of the evaluation. Not * valid if `isNull` is set to `true`. */ case class ExprCode(var code: String, var isNull: String, var value: String) object ExprCode { def forNullValue(dataType: DataType): ExprCode = { val defaultValueLiteral = CodeGenerator.defaultValue(dataType, typedNull = true) ExprCode(code = "", isNull = "true", value = defaultValueLiteral) } def forNonNullValue(value: String): ExprCode = { ExprCode(code = "", isNull = "false", value = value) } } /** * State used for subexpression elimination. * * @param isNull A term that holds a boolean value representing whether the expression evaluated * to null. * @param value A term for a value of a common sub-expression. Not valid if `isNull` * is set to `true`. */ case class SubExprEliminationState(isNull: String, value: String) /** * Codes and common subexpressions mapping used for subexpression elimination. * * @param codes Strings representing the codes that evaluate common subexpressions. * @param states Foreach expression that is participating in subexpression elimination, * the state to use. */ case class SubExprCodes(codes: Seq[String], states: Map[Expression, SubExprEliminationState]) /** * The main information about a new added function. * * @param functionName String representing the name of the function * @param innerClassName Optional value which is empty if the function is added to * the outer class, otherwise it contains the name of the * inner class in which the function has been added. 
* @param innerClassInstance Optional value which is empty if the function is added to * the outer class, otherwise it contains the name of the * instance of the inner class in the outer class. */ private[codegen] case class NewFunctionSpec( functionName: String, innerClassName: Option[String], innerClassInstance: Option[String]) /** * A context for codegen, tracking a list of objects that could be passed into generated Java * function. */ class CodegenContext { import CodeGenerator._ /** * Holding a list of objects that could be used passed into generated class. */ val references: mutable.ArrayBuffer[Any] = new mutable.ArrayBuffer[Any]() /** * Add an object to `references`. * * Returns the code to access it. * * This does not to store the object into field but refer it from the references field at the * time of use because number of fields in class is limited so we should reduce it. */ def addReferenceObj(objName: String, obj: Any, className: String = null): String = { val idx = references.length references += obj val clsName = Option(className).getOrElse(obj.getClass.getName) s"(($clsName) references[$idx] /* $objName */)" } /** * Holding the variable name of the input row of the current operator, will be used by * `BoundReference` to generate code. * * Note that if `currentVars` is not null, `BoundReference` prefers `currentVars` over `INPUT_ROW` * to generate code. If you want to make sure the generated code use `INPUT_ROW`, you need to set * `currentVars` to null, or set `currentVars(i)` to null for certain columns, before calling * `Expression.genCode`. */ var INPUT_ROW = "i" /** * Holding a list of generated columns as input of current operator, will be used by * BoundReference to generate code. */ var currentVars: Seq[ExprCode] = null /** * Holding expressions' inlined mutable states like `MonotonicallyIncreasingID.count` as a * 2-tuple: java type, variable name. 
* As an example, ("int", "count") will produce code: * {{{ * private int count; * }}} * as a member variable * * They will be kept as member variables in generated classes like `SpecificProjection`. * * Exposed for tests only. */ private[catalyst] val inlinedMutableStates: mutable.ArrayBuffer[(String, String)] = mutable.ArrayBuffer.empty[(String, String)] /** * The mapping between mutable state types and corrseponding compacted arrays. * The keys are java type string. The values are [[MutableStateArrays]] which encapsulates * the compacted arrays for the mutable states with the same java type. * * Exposed for tests only. */ private[catalyst] val arrayCompactedMutableStates: mutable.Map[String, MutableStateArrays] = mutable.Map.empty[String, MutableStateArrays] // An array holds the code that will initialize each state // Exposed for tests only. private[catalyst] val mutableStateInitCode: mutable.ArrayBuffer[String] = mutable.ArrayBuffer.empty[String] // Tracks the names of all the mutable states. private val mutableStateNames: mutable.HashSet[String] = mutable.HashSet.empty /** * This class holds a set of names of mutableStateArrays that is used for compacting mutable * states for a certain type, and holds the next available slot of the current compacted array. */ class MutableStateArrays { val arrayNames = mutable.ListBuffer.empty[String] createNewArray() private[this] var currentIndex = 0 private def createNewArray() = { val newArrayName = freshName("mutableStateArray") mutableStateNames += newArrayName arrayNames.append(newArrayName) } def getCurrentIndex: Int = currentIndex /** * Returns the reference of next available slot in current compacted array. The size of each * compacted array is controlled by the constant `MUTABLESTATEARRAY_SIZE_LIMIT`. * Once reaching the threshold, new compacted array is created. 
*/ def getNextSlot(): String = { if (currentIndex < MUTABLESTATEARRAY_SIZE_LIMIT) { val res = s"${arrayNames.last}[$currentIndex]" currentIndex += 1 res } else { createNewArray() currentIndex = 1 s"${arrayNames.last}[0]" } } } /** * A map containing the mutable states which have been defined so far using * `addImmutableStateIfNotExists`. Each entry contains the name of the mutable state as key and * its Java type and init code as value. */ private val immutableStates: mutable.Map[String, (String, String)] = mutable.Map.empty[String, (String, String)] /** * Add a mutable state as a field to the generated class. c.f. the comments above. * * @param javaType Java type of the field. Note that short names can be used for some types, * e.g. InternalRow, UnsafeRow, UnsafeArrayData, etc. Other types will have to * specify the fully-qualified Java type name. See the code in doCompile() for * the list of default imports available. * Also, generic type arguments are accepted but ignored. * @param variableName Name of the field. * @param initFunc Function includes statement(s) to put into the init() method to initialize * this field. The argument is the name of the mutable state variable. * If left blank, the field will be default-initialized. * @param forceInline whether the declaration and initialization code may be inlined rather than * compacted. Please set `true` into forceInline for one of the followings: * 1. use the original name of the status * 2. expect to non-frequently generate the status * (e.g. not much sort operators in one stage) * @param useFreshName If this is false and the mutable state ends up inlining in the outer * class, the name is not changed * @return the name of the mutable state variable, which is the original name or fresh name if * the variable is inlined to the outer class, or an array access if the variable is to * be stored in an array of variables of the same type. 
* A variable will be inlined into the outer class when one of the following conditions * are satisfied: * 1. forceInline is true * 2. its type is primitive type and the total number of the inlined mutable variables * is less than `OUTER_CLASS_VARIABLES_THRESHOLD` * 3. its type is multi-dimensional array * When a variable is compacted into an array, the max size of the array for compaction * is given by `MUTABLESTATEARRAY_SIZE_LIMIT`. */ def addMutableState( javaType: String, variableName: String, initFunc: String => String = _ => "", forceInline: Boolean = false, useFreshName: Boolean = true): String = { // want to put a primitive type variable at outerClass for performance val canInlinePrimitive = isPrimitiveType(javaType) && (inlinedMutableStates.length < OUTER_CLASS_VARIABLES_THRESHOLD) if (forceInline || canInlinePrimitive || javaType.contains("[][]")) { val varName = if (useFreshName) freshName(variableName) else variableName val initCode = initFunc(varName) inlinedMutableStates += ((javaType, varName)) mutableStateInitCode += initCode mutableStateNames += varName varName } else { val arrays = arrayCompactedMutableStates.getOrElseUpdate(javaType, new MutableStateArrays) val element = arrays.getNextSlot() val initCode = initFunc(element) mutableStateInitCode += initCode element } } /** * Add an immutable state as a field to the generated class only if it does not exist yet a field * with that name. This helps reducing the number of the generated class' fields, since the same * variable can be reused by many functions. * * Even though the added variables are not declared as final, they should never be reassigned in * the generated code to prevent errors and unexpected behaviors. * * Internally, this method calls `addMutableState`. * * @param javaType Java type of the field. * @param variableName Name of the field. * @param initFunc Function includes statement(s) to put into the init() method to initialize * this field. 
The argument is the name of the mutable state variable. */ def addImmutableStateIfNotExists( javaType: String, variableName: String, initFunc: String => String = _ => ""): Unit = { val existingImmutableState = immutableStates.get(variableName) if (existingImmutableState.isEmpty) { addMutableState(javaType, variableName, initFunc, useFreshName = false, forceInline = true) immutableStates(variableName) = (javaType, initFunc(variableName)) } else { val (prevJavaType, prevInitCode) = existingImmutableState.get assert(prevJavaType == javaType, s"$variableName has already been defined with type " + s"$prevJavaType and now it is tried to define again with type $javaType.") assert(prevInitCode == initFunc(variableName), s"$variableName has already been defined " + s"with different initialization statements.") } } /** * Add buffer variable which stores data coming from an [[InternalRow]]. This methods guarantees * that the variable is safely stored, which is important for (potentially) byte array backed * data types like: UTF8String, ArrayData, MapData & InternalRow. */ def addBufferedState(dataType: DataType, variableName: String, initCode: String): ExprCode = { val value = addMutableState(javaType(dataType), variableName) val code = dataType match { case StringType => s"$value = $initCode.clone();" case _: StructType | _: ArrayType | _: MapType => s"$value = $initCode.copy();" case _ => s"$value = $initCode;" } ExprCode(code, "false", value) } def declareMutableStates(): String = { // It's possible that we add same mutable state twice, e.g. the `mergeExpressions` in // `TypedAggregateExpression`, we should call `distinct` here to remove the duplicated ones. 
val inlinedStates = inlinedMutableStates.distinct.map { case (javaType, variableName) => s"private $javaType $variableName;" } val arrayStates = arrayCompactedMutableStates.flatMap { case (javaType, mutableStateArrays) => val numArrays = mutableStateArrays.arrayNames.size mutableStateArrays.arrayNames.zipWithIndex.map { case (arrayName, index) => val length = if (index + 1 == numArrays) { mutableStateArrays.getCurrentIndex } else { MUTABLESTATEARRAY_SIZE_LIMIT } if (javaType.contains("[]")) { // initializer had an one-dimensional array variable val baseType = javaType.substring(0, javaType.length - 2) s"private $javaType[] $arrayName = new $baseType[$length][];" } else { // initializer had a scalar variable s"private $javaType[] $arrayName = new $javaType[$length];" } } } (inlinedStates ++ arrayStates).mkString("\\n") } def initMutableStates(): String = { // It's possible that we add same mutable state twice, e.g. the `mergeExpressions` in // `TypedAggregateExpression`, we should call `distinct` here to remove the duplicated ones. val initCodes = mutableStateInitCode.distinct.map(_ + "\\n") // The generated initialization code may exceed 64kb function size limit in JVM if there are too // many mutable states, so split it into multiple functions. splitExpressions(expressions = initCodes, funcName = "init", arguments = Nil) } /** * Code statements to initialize states that depend on the partition index. * An integer `partitionIndex` will be made available within the scope. */ val partitionInitializationStatements: mutable.ArrayBuffer[String] = mutable.ArrayBuffer.empty def addPartitionInitializationStatement(statement: String): Unit = { partitionInitializationStatements += statement } def initPartition(): String = { partitionInitializationStatements.mkString("\\n") } /** * Holds expressions that are equivalent. Used to perform subexpression elimination * during codegen. 
* * For expressions that appear more than once, generate additional code to prevent * recomputing the value. * * For example, consider two expression generated from this SQL statement: * SELECT (col1 + col2), (col1 + col2) / col3. * * equivalentExpressions will match the tree containing `col1 + col2` and it will only * be evaluated once. */ val equivalentExpressions: EquivalentExpressions = new EquivalentExpressions // Foreach expression that is participating in subexpression elimination, the state to use. var subExprEliminationExprs = Map.empty[Expression, SubExprEliminationState] // The collection of sub-expression result resetting methods that need to be called on each row. val subexprFunctions = mutable.ArrayBuffer.empty[String] val outerClassName = "OuterClass" /** * Holds the class and instance names to be generated, where `OuterClass` is a placeholder * standing for whichever class is generated as the outermost class and which will contain any * inner sub-classes. All other classes and instance names in this list will represent private, * inner sub-classes. */ private val classes: mutable.ListBuffer[(String, String)] = mutable.ListBuffer[(String, String)](outerClassName -> null) // A map holding the current size in bytes of each class to be generated. private val classSize: mutable.Map[String, Int] = mutable.Map[String, Int](outerClassName -> 0) // Nested maps holding function names and their code belonging to each class. private val classFunctions: mutable.Map[String, mutable.Map[String, String]] = mutable.Map(outerClassName -> mutable.Map.empty[String, String]) // Verbatim extra code to be added to the OuterClass. private val extraClasses: mutable.ListBuffer[String] = mutable.ListBuffer[String]() // Returns the size of the most recently added class. private def currClassSize(): Int = classSize(classes.head._1) // Returns the class name and instance name for the most recently added class. 
  private def currClass(): (String, String) = classes.head

  // Adds a new class. Requires the class' name, and its instance name.
  private def addClass(className: String, classInstance: String): Unit = {
    // Prepend so `currClass()`/`currClassSize()` (which look at the head) see the new class.
    classes.prepend(className -> classInstance)
    classSize += className -> 0
    classFunctions += className -> mutable.Map.empty[String, String]
  }

  /**
   * Adds a function to the generated class. If the code for the `OuterClass` grows too large, the
   * function will be inlined into a new private, inner class, and a class-qualified name for the
   * function will be returned. Otherwise, the function will be inlined to the `OuterClass` the
   * simple `funcName` will be returned.
   *
   * @param funcName the class-unqualified name of the function
   * @param funcCode the body of the function
   * @param inlineToOuterClass whether the given code must be inlined to the `OuterClass`. This
   *                           can be necessary when a function is declared outside of the context
   *                           it is eventually referenced and a returned qualified function name
   *                           cannot otherwise be accessed.
   * @return the name of the function, qualified by class if it will be inlined to a private,
   *         inner class
   */
  def addNewFunction(
      funcName: String,
      funcCode: String,
      inlineToOuterClass: Boolean = false): String = {
    val newFunction = addNewFunctionInternal(funcName, funcCode, inlineToOuterClass)
    newFunction match {
      case NewFunctionSpec(functionName, None, None) => functionName
      case NewFunctionSpec(functionName, Some(_), Some(innerClassInstance)) =>
        // Qualify with the inner-class instance so callers in the outer class can invoke it.
        innerClassInstance + "." + functionName
    }
  }

  private[this] def addNewFunctionInternal(
      funcName: String,
      funcCode: String,
      inlineToOuterClass: Boolean): NewFunctionSpec = {
    // Pick the target class: forced to the outer class, spilled to a brand-new nested class when
    // the current class is already over the size threshold, or the current class otherwise.
    val (className, classInstance) = if (inlineToOuterClass) {
      outerClassName -> ""
    } else if (currClassSize > GENERATED_CLASS_SIZE_THRESHOLD) {
      val className = freshName("NestedClass")
      val classInstance = freshName("nestedClassInstance")

      addClass(className, classInstance)

      className -> classInstance
    } else {
      currClass()
    }

    addNewFunctionToClass(funcName, funcCode, className)

    if (className == outerClassName) {
      NewFunctionSpec(funcName, None, None)
    } else {
      NewFunctionSpec(funcName, Some(className), Some(classInstance))
    }
  }

  private[this] def addNewFunctionToClass(
      funcName: String,
      funcCode: String,
      className: String) = {
    // Source-code length is used as a proxy for the eventual class size.
    classSize(className) += funcCode.length
    classFunctions(className) += funcName -> funcCode
  }

  /**
   * Declares all function code. If the added functions are too many, split them into nested
   * sub-classes to avoid hitting Java compiler constant pool limitation.
   */
  def declareAddedFunctions(): String = {
    val inlinedFunctions = classFunctions(outerClassName).values

    // Nested, private sub-classes have no mutable state (though they do reference the outer class'
    // mutable state), so we declare and initialize them inline to the OuterClass.
    val initNestedClasses = classes.filter(_._1 != outerClassName).map {
      case (className, classInstance) =>
        s"private $className $classInstance = new $className();"
    }

    val declareNestedClasses = classFunctions.filterKeys(_ != outerClassName).map {
      case (className, functions) =>
        s"""
           |private class $className {
           |  ${functions.values.mkString("\\n")}
           |}
           """.stripMargin
    }

    (inlinedFunctions ++ initNestedClasses ++ declareNestedClasses).mkString("\\n")
  }

  /**
   * Emits extra inner classes added with `addInnerClass`.
   */
  def emitExtraCode(): String = {
    extraClasses.mkString("\\n")
  }

  /**
   * Add extra source code to the outermost generated class.
* @param code verbatim source code of the inner class to be added. */ def addInnerClass(code: String): Unit = { extraClasses.append(code) } /** * The map from a variable name to it's next ID. */ private val freshNameIds = new mutable.HashMap[String, Int] freshNameIds += INPUT_ROW -> 1 /** * A prefix used to generate fresh name. */ var freshNamePrefix = "" /** * The map from a place holder to a corresponding comment */ private val placeHolderToComments = new mutable.HashMap[String, String] /** * Returns a term name that is unique within this instance of a `CodegenContext`. */ def freshName(name: String): String = synchronized { val fullName = if (freshNamePrefix == "") { name } else { s"${freshNamePrefix}_$name" } if (freshNameIds.contains(fullName)) { val id = freshNameIds(fullName) freshNameIds(fullName) = id + 1 s"$fullName$id" } else { freshNameIds += fullName -> 1 fullName } } /** * Generates code for equal expression in Java. */ def genEqual(dataType: DataType, c1: String, c2: String): String = dataType match { case BinaryType => s"java.util.Arrays.equals($c1, $c2)" case FloatType => s"(java.lang.Float.isNaN($c1) && java.lang.Float.isNaN($c2)) || $c1 == $c2" case DoubleType => s"(java.lang.Double.isNaN($c1) && java.lang.Double.isNaN($c2)) || $c1 == $c2" case dt: DataType if isPrimitiveType(dt) => s"$c1 == $c2" case dt: DataType if dt.isInstanceOf[AtomicType] => s"$c1.equals($c2)" case array: ArrayType => genComp(array, c1, c2) + " == 0" case struct: StructType => genComp(struct, c1, c2) + " == 0" case udt: UserDefinedType[_] => genEqual(udt.sqlType, c1, c2) case NullType => "false" case _ => throw new IllegalArgumentException( "cannot generate equality code for un-comparable type: " + dataType.simpleString) } /** * Generates code for comparing two expressions. 
   *
   * @param dataType data type of the expressions
   * @param c1 name of the variable of expression 1's output
   * @param c2 name of the variable of expression 2's output
   */
  def genComp(dataType: DataType, c1: String, c2: String): String = dataType match {
    // java boolean doesn't support > or < operator
    case BooleanType => s"($c1 == $c2 ? 0 : ($c1 ? 1 : -1))"
    case DoubleType => s"org.apache.spark.util.Utils.nanSafeCompareDoubles($c1, $c2)"
    case FloatType => s"org.apache.spark.util.Utils.nanSafeCompareFloats($c1, $c2)"
    // use c1 - c2 may overflow
    case dt: DataType if isPrimitiveType(dt) => s"($c1 > $c2 ? 1 : $c1 < $c2 ? -1 : 0)"
    case BinaryType => s"org.apache.spark.sql.catalyst.util.TypeUtils.compareBinary($c1, $c2)"
    case NullType => "0"
    case array: ArrayType =>
      // Emit (and register) a helper that compares two arrays element-wise; nulls sort before
      // non-nulls, and a shorter array that is a prefix of the longer one sorts first.
      val elementType = array.elementType
      val elementA = freshName("elementA")
      val isNullA = freshName("isNullA")
      val elementB = freshName("elementB")
      val isNullB = freshName("isNullB")
      val compareFunc = freshName("compareArray")
      val minLength = freshName("minLength")
      val jt = javaType(elementType)
      val funcCode: String =
        s"""
          public int $compareFunc(ArrayData a, ArrayData b) {
            // when comparing unsafe arrays, try equals first as it compares the binary directly
            // which is very fast.
            if (a instanceof UnsafeArrayData && b instanceof UnsafeArrayData && a.equals(b)) {
              return 0;
            }
            int lengthA = a.numElements();
            int lengthB = b.numElements();
            int $minLength = (lengthA > lengthB) ? lengthB : lengthA;
            for (int i = 0; i < $minLength; i++) {
              boolean $isNullA = a.isNullAt(i);
              boolean $isNullB = b.isNullAt(i);
              if ($isNullA && $isNullB) {
                // Nothing
              } else if ($isNullA) {
                return -1;
              } else if ($isNullB) {
                return 1;
              } else {
                $jt $elementA = ${getValue("a", elementType, "i")};
                $jt $elementB = ${getValue("b", elementType, "i")};
                int comp = ${genComp(elementType, elementA, elementB)};
                if (comp != 0) {
                  return comp;
                }
              }
            }

            if (lengthA < lengthB) {
              return -1;
            } else if (lengthA > lengthB) {
              return 1;
            }
            return 0;
          }
        """
      s"${addNewFunction(compareFunc, funcCode)}($c1, $c2)"
    case schema: StructType =>
      // Emit (and register) a helper that compares two rows field-by-field using generated
      // per-field comparisons.
      val comparisons = GenerateOrdering.genComparisons(this, schema)
      val compareFunc = freshName("compareStruct")
      val funcCode: String =
        s"""
          public int $compareFunc(InternalRow a, InternalRow b) {
            // when comparing unsafe rows, try equals first as it compares the binary directly
            // which is very fast.
            if (a instanceof UnsafeRow && b instanceof UnsafeRow && a.equals(b)) {
              return 0;
            }
            $comparisons
            return 0;
          }
        """
      s"${addNewFunction(compareFunc, funcCode)}($c1, $c2)"
    case other if other.isInstanceOf[AtomicType] => s"$c1.compare($c2)"
    case udt: UserDefinedType[_] => genComp(udt.sqlType, c1, c2)
    case _ =>
      throw new IllegalArgumentException(
        "cannot generate compare code for un-comparable type: " + dataType.simpleString)
  }

  /**
   * Generates code for greater of two expressions.
   *
   * @param dataType data type of the expressions
   * @param c1 name of the variable of expression 1's output
   * @param c2 name of the variable of expression 2's output
   */
  def genGreater(dataType: DataType, c1: String, c2: String): String = javaType(dataType) match {
    case JAVA_BYTE | JAVA_SHORT | JAVA_INT | JAVA_LONG => s"$c1 > $c2"
    case _ => s"(${genComp(dataType, c1, c2)}) > 0"
  }

  /**
   * Generates code to do null safe execution, i.e. only execute the code when the input is not
   * null by adding null check if necessary.
   *
   * @param nullable used to decide whether we should add null check or not.
* @param isNull the code to check if the input is null. * @param execute the code that should only be executed when the input is not null. */ def nullSafeExec(nullable: Boolean, isNull: String)(execute: String): String = { if (nullable) { s""" if (!$isNull) { $execute } """ } else { "\\n" + execute } } /** * Splits the generated code of expressions into multiple functions, because function has * 64kb code size limit in JVM. If the class to which the function would be inlined would grow * beyond 1000kb, we declare a private, inner sub-class, and the function is inlined to it * instead, because classes have a constant pool limit of 65,536 named values. * * Note that different from `splitExpressions`, we will extract the current inputs of this * context and pass them to the generated functions. The input is `INPUT_ROW` for normal codegen * path, and `currentVars` for whole stage codegen path. Whole stage codegen path is not * supported yet. * * @param expressions the codes to evaluate expressions. * @param funcName the split function name base. * @param extraArguments the list of (type, name) of the arguments of the split function, * except for the current inputs like `ctx.INPUT_ROW`. * @param returnType the return type of the split function. * @param makeSplitFunction makes split function body, e.g. add preparation or cleanup. * @param foldFunctions folds the split function calls. 
*/ def splitExpressionsWithCurrentInputs( expressions: Seq[String], funcName: String = "apply", extraArguments: Seq[(String, String)] = Nil, returnType: String = "void", makeSplitFunction: String => String = identity, foldFunctions: Seq[String] => String = _.mkString("", ";\\n", ";")): String = { // TODO: support whole stage codegen if (INPUT_ROW == null || currentVars != null) { expressions.mkString("\\n") } else { splitExpressions( expressions, funcName, ("InternalRow", INPUT_ROW) +: extraArguments, returnType, makeSplitFunction, foldFunctions) } } /** * Splits the generated code of expressions into multiple functions, because function has * 64kb code size limit in JVM. If the class to which the function would be inlined would grow * beyond 1000kb, we declare a private, inner sub-class, and the function is inlined to it * instead, because classes have a constant pool limit of 65,536 named values. * * @param expressions the codes to evaluate expressions. * @param funcName the split function name base. * @param arguments the list of (type, name) of the arguments of the split function. * @param returnType the return type of the split function. * @param makeSplitFunction makes split function body, e.g. add preparation or cleanup. * @param foldFunctions folds the split function calls. */ def splitExpressions( expressions: Seq[String], funcName: String, arguments: Seq[(String, String)], returnType: String = "void", makeSplitFunction: String => String = identity, foldFunctions: Seq[String] => String = _.mkString("", ";\\n", ";")): String = { val blocks = buildCodeBlocks(expressions) if (blocks.length == 1) { // inline execution if only one block blocks.head } else { if (Utils.isTesting) { // Passing global variables to the split method is dangerous, as any mutating to it is // ignored and may lead to unexpected behavior. 
arguments.foreach { case (_, name) => assert(!mutableStateNames.contains(name), s"split function argument $name cannot be a global variable.") } } val func = freshName(funcName) val argString = arguments.map { case (t, name) => s"$t $name" }.mkString(", ") val functions = blocks.zipWithIndex.map { case (body, i) => val name = s"${func}_$i" val code = s""" |private $returnType $name($argString) { | ${makeSplitFunction(body)} |} """.stripMargin addNewFunctionInternal(name, code, inlineToOuterClass = false) } val (outerClassFunctions, innerClassFunctions) = functions.partition(_.innerClassName.isEmpty) val argsString = arguments.map(_._2).mkString(", ") val outerClassFunctionCalls = outerClassFunctions.map(f => s"${f.functionName}($argsString)") val innerClassFunctionCalls = generateInnerClassesFunctionCalls( innerClassFunctions, func, arguments, returnType, makeSplitFunction, foldFunctions) foldFunctions(outerClassFunctionCalls ++ innerClassFunctionCalls) } } /** * Splits the generated code of expressions into multiple sequences of String * based on a threshold of length of a String * * @param expressions the codes to evaluate expressions. */ private def buildCodeBlocks(expressions: Seq[String]): Seq[String] = { val blocks = new ArrayBuffer[String]() val blockBuilder = new StringBuilder() var length = 0 for (code <- expressions) { // We can't know how many bytecode will be generated, so use the length of source code // as metric. A method should not go beyond 8K, otherwise it will not be JITted, should // also not be too small, or it will have many function calls (for wide table), see the // results in BenchmarkWideTable. if (length > 1024) { blocks += blockBuilder.toString() blockBuilder.clear() length = 0 } blockBuilder.append(code) length += CodeFormatter.stripExtraNewLinesAndComments(code).length } blocks += blockBuilder.toString() } /** * Here we handle all the methods which have been added to the inner classes and * not to the outer class. 
   * Since they can be many, their direct invocation in the outer class adds many entries
   * to the outer class' constant pool. This can cause the constant pool to go past the JVM limit.
   * Moreover, it can also cause the outer class method where all the invocations are
   * performed to grow beyond the 64k limit.
   * To avoid these problems, we group them and we call only the grouping methods in the
   * outer class.
   *
   * @param functions a [[Seq]] of [[NewFunctionSpec]] defined in the inner classes
   * @param funcName the split function name base.
   * @param arguments the list of (type, name) of the arguments of the split function.
   * @param returnType the return type of the split function.
   * @param makeSplitFunction makes split function body, e.g. add preparation or cleanup.
   * @param foldFunctions folds the split function calls.
   * @return an [[Iterable]] containing the methods' invocations
   */
  private def generateInnerClassesFunctionCalls(
      functions: Seq[NewFunctionSpec],
      funcName: String,
      arguments: Seq[(String, String)],
      returnType: String,
      makeSplitFunction: String => String,
      foldFunctions: Seq[String] => String): Iterable[String] = {
    // Group function names by the (class, instance) they were spilled into. LinkedHashMap keeps
    // a deterministic ordering of the generated invocations.
    val innerClassToFunctions = mutable.LinkedHashMap.empty[(String, String), Seq[String]]
    functions.foreach(f => {
      val key = (f.innerClassName.get, f.innerClassInstance.get)
      val value = f.functionName +: innerClassToFunctions.getOrElse(key, Seq.empty[String])
      innerClassToFunctions.put(key, value)
    })

    val argDefinitionString = arguments.map { case (t, name) => s"$t $name" }.mkString(", ")
    val argInvocationString = arguments.map(_._2).mkString(", ")

    innerClassToFunctions.flatMap {
      case ((innerClassName, innerClassInstance), innerClassFunctions) =>
        // for performance reasons, the functions are prepended, instead of appended,
        // thus here they are in reversed order
        val orderedFunctions = innerClassFunctions.reverse
        if (orderedFunctions.size > MERGE_SPLIT_METHODS_THRESHOLD) {
          // Adding a new function to each inner class which contains the invocation of all the
          // ones which have been added to that inner class. For example,
          //   private class NestedClass {
          //     private void apply_862(InternalRow i) { ... }
          //     private void apply_863(InternalRow i) { ... }
          //     ...
          //     private void apply(InternalRow i) {
          //       apply_862(i);
          //       apply_863(i);
          //       ...
          //     }
          //   }
          val body = foldFunctions(orderedFunctions.map(name => s"$name($argInvocationString)"))
          val code = s"""
              |private $returnType $funcName($argDefinitionString) {
              |  ${makeSplitFunction(body)}
              |}
            """.stripMargin
          addNewFunctionToClass(funcName, code, innerClassName)
          Seq(s"$innerClassInstance.$funcName($argInvocationString)")
        } else {
          // Few enough functions: call each one directly through the inner-class instance.
          orderedFunctions.map(f => s"$innerClassInstance.$f($argInvocationString)")
        }
    }
  }

  /**
   * Perform a function which generates a sequence of ExprCodes with a given mapping between
   * expressions and common expressions, instead of using the mapping in current context.
   */
  def withSubExprEliminationExprs(
      newSubExprEliminationExprs: Map[Expression, SubExprEliminationState])(
      f: => Seq[ExprCode]): Seq[ExprCode] = {
    val oldsubExprEliminationExprs = subExprEliminationExprs
    subExprEliminationExprs = newSubExprEliminationExprs

    val genCodes = f

    // Restore previous subExprEliminationExprs
    subExprEliminationExprs = oldsubExprEliminationExprs
    genCodes
  }

  /**
   * Checks and sets up the state and codegen for subexpression elimination. This finds the
   * common subexpressions, generates the code snippets that evaluate those expressions and
   * populates the mapping of common subexpressions to the generated code snippets. The generated
   * code snippets will be returned and should be inserted into generated codes before these
   * common subexpressions actually are used first time.
*/ def subexpressionEliminationForWholeStageCodegen(expressions: Seq[Expression]): SubExprCodes = { // Create a clear EquivalentExpressions and SubExprEliminationState mapping val equivalentExpressions: EquivalentExpressions = new EquivalentExpressions val localSubExprEliminationExprs = mutable.HashMap.empty[Expression, SubExprEliminationState] // Add each expression tree and compute the common subexpressions. expressions.foreach(equivalentExpressions.addExprTree) // Get all the expressions that appear at least twice and set up the state for subexpression // elimination. val commonExprs = equivalentExpressions.getAllEquivalentExprs.filter(_.size > 1) val codes = commonExprs.map { e => val expr = e.head // Generate the code for this expression tree. val eval = expr.genCode(this) val state = SubExprEliminationState(eval.isNull, eval.value) e.foreach(localSubExprEliminationExprs.put(_, state)) eval.code.trim } SubExprCodes(codes, localSubExprEliminationExprs.toMap) } /** * Checks and sets up the state and codegen for subexpression elimination. This finds the * common subexpressions, generates the functions that evaluate those expressions and populates * the mapping of common subexpressions to the generated functions. */ private def subexpressionElimination(expressions: Seq[Expression]): Unit = { // Add each expression tree and compute the common subexpressions. expressions.foreach(equivalentExpressions.addExprTree(_)) // Get all the expressions that appear at least twice and set up the state for subexpression // elimination. val commonExprs = equivalentExpressions.getAllEquivalentExprs.filter(_.size > 1) commonExprs.foreach { e => val expr = e.head val fnName = freshName("subExpr") val isNull = addMutableState(JAVA_BOOLEAN, "subExprIsNull") val value = addMutableState(javaType(expr.dataType), "subExprValue") // Generate the code for this expression tree and wrap it in a function. 
val eval = expr.genCode(this) val fn = s""" |private void $fnName(InternalRow $INPUT_ROW) { | ${eval.code.trim} | $isNull = ${eval.isNull}; | $value = ${eval.value}; |} """.stripMargin // Add a state and a mapping of the common subexpressions that are associate with this // state. Adding this expression to subExprEliminationExprMap means it will call `fn` // when it is code generated. This decision should be a cost based one. // // The cost of doing subexpression elimination is: // 1. Extra function call, although this is probably *good* as the JIT can decide to // inline or not. // The benefit doing subexpression elimination is: // 1. Running the expression logic. Even for a simple expression, it is likely more than 3 // above. // 2. Less code. // Currently, we will do this for all non-leaf only expression trees (i.e. expr trees with // at least two nodes) as the cost of doing it is expected to be low. subexprFunctions += s"${addNewFunction(fnName, fn)}($INPUT_ROW);" val state = SubExprEliminationState(isNull, value) subExprEliminationExprs ++= e.map(_ -> state).toMap } } /** * Generates code for expressions. If doSubexpressionElimination is true, subexpression * elimination will be performed. Subexpression elimination assumes that the code for each * expression will be combined in the `expressions` order. */ def generateExpressions( expressions: Seq[Expression], doSubexpressionElimination: Boolean = false): Seq[ExprCode] = { if (doSubexpressionElimination) subexpressionElimination(expressions) expressions.map(e => e.genCode(this)) } /** * get a map of the pair of a place holder and a corresponding comment */ def getPlaceHolderToComments(): collection.Map[String, String] = placeHolderToComments /** * Register a comment and return the corresponding place holder * * @param placeholderId an optionally specified identifier for the comment's placeholder. * The caller should make sure this identifier is unique within the * compilation unit. 
If this argument is not specified, a fresh identifier * will be automatically created and used as the placeholder. * @param force whether to force registering the comments */ def registerComment( text: => String, placeholderId: String = "", force: Boolean = false): String = { // By default, disable comments in generated code because computing the comments themselves can // be extremely expensive in certain cases, such as deeply-nested expressions which operate over // inputs with wide schemas. For more details on the performance issues that motivated this // flat, see SPARK-15680. if (force || SparkEnv.get != null && SparkEnv.get.conf.getBoolean("spark.sql.codegen.comments", false)) { val name = if (placeholderId != "") { assert(!placeHolderToComments.contains(placeholderId)) placeholderId } else { freshName("c") } val comment = if (text.contains("\\n") || text.contains("\\r")) { text.split("(\\r\\n)|\\r|\\n").mkString("/**\\n * ", "\\n * ", "\\n */") } else { s"// $text" } placeHolderToComments += (name -> comment) s"/*$name*/" } else { "" } } } /** * A wrapper for generated class, defines a `generate` method so that we can pass extra objects * into generated class. */ abstract class GeneratedClass { def generate(references: Array[Any]): Any } /** * A wrapper for the source code to be compiled by [[CodeGenerator]]. */ class CodeAndComment(val body: String, val comment: collection.Map[String, String]) extends Serializable { override def equals(that: Any): Boolean = that match { case t: CodeAndComment if t.body == body => true case _ => false } override def hashCode(): Int = body.hashCode } /** * A base class for generators of byte code to perform expression evaluation. Includes a set of * helpers for referring to Catalyst types and building trees that perform evaluation of individual * expressions. 
*/ abstract class CodeGenerator[InType <: AnyRef, OutType <: AnyRef] extends Logging { protected val genericMutableRowType: String = classOf[GenericInternalRow].getName /** * Generates a class for a given input expression. Called when there is not cached code * already available. */ protected def create(in: InType): OutType /** * Canonicalizes an input expression. Used to avoid double caching expressions that differ only * cosmetically. */ protected def canonicalize(in: InType): InType /** Binds an input expression to a given input schema */ protected def bind(in: InType, inputSchema: Seq[Attribute]): InType /** Generates the requested evaluator binding the given expression(s) to the inputSchema. */ def generate(expressions: InType, inputSchema: Seq[Attribute]): OutType = generate(bind(expressions, inputSchema)) /** Generates the requested evaluator given already bound expression(s). */ def generate(expressions: InType): OutType = create(canonicalize(expressions)) /** * Create a new codegen context for expression evaluator, used to store those * expressions that don't support codegen */ def newCodeGenContext(): CodegenContext = { new CodegenContext } } object CodeGenerator extends Logging { // This is the value of HugeMethodLimit in the OpenJDK JVM settings final val DEFAULT_JVM_HUGE_METHOD_LIMIT = 8000 // The max valid length of method parameters in JVM. final val MAX_JVM_METHOD_PARAMS_LENGTH = 255 // This is the threshold over which the methods in an inner class are grouped in a single // method which is going to be called by the outer class instead of the many small ones final val MERGE_SPLIT_METHODS_THRESHOLD = 3 // The number of named constants that can exist in the class is limited by the Constant Pool // limit, 65,536. We cannot know how many constants will be inserted for a class, so we use a // threshold of 1000k bytes to determine when a function should be inlined to a private, inner // class. 
final val GENERATED_CLASS_SIZE_THRESHOLD = 1000000 // This is the threshold for the number of global variables, whose types are primitive type or // complex type (e.g. more than one-dimensional array), that will be placed at the outer class final val OUTER_CLASS_VARIABLES_THRESHOLD = 10000 // This is the maximum number of array elements to keep global variables in one Java array // 32767 is the maximum integer value that does not require a constant pool entry in a Java // bytecode instruction final val MUTABLESTATEARRAY_SIZE_LIMIT = 32768 /** * Compile the Java source code into a Java class, using Janino. * * @return a pair of a generated class and the max bytecode size of generated functions. */ def compile(code: CodeAndComment): (GeneratedClass, Int) = try { cache.get(code) } catch { // Cache.get() may wrap the original exception. See the following URL // http://google.github.io/guava/releases/14.0/api/docs/com/google/common/cache/ // Cache.html#get(K,%20java.util.concurrent.Callable) case e @ (_: UncheckedExecutionException | _: ExecutionError) => throw e.getCause } /** * Compile the Java source code into a Java class, using Janino. */ private[this] def doCompile(code: CodeAndComment): (GeneratedClass, Int) = { val evaluator = new ClassBodyEvaluator() // A special classloader used to wrap the actual parent classloader of // [[org.codehaus.janino.ClassBodyEvaluator]] (see CodeGenerator.doCompile). This classloader // does not throw a ClassNotFoundException with a cause set (i.e. exception.getCause returns // a null). This classloader is needed because janino will throw the exception directly if // the parent classloader throws a ClassNotFoundException with cause set instead of trying to // find other possible classes (see org.codehaus.janinoClassLoaderIClassLoader's // findIClass method). Please also see https://issues.apache.org/jira/browse/SPARK-15622 and // https://issues.apache.org/jira/browse/SPARK-11636. 
val parentClassLoader = new ParentClassLoader(Utils.getContextOrSparkClassLoader) evaluator.setParentClassLoader(parentClassLoader) // Cannot be under package codegen, or fail with java.lang.InstantiationException evaluator.setClassName("org.apache.spark.sql.catalyst.expressions.GeneratedClass") evaluator.setDefaultImports(Array( classOf[Platform].getName, classOf[InternalRow].getName, classOf[UnsafeRow].getName, classOf[UTF8String].getName, classOf[Decimal].getName, classOf[CalendarInterval].getName, classOf[ArrayData].getName, classOf[UnsafeArrayData].getName, classOf[MapData].getName, classOf[UnsafeMapData].getName, classOf[Expression].getName, classOf[TaskContext].getName, classOf[TaskKilledException].getName, classOf[InputMetrics].getName )) evaluator.setExtendedClass(classOf[GeneratedClass]) logDebug({ // Only add extra debugging info to byte code when we are going to print the source code. evaluator.setDebuggingInformation(true, true, false) s"\\n${CodeFormatter.format(code)}" }) val maxCodeSize = try { evaluator.cook("generated.java", code.body) updateAndGetCompilationStats(evaluator) } catch { case e: InternalCompilerException => val msg = s"failed to compile: $e" logError(msg, e) val maxLines = SQLConf.get.loggingMaxLinesForCodegen logInfo(s"\\n${CodeFormatter.format(code, maxLines)}") throw new InternalCompilerException(msg, e) case e: CompileException => val msg = s"failed to compile: $e" logError(msg, e) val maxLines = SQLConf.get.loggingMaxLinesForCodegen logInfo(s"\\n${CodeFormatter.format(code, maxLines)}") throw new CompileException(msg, e.getLocation) } (evaluator.getClazz().newInstance().asInstanceOf[GeneratedClass], maxCodeSize) } /** * Returns the max bytecode size of the generated functions by inspecting janino private fields. * Also, this method updates the metrics information. */ private def updateAndGetCompilationStats(evaluator: ClassBodyEvaluator): Int = { // First retrieve the generated classes. 
val classes = { val resultField = classOf[SimpleCompiler].getDeclaredField("result") resultField.setAccessible(true) val loader = resultField.get(evaluator).asInstanceOf[ByteArrayClassLoader] val classesField = loader.getClass.getDeclaredField("classes") classesField.setAccessible(true) classesField.get(loader).asInstanceOf[JavaMap[String, Array[Byte]]].asScala } // Then walk the classes to get at the method bytecode. val codeAttr = Utils.classForName("org.codehaus.janino.util.ClassFile$CodeAttribute") val codeAttrField = codeAttr.getDeclaredField("code") codeAttrField.setAccessible(true) val codeSizes = classes.flatMap { case (_, classBytes) => CodegenMetrics.METRIC_GENERATED_CLASS_BYTECODE_SIZE.update(classBytes.length) try { val cf = new ClassFile(new ByteArrayInputStream(classBytes)) val stats = cf.methodInfos.asScala.flatMap { method => method.getAttributes().filter(_.getClass.getName == codeAttr.getName).map { a => val byteCodeSize = codeAttrField.get(a).asInstanceOf[Array[Byte]].length CodegenMetrics.METRIC_GENERATED_METHOD_BYTECODE_SIZE.update(byteCodeSize) byteCodeSize } } Some(stats) } catch { case NonFatal(e) => logWarning("Error calculating stats of compiled class.", e) None } }.flatten codeSizes.max } /** * A cache of generated classes. * * From the Guava Docs: A Cache is similar to ConcurrentMap, but not quite the same. The most * fundamental difference is that a ConcurrentMap persists all elements that are added to it until * they are explicitly removed. A Cache on the other hand is generally configured to evict entries * automatically, in order to constrain its memory footprint. Note that this cache does not use * weak keys/values and thus does not respond to memory pressure. 
*/ private val cache = CacheBuilder.newBuilder() .maximumSize(100) .build( new CacheLoader[CodeAndComment, (GeneratedClass, Int)]() { override def load(code: CodeAndComment): (GeneratedClass, Int) = { val startTime = System.nanoTime() val result = doCompile(code) val endTime = System.nanoTime() def timeMs: Double = (endTime - startTime).toDouble / 1000000 CodegenMetrics.METRIC_SOURCE_CODE_SIZE.update(code.body.length) CodegenMetrics.METRIC_COMPILATION_TIME.update(timeMs.toLong) logInfo(s"Code generated in $timeMs ms") result } }) /** * Name of Java primitive data type */ final val JAVA_BOOLEAN = "boolean" final val JAVA_BYTE = "byte" final val JAVA_SHORT = "short" final val JAVA_INT = "int" final val JAVA_LONG = "long" final val JAVA_FLOAT = "float" final val JAVA_DOUBLE = "double" /** * List of java primitive data types */ val primitiveTypes = Seq(JAVA_BOOLEAN, JAVA_BYTE, JAVA_SHORT, JAVA_INT, JAVA_LONG, JAVA_FLOAT, JAVA_DOUBLE) /** * Returns true if a Java type is Java primitive primitive type */ def isPrimitiveType(jt: String): Boolean = primitiveTypes.contains(jt) def isPrimitiveType(dt: DataType): Boolean = isPrimitiveType(javaType(dt)) /** * Returns the specialized code to access a value from `inputRow` at `ordinal`. 
*/ def getValue(input: String, dataType: DataType, ordinal: String): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$input.get${primitiveTypeName(jt)}($ordinal)" case t: DecimalType => s"$input.getDecimal($ordinal, ${t.precision}, ${t.scale})" case StringType => s"$input.getUTF8String($ordinal)" case BinaryType => s"$input.getBinary($ordinal)" case CalendarIntervalType => s"$input.getInterval($ordinal)" case t: StructType => s"$input.getStruct($ordinal, ${t.size})" case _: ArrayType => s"$input.getArray($ordinal)" case _: MapType => s"$input.getMap($ordinal)" case NullType => "null" case udt: UserDefinedType[_] => getValue(input, udt.sqlType, ordinal) case _ => s"($jt)$input.get($ordinal, null)" } } /** * Returns the code to update a column in Row for a given DataType. */ def setColumn(row: String, dataType: DataType, ordinal: Int, value: String): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$row.set${primitiveTypeName(jt)}($ordinal, $value)" case t: DecimalType => s"$row.setDecimal($ordinal, $value, ${t.precision})" case udt: UserDefinedType[_] => setColumn(row, udt.sqlType, ordinal, value) // The UTF8String, InternalRow, ArrayData and MapData may came from UnsafeRow, we should copy // it to avoid keeping a "pointer" to a memory region which may get updated afterwards. case StringType | _: StructType | _: ArrayType | _: MapType => s"$row.update($ordinal, $value.copy())" case _ => s"$row.update($ordinal, $value)" } } /** * Update a column in MutableRow from ExprCode. 
* * @param isVectorized True if the underlying row is of type `ColumnarBatch.Row`, false otherwise */ def updateColumn( row: String, dataType: DataType, ordinal: Int, ev: ExprCode, nullable: Boolean, isVectorized: Boolean = false): String = { if (nullable) { // Can't call setNullAt on DecimalType, because we need to keep the offset if (!isVectorized && dataType.isInstanceOf[DecimalType]) { s""" |if (!${ev.isNull}) { | ${setColumn(row, dataType, ordinal, ev.value)}; |} else { | ${setColumn(row, dataType, ordinal, "null")}; |} """.stripMargin } else { s""" |if (!${ev.isNull}) { | ${setColumn(row, dataType, ordinal, ev.value)}; |} else { | $row.setNullAt($ordinal); |} """.stripMargin } } else { s"""${setColumn(row, dataType, ordinal, ev.value)};""" } } /** * Returns the specialized code to set a given value in a column vector for a given `DataType`. */ def setValue(vector: String, rowId: String, dataType: DataType, value: String): String = { val jt = javaType(dataType) dataType match { case _ if isPrimitiveType(jt) => s"$vector.put${primitiveTypeName(jt)}($rowId, $value);" case t: DecimalType => s"$vector.putDecimal($rowId, $value, ${t.precision});" case t: StringType => s"$vector.putByteArray($rowId, $value.getBytes());" case _ => throw new IllegalArgumentException(s"cannot generate code for unsupported type: $dataType") } } /** * Returns the specialized code to set a given value in a column vector for a given `DataType` * that could potentially be nullable. */ def updateColumn( vector: String, rowId: String, dataType: DataType, ev: ExprCode, nullable: Boolean): String = { if (nullable) { s""" |if (!${ev.isNull}) { | ${setValue(vector, rowId, dataType, ev.value)} |} else { | $vector.putNull($rowId); |} """.stripMargin } else { s"""${setValue(vector, rowId, dataType, ev.value)};""" } } /** * Returns the specialized code to access a value from a column vector for a given `DataType`. 
*/ def getValueFromVector(vector: String, dataType: DataType, rowId: String): String = { if (dataType.isInstanceOf[StructType]) { // `ColumnVector.getStruct` is different from `InternalRow.getStruct`, it only takes an // `ordinal` parameter. s"$vector.getStruct($rowId)" } else { getValue(vector, dataType, rowId) } } /** * Returns the name used in accessor and setter for a Java primitive type. */ def primitiveTypeName(jt: String): String = jt match { case JAVA_INT => "Int" case _ => boxedType(jt) } def primitiveTypeName(dt: DataType): String = primitiveTypeName(javaType(dt)) /** * Returns the Java type for a DataType. */ def javaType(dt: DataType): String = dt match { case BooleanType => JAVA_BOOLEAN case ByteType => JAVA_BYTE case ShortType => JAVA_SHORT case IntegerType | DateType => JAVA_INT case LongType | TimestampType => JAVA_LONG case FloatType => JAVA_FLOAT case DoubleType => JAVA_DOUBLE case _: DecimalType => "Decimal" case BinaryType => "byte[]" case StringType => "UTF8String" case CalendarIntervalType => "CalendarInterval" case _: StructType => "InternalRow" case _: ArrayType => "ArrayData" case _: MapType => "MapData" case udt: UserDefinedType[_] => javaType(udt.sqlType) case ObjectType(cls) if cls.isArray => s"${javaType(ObjectType(cls.getComponentType))}[]" case ObjectType(cls) => cls.getName case _ => "Object" } /** * Returns the boxed type in Java. */ def boxedType(jt: String): String = jt match { case JAVA_BOOLEAN => "Boolean" case JAVA_BYTE => "Byte" case JAVA_SHORT => "Short" case JAVA_INT => "Integer" case JAVA_LONG => "Long" case JAVA_FLOAT => "Float" case JAVA_DOUBLE => "Double" case other => other } def boxedType(dt: DataType): String = boxedType(javaType(dt)) /** * Returns the representation of default value for a given Java Type. 
* @param jt the string name of the Java type * @param typedNull if true, for null literals, return a typed (with a cast) version */ def defaultValue(jt: String, typedNull: Boolean): String = jt match { case JAVA_BOOLEAN => "false" case JAVA_BYTE => "(byte)-1" case JAVA_SHORT => "(short)-1" case JAVA_INT => "-1" case JAVA_LONG => "-1L" case JAVA_FLOAT => "-1.0f" case JAVA_DOUBLE => "-1.0" case _ => if (typedNull) s"(($jt)null)" else "null" } def defaultValue(dt: DataType, typedNull: Boolean = false): String = defaultValue(javaType(dt), typedNull) /** * Returns the length of parameters for a Java method descriptor. `this` contributes one unit * and a parameter of type long or double contributes two units. Besides, for nullable parameter, * we also need to pass a boolean parameter for the null status. */ def calculateParamLength(params: Seq[Expression]): Int = { def paramLengthForExpr(input: Expression): Int = { val javaParamLength = javaType(input.dataType) match { case JAVA_LONG | JAVA_DOUBLE => 2 case _ => 1 } // For a nullable expression, we need to pass in an extra boolean parameter. (if (input.nullable) 1 else 0) + javaParamLength } // Initial value is 1 for `this`. 1 + params.map(paramLengthForExpr).sum } /** * In Java, a method descriptor is valid only if it represents method parameters with a total * length less than a pre-defined constant. */ def isValidParamLength(paramLength: Int): Boolean = { paramLength <= MAX_JVM_METHOD_PARAMS_LENGTH } }
brad-kaiser/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/CodeGenerator.scala
Scala
apache-2.0
62,998
package cook.config.dsl.buildin

import cook.ref.{ Ref, TargetRef }
import cook.target.{ Target, TargetResult }

import java.io.File

import scala.io.Source
import scala.reflect.io.{ Path => SPath }
import scala.sys.process.ProcessLogger
import scala.util.{ Try, Success, Failure }

/** Shared helpers for built-in DSL rules: resolving refs back to their targets
  * and running external commands with their output captured to log files. */
trait Utils {

  /** Looks up, for every ref in `targetRefs`, the target in `targets` carrying
    * the same ref name. Results come back in `targetRefs` order. */
  def collectTargets(targets: List[Target[TargetResult]],
                     targetRefs: List[Ref]): List[Target[TargetResult]] = {
    val targetsByName = targets.map(t => t.refName -> t).toMap
    targetRefs.map(ref => targetsByName(ref.refName))
  }

  /** Single-ref convenience form of [[collectTargets]]. */
  def collectTarget(targets: List[Target[TargetResult]], targetRef: Ref): Target[TargetResult] =
    collectTargets(targets, List(targetRef)).head

  /** Runs `runWithLoggerOp` with a ProcessLogger that writes to the target's
    * build log file; if the operation throws, the captured log text is
    * reported as the build error. */
  def handleBuildCmd(target: Target[TargetResult])(runWithLoggerOp: ProcessLogger => Unit): Unit = {
    import scala.sys.process._

    target.ref.logParentDir.createDirectory()
    target.ref.buildLogFile.createFile()
    val logFile = target.ref.buildLogFile.jfile

    val logger = ProcessLogger(logFile)
    val outcome = Try(runWithLoggerOp(logger))
    // Flush and close unconditionally so the log is complete before we read it back.
    logger.flush()
    logger.close()

    if (outcome.isFailure) {
      import cook.error._
      import cook.console.ops._
      reportError {
        Source.fromFile(logFile).mkString
      }
    }
  }

  /** Runs each command inside the target's build directory, appending stdout
    * to the build log; on any failure the whole log is reported as the error. */
  def runBuildCmdInTargetDir(target: Target[TargetResult])(cmds: Seq[String]*): Unit = {
    import scala.sys.process._

    target.ref.logParentDir.createDirectory()
    target.ref.buildLogFile.createFile()
    val logFile = target.ref.buildLogFile.jfile

    val outcome = Try {
      for (cmd <- cmds) {
        // `!!` throws on a non-zero exit code, which aborts the remaining commands.
        (Process(cmd, Some(target.buildDir.jfile)) #>> logFile).!!
      }
    }

    if (outcome.isFailure) {
      import cook.error._
      import cook.console.ops._
      reportError {
        Source.fromFile(logFile).mkString
      }
    }
  }

  /** Runs each command inside the target's run directory, reporting an error
    * as soon as one of them exits with a non-zero code. */
  def runRunCmdInTargetDir(target: Target[TargetResult])(cmds: Seq[String]*): Unit = {
    import scala.sys.process._

    for (cmd <- cmds) {
      val exitCode = Process(cmd, Some(target.runDir.jfile)).!
      import cook.error._
      import cook.console.ops._
      if (exitCode != 0) {
        reportError {
          "Exit with code: " :: strong(exitCode.toString)
        }
      }
    }
  }
}
timgreen/cook
src/cook/config/dsl/buildin/Utils.scala
Scala
apache-2.0
2,083
/**
 * Copyright 2012-2013 StackMob
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.stackmob.newman

import java.nio.charset.Charset

/** Library-wide constants shared across the newman codebase. */
object Constants {
  // Single shared UTF-8 Charset instance, resolved once here so call sites
  // don't repeat the Charset.forName("UTF-8") lookup.
  val UTF8Charset = Charset.forName("UTF-8")
}
indykish/newman
src/main/scala/com/stackmob/newman/Constants.scala
Scala
apache-2.0
723
package com.cloudray.scalapress.plugin.ecommerce.controller.admin

import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{ModelAttribute, RequestParam, RequestMapping}
import org.springframework.beans.factory.annotation.Autowired
import com.cloudray.scalapress.plugin.ecommerce.domain.Order
import javax.servlet.http.HttpServletRequest
import org.springframework.ui.ModelMap
import com.sksamuel.scoot.soa.Paging
import com.cloudray.scalapress.plugin.ecommerce.{OrderDao, ShoppingPluginDao}
import com.cloudray.scalapress.item.controller.admin.OrderStatusPopulator
import scala.beans.BeanProperty
import com.cloudray.scalapress.account.Account
import com.cloudray.scalapress.framework.ScalapressContext

/** @author Stephen Samuel */
// Backoffice Spring MVC controller for searching existing orders and
// creating new ones, mounted under /backoffice/order.
@Controller
@RequestMapping(Array("backoffice/order"))
class OrderSearchController extends OrderStatusPopulator {

  // Collaborators injected by Spring's autowiring.
  @Autowired var shoppingPluginDao: ShoppingPluginDao = _
  @Autowired var orderDao: OrderDao = _
  @Autowired var context: ScalapressContext = _

  /**
   * Handles the order search page. If the submitted order id resolves to an
   * existing order, redirects straight to that order's detail page; otherwise
   * runs a paged search over the remaining form criteria and renders the list.
   */
  @RequestMapping
  def search(@ModelAttribute("form") form: SearchForm,
             @RequestParam(value = "pageNumber", defaultValue = "1") pageNumber: Int,
             model: ModelMap,
             req: HttpServletRequest): String = {

    // Strip every non-digit so loosely formatted ids still resolve, then try
    // a direct lookup. NOTE(review): .toLong can throw if the digit run
    // exceeds Long range — presumably acceptable for backoffice input; confirm.
    val order = Option(form.orderId)
      .map(_.replaceAll("\\\\D", ""))
      .filterNot(_.isEmpty)
      .map(_.toLong)
      .flatMap(id => Option(orderDao.find(id)))

    order match {
      case Some(o) => "redirect:/backoffice/order/" + o.id
      case None =>
        // No direct hit: build a paged query from the remaining form fields.
        val query = new OrderQuery
        query.pageNumber = pageNumber
        query.pageSize = 20
        query.status = Option(form.status)
        query.name = Option(form.name)
        query.orderId = Option(form.orderId)
        val orders = orderDao.search(query)
        // Defensive null check on the DAO result — TODO confirm whether
        // orderDao.search can actually return null.
        if (orders != null) {
          model.put("orders", orders.java)
          model.put("paging", Paging(req, orders))
        }
        "admin/plugin/shopping/order/list.vm"
    }
  }

  /**
   * Creates a new order and returns to the order list. With the default
   * accountId of 0, a fresh active placeholder account is created to own the
   * order; otherwise the order is attached to the existing account.
   */
  @RequestMapping(value = Array("create"), produces = Array("text/html"))
  def create(req: HttpServletRequest,
             @RequestParam(value = "accountId", defaultValue = "0") accountId: Long) = {

    val account = accountId match {
      case 0 =>
        // No account supplied: create and persist a default active one.
        val account = Account(context.accountTypeDao.default)
        account.status = Account.STATUS_ACTIVE
        account.name = "New Account"
        context.accountDao.save(account)
        account
      case _ => context.accountDao.find(accountId)
    }

    // The order records the requester's IP alongside its owning account.
    val u = Order(req.getRemoteAddr, account)
    orderDao.save(u)

    "redirect:/backoffice/order"
  }

  // Supplies a fresh form-backing bean for each request's "form" model attribute.
  @ModelAttribute("form")
  def form = new SearchForm()
}

// Form-backing bean for the search page; @BeanProperty generates the
// JavaBean getters/setters that Spring's data binder requires.
class SearchForm {
  @BeanProperty var orderId: String = _
  @BeanProperty var status: String = _
  @BeanProperty var name: String = _
}
vidyacraghav/scalapress
src/main/scala/com/cloudray/scalapress/plugin/ecommerce/controller/admin/OrderSearchController.scala
Scala
apache-2.0
2,807
/** Test pattern matching and finally, see SI-5929. */
// Regression test: the specific shapes below combine `match` with
// `try`/`finally` in ways that exercised the SI-5929 compiler issue.
// The test succeeds simply by running to completion, so the code must
// stay in exactly this form — do not "simplify" the bodies.
object Test extends App {

  // Match on one argument, then match on the other inside a finally block.
  def bar(s1: Object, s2: Object): Unit = {
    s1 match { case _ => }
    try { () } finally { s2 match { case _ => } }
  }

  // Match on a null scrutinee, then a try whose finally holds a never-entered loop.
  def x = {
    null match { case _ => }
    try { 1 } finally { while(false) { } }
  }

  // Execute both shapes; reaching the end without crashing is the assertion.
  bar(null, null)
  x
}
som-snytt/dotty
tests/run/patmat-finally.scala
Scala
apache-2.0
366
import Board._
import BoardIO._
import Reduce._

object FindRightBorders {

  /** Enumerates every valid completion of the right border for a board `top`
    * that has only its top row filled in. Every returned board satisfies
    * `isValid`, and only permutations whose first entry matches the last
    * entry of the top row (9) are considered. */
  def generateRightBorders(top: Board): List[Board] = {
    (1 to 9).toArray
      .permutations
      .collect {
        // keep only permutations whose first cell equals the top row's last entry
        case perm if perm.head == 9 => top.withNewRightSide(perm)
      }
      .filter(_.isValid)
      .toList
  }

  def main(args: Array[String]): Unit = {
    println(" --- Running Step 1 ---")

    // Entries can always be renamed, so the top row may be fixed to 123456789.
    val topRow: Board = emptyBoard.withNewTopRow((1 to 9).toArray)

    // Enumerate every valid right border for that top row.
    val allBorders = generateRightBorders(topRow)
    assert(allBorders.length == 21600)
    writeToFile(allBorders, "top-right-all.txt")

    // A board with full top and right sides is structurally invariant under a
    // diagonal flip; exploit that symmetry to drop redundant entries.
    val flipSymmetries: Board => List[Board] = (b: Board) => List(b, b.mirrorAntiDiagonal)

    val uniqueBorders = reduceBoardList(allBorders, flipSymmetries)
    assert(uniqueBorders.length == 52)

    // Sanity-check the reduction, then persist the reduced set.
    verifyReduction(allBorders, uniqueBorders, flipSymmetries)
    writeToFile(uniqueBorders, "top-right-reduced.txt")
  }
}
matiasdahl/Boundary-Sudoku
src/main/scala/FindRightBorders.scala
Scala
mit
1,563
package keystoneml.nodes.learning.internal import scala.collection.mutable.ArrayBuffer import breeze.linalg._ import breeze.numerics._ import breeze.math._ import breeze.stats._ import org.apache.spark.rdd.RDD import org.apache.spark.HashPartitioner import edu.berkeley.cs.amplab.mlmatrix.util.{Utils => MLMatrixUtils} import keystoneml.pipelines.Logging import keystoneml.utils.{MatrixUtils, Stats} object ReWeightedLeastSquaresSolver extends Logging { /** * Use BCD to solve W = (X.t * (diag(B) * X) + \lambda * I) \ X.t * (B .* Y) * * @param blockSize blockSize to use for Block Coordinate Descent * @param numBlocks number of blocks in the input * @param numIter number of iterations of BCD to run * @param lambda L2 regularization parameter * @param numFeatures number of features in the input (columns in X) * @param numClasses number of classes (columns in Y) * @param trainingFeatureBlocks blocks of input features * @param labelsZm zero-mean'd labels matrix * @param weights diagonal values for the weights matrix * @param featureMean mean to be subtracted from X before the solve * * @returns model (W) split as blocks and the final residual (X*W) */ def trainWithL2( blockSize: Int, numBlocks: Int, numIter: Int, lambda: Double, numFeatures: Int, numClasses: Int, trainingFeatureBlocks: Seq[RDD[DenseVector[Double]]], labelsZm: RDD[DenseVector[Double]], weights: RDD[Double], featureMean: DenseVector[Double]) : (Seq[DenseMatrix[Double]], RDD[DenseMatrix[Double]]) = { val labelsZmMat = labelsZm.mapPartitions { iter => MatrixUtils.rowsToMatrixIter(iter) } val weightsMat = weights.mapPartitions { iter => if (iter.hasNext) { Iterator.single(DenseVector(iter.toArray)) } else { Iterator.empty } } var residual = labelsZmMat.map { l => DenseMatrix.zeros[Double](l.rows, l.cols) }.cache() // Initialize model to blockSize. 
This will be resized if its different // inside the solve loop val model = (0 until numBlocks).map { block => DenseMatrix.zeros[Double](blockSize, numClasses) }.toArray val treeBranchingFactor = weights.context.getConf.getInt( "spark.mlmatrix.treeBranchingFactor", 2).toInt val depth = math.max(math.ceil(math.log(labelsZmMat.partitions.size) / math.log(treeBranchingFactor)).toInt, 1) val aTaCache = new Array[DenseMatrix[Double]](numBlocks) (0 until numIter).foreach { pass => (0 until numBlocks).foreach { block => val aPart = trainingFeatureBlocks(block) // Zero-mean the training features val featureMeanBlock = featureMean( block * blockSize until min((block+1) * blockSize, numFeatures)) val aPartMatZm = aPart.mapPartitions { iter => val matIter = MatrixUtils.rowsToMatrixIter(iter) matIter.map { mat => (mat(*, ::) - featureMeanBlock) } }.cache().setName("aPartMatZm") aPartMatZm.count // Compute X.t * (diag(B) * X) if this the first iteration if (pass == 0) { val aTaComputed = MLMatrixUtils.treeReduce(aPartMatZm.zip(weightsMat).map { x => // Multiplication by diagonal is same as hadamard product per column x._1.t * ( x._1(::, *) :* x._2 ) }, (a: DenseMatrix[Double], b: DenseMatrix[Double]) => a += b, depth=depth) aTaCache(block) = aTaComputed model(block) = DenseMatrix.zeros[Double](aTaComputed.rows, numClasses) } val aTaBlock = aTaCache(block) val modelBC = weightsMat.context.broadcast(model(block)) val aTbBlock = MLMatrixUtils.treeReduce( aPartMatZm.zip(labelsZmMat.zip(residual.zip(weightsMat))).map { x => val featPart = x._1 val labelPart = x._2._1 val resPart = x._2._2._1 val weightPart = x._2._2._2 // TODO(shivaram): This might generate a lot of GC ? 
// Remove B.*(X * wOld) from the residual val xWOld = (featPart * modelBC.value) val resUpdated = resPart - ( xWOld(::, *) :* weightPart ) // Compute X.t * ((B .* Y) - residual) val aTbPart = featPart.t * ((labelPart(::, *) :* weightPart) - resUpdated) aTbPart }, (a: DenseMatrix[Double], b: DenseMatrix[Double]) => a += b, depth = depth) val newModel = (aTaBlock + (DenseMatrix.eye[Double](aTaBlock.rows) * lambda)) \ aTbBlock val newModelBC = weights.context.broadcast(newModel) model(block) = newModel // Update the residual by adding B.*(X * wNew) and subtracting B.*(X * wOld) val newResidual = aPartMatZm.zip(residual.zip(weightsMat)).map { part => val diffModel = newModelBC.value - modelBC.value val xWDiff = (part._1 * diffModel) part._2._1 += (xWDiff(::, *) :* part._2._2) part._2._1 }.cache().setName("residual") newResidual.count residual.unpersist() residual = newResidual aPartMatZm.unpersist() } } (model, residual) } }
amplab/keystone
src/main/scala/keystoneml/nodes/learning/internal/ReWeightedLeastSquares.scala
Scala
apache-2.0
5,252
package com.rocketfuel.sdbc.base import scala.collection._ import shapeless._ trait ParameterizedQuery { self: ParameterValue => trait ParameterizedQuery[Self <: ParameterizedQuery[Self]] extends Logger { override protected def logClass: Class[_] = classOf[com.rocketfuel.sdbc.base.ParameterizedQuery] def parameters: Parameters def parameterPositions: ParameterPositions /** * Parameters that you must set before running the query. */ lazy val unassignedParameters: Set[String] = parameterPositions.keySet -- parameters.keySet /** * All the parameters have values. */ def isComplete: Boolean = parameterPositions.size == parameters.size def +(parameterKvp: (String, ParameterValue)): Self = on(parameterKvp) def ++(parameterKvps: IterableOnce[(String, ParameterValue)]): Self = onParameters(parameterKvps.toMap) /** * The same query, with no parameters having values. */ def clear: Self = subclassConstructor(parameters = Parameters.empty) def on(parameters: (String, ParameterValue)*): Self = { onParameters(parameters.toMap) } def onParameters(additionalParameters: Parameters): Self = { subclassConstructor(setParameters(additionalParameters)) } def onProduct[ A, Repr <: HList, Key <: Symbol, AsParameters <: HList ](t: A )(implicit p: Parameters.Products[A, Repr, Key, AsParameters] ): Self = { subclassConstructor(setParameters(Parameters.product(t))) } def onRecord[ Repr <: HList, Key <: Symbol, AsParameters <: HList ](t: Repr )(implicit r: Parameters.Records[Repr, Key, AsParameters] ): Self = { subclassConstructor(setParameters(Parameters.record(t))) } protected def filter(p: Parameters): Parameters = { p.filter(kvp => parameterPositions.contains(kvp._1)) } protected def setParameters(additionalParameters: Parameters): Parameters = { val parametersHavingPositions = filter(additionalParameters) parameters ++ parametersHavingPositions } protected def subclassConstructor(parameters: Parameters): Self //Subtractable implementation def -(parameterName: String): Self = 
subclassConstructor(parameters = parameters - parameterName) def --(parameterNames: String*): Self = subclassConstructor(parameters = parameters -- parameterNames) } }
rocketfuel/sdbc
base/src/main/scala-2.13/com/rocketfuel/sdbc/base/ParameterizedQuery.scala
Scala
bsd-3-clause
2,462
package Problems import scala.io.Source object p22 extends Problem { lazy val names = Source.fromInputStream(getClass.getResourceAsStream("p022_names.txt")).mkString .split(",").map(_.replace("\\"", "")).sorted.zipWithIndex val A = 'A'.toLong - 1 override def solve(): Long = names.map { case (name, idx) => (idx + 1) * name.map(c => c.toLong - A).sum }.sum }
catap/scala-euler
src/main/scala/Problems/p22.scala
Scala
unlicense
380
/**Copyright (c) 2012-2015 Snowplow Analytics Ltd. All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ package com.snowplowanalytics.snowplow.enrich.common package enrichments package registry // Java import java.lang.{ Float => JFloat } // Specs2 import org.specs2.Specification // Joda import org.joda.time.DateTime // Json4s import org.json4s._ import org.json4s.jackson.JsonMethods.parse // Snowplow import com.snowplowanalytics.iglu.client.SchemaKey // Scala weather import com.snowplowanalytics.weather._ object WeatherEnrichmentSpec { val OwmApiKey = "OWM_KEY" } import WeatherEnrichmentSpec._ class WeatherEnrichmentSpec extends Specification { def is = "This is a specification to test the WeatherEnrichment" ^ skipAllIf(sys.env.get(OwmApiKey).isEmpty) ^ // Actually only e4 and e6 need to be skipped "Fail event for null time" ! e1^ "Fail event for invalid key" ! e5^ "Fail with invalid cache size" ! e3^ "Weather enrichment client is lazy" ! e2^ "Extract weather stamp" ! e4^ "Extract humidity" ! e6^ "Extract configuration" ! e7^ "Check time stamp transformation" ! 
e8^ end lazy val validAppKey = sys.env.get(OwmApiKey).getOrElse(throw new IllegalStateException(s"No ${OwmApiKey} environment variable found, test should have been skipped")) object invalidEvent { var lat: JFloat = 70.98224f var lon: JFloat = 70.98224f var time: DateTime = null } object validEvent { var lat: JFloat = 20.713052f var lon: JFloat = 70.98224f var time: DateTime = new DateTime("2015-08-29T23:56:01.003+00:00") } def e1 = { val enr = WeatherEnrichment("KEY", 5200, 1, "history.openweathermap.org", 10) val stamp = enr.getWeatherContext(Option(invalidEvent.lat), Option(invalidEvent.lon), Option(invalidEvent.time)) stamp.toEither must beLeft.like { case e => e must contain("tstamp: None") } } def e2 = WeatherEnrichment("KEY", 0, 1, "history.openweathermap.org", 5) must not(throwA[IllegalArgumentException]) def e3 = { val enr = WeatherEnrichment("KEY", 0, 1, "history.openweathermap.org", 5) val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time)) stamp.toEither must beLeft.like { case e => { e must contain("IllegalArgumentException") } } } def e4 = { val enr = WeatherEnrichment(validAppKey, 5200, 1, "history.openweathermap.org", 10) val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time)) stamp.toEither must beRight } def e5 = { val enr = WeatherEnrichment("KEY", 5200, 1, "history.openweathermap.org", 10) val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time)) stamp.toEither must beLeft.like { case e => e must contain("AuthorizationError") } } def e6 = { val enr = WeatherEnrichment(validAppKey, 5200, 1, "history.openweathermap.org", 15) val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time)) stamp.toEither must beRight.like { case weather: JValue => { val temp = weather.findField { case JField("humidity", _) => true; case _ => false } temp must 
beSome(("humidity", JDouble(100.0))) } } } def e7 = { val configJson = parse( """ |{ | "enabled": true, | "vendor": "com.snowplowanalytics.snowplow.enrichments", | "name": "weather_enrichment_config", | "parameters": { | "apiKey": "{{KEY}}", | "cacheSize": 5100, | "geoPrecision": 1, | "apiHost": "history.openweathermap.org", | "timeout": 5 | } |} """.stripMargin) val config = WeatherEnrichmentConfig.parse(configJson, SchemaKey("com.snowplowanalytics.snowplow.enrichments", "weather_enrichment_config", "jsonschema", "1-0-0")) config.toEither must beRight(WeatherEnrichment(apiKey = "{{KEY}}", geoPrecision = 1, cacheSize = 5100, apiHost = "history.openweathermap.org", timeout = 5)) } def e8 = { implicit val formats = DefaultFormats val enr = WeatherEnrichment(validAppKey, 2, 1, "history.openweathermap.org", 15) val stamp = enr.getWeatherContext(Option(validEvent.lat), Option(validEvent.lon), Option(validEvent.time)) stamp.toEither must beRight.like { // successful request case weather: JValue => { val e = (weather \\ "data").extractOpt[TransformedWeather] e.map(_.dt) must beSome.like { // succesfull transformation case dt => dt must equalTo("2015-08-30T00:00:00.000Z") // closest stamp storing on server } } } } }
chuwy/snowplow-ci
3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/WeatherEnrichmentSpec.scala
Scala
apache-2.0
5,750
/* * Copyright 2015 Commonwealth Computer Research, Inc. * * Licensed under the Apache License, Version 2.0 (the License); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.locationtech.geomesa.feature.kryo import java.io.{ByteArrayInputStream, ByteArrayOutputStream} import java.util.{Date, UUID} import org.apache.commons.codec.binary.Base64 import org.junit.runner.RunWith import org.locationtech.geomesa.feature.EncodingOption.EncodingOptions import org.locationtech.geomesa.feature._ import org.locationtech.geomesa.security.SecurityUtils import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes import org.specs2.mutable.Specification import org.specs2.runner.JUnitRunner import scala.collection.JavaConversions._ import scala.languageFeature.postfixOps @RunWith(classOf[JUnitRunner]) class KryoFeatureSerializerTest extends Specification { "KryoFeatureSerializer" should { "correctly serialize and deserialize basic features" in { val spec = "a:Integer,b:Float,c:Double,d:Long,e:UUID,f:String,g:Boolean,dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) sf.setAttribute("a", "1") sf.setAttribute("b", "1.0") sf.setAttribute("c", "5.37") sf.setAttribute("d", "-100") sf.setAttribute("e", UUID.randomUUID()) sf.setAttribute("f", "mystring") sf.setAttribute("g", java.lang.Boolean.FALSE) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") "using byte arrays" >> { val serializer = KryoFeatureSerializer(sft) val 
serialized = serializer.write(sf) val deserialized = serializer.read(serialized) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } "using streams" >> { val serializer = KryoFeatureSerializer(sft) val out = new ByteArrayOutputStream() serializer.write(sf, out) val in = new ByteArrayInputStream(out.toByteArray) val deserialized = serializer.read(in) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } "without user data" >> { val sft = SimpleFeatureTypes.createType("testType", "dtg:Date,*geom:Point:srid=4326") val sf = new ScalaSimpleFeature("testId", sft) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") val vis = "u&usa&fouo" SecurityUtils.setFeatureVisibility(sf, vis) val serializer = KryoFeatureSerializer(sft, EncodingOptions.none) val serialized = serializer.write(sf) val deserialized = serializer.read(serialized) SecurityUtils.getVisibility(deserialized) must beNull } "with user data" >> { val sft = SimpleFeatureTypes.createType("testType", "dtg:Date,*geom:Point:srid=4326") val sf = new ScalaSimpleFeature("testId", sft) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") val vis = "u&usa&fouo" SecurityUtils.setFeatureVisibility(sf, vis) val serializer = KryoFeatureSerializer(sft, EncodingOptions.withUserData) val serialized = serializer.write(sf) val deserialized = serializer.read(serialized) deserialized.getUserData mustEqual sf.getUserData } } "correctly serialize and deserialize different geometries" in { val spec = "a:LineString,b:Polygon,c:MultiPoint,d:MultiLineString,e:MultiPolygon," + "f:GeometryCollection,dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) sf.setAttribute("a", "LINESTRING(0 2, 2 0, 8 6)") sf.setAttribute("b", 
"POLYGON((20 10, 30 0, 40 10, 30 20, 20 10))") sf.setAttribute("c", "MULTIPOINT(0 0, 2 2)") sf.setAttribute("d", "MULTILINESTRING((0 2, 2 0, 8 6),(0 2, 2 0, 8 6))") sf.setAttribute("e", "MULTIPOLYGON(((-1 0, 0 1, 1 0, 0 -1, -1 0)), ((-2 6, 1 6, 1 3, -2 3, -2 6)), " + "((-1 5, 2 5, 2 2, -1 2, -1 5)))") sf.setAttribute("f", "MULTIPOINT(0 0, 2 2)") sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(55.0 49.0)") "using byte arrays" >> { val serializer = KryoFeatureSerializer(sft) val serialized = serializer.write(sf) val deserialized = serializer.read(serialized) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } "using streams" >> { val serializer = KryoFeatureSerializer(sft) val out = new ByteArrayOutputStream() serializer.write(sf, out) val in = new ByteArrayInputStream(out.toByteArray) val deserialized = serializer.read(in) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } } "correctly serialize and deserialize collection types" in { val spec = "a:Integer,m:Map[String,Double],l:List[Date],dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) sf.setAttribute("a", "1") sf.setAttribute("m", Map("test1" -> 1.0, "test2" -> 2.0)) sf.setAttribute("l", List(new Date(100), new Date(200))) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") "using byte arrays" >> { val serializer = KryoFeatureSerializer(sft) val serialized = serializer.write(sf) val deserialized = serializer.read(serialized) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } "using streams" >> { val serializer = KryoFeatureSerializer(sft) val out = new ByteArrayOutputStream() serializer.write(sf, out) val in = new 
ByteArrayInputStream(out.toByteArray) val deserialized = serializer.read(in) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } } // NB: this doesn't actually seem to cause the error I was seeing, but // ScaldingDelimitedIngestJobTest in geomesa-tools does cause it... "correctly serialize with a type having the same field names but different field types" in { val spec = "a:Integer,m:List[String],l:Map[Double,Date],dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) sf.setAttribute("a", "1") sf.setAttribute("m", List("test1", "test2")) sf.setAttribute("l", Map(1.0 -> new Date(100), 2.0 -> new Date(200))) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") "using byte arrays" >> { val serializer = KryoFeatureSerializer(sft) val serialized = serializer.write(sf) val deserialized = serializer.read(serialized) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } "using streams" >> { val serializer = KryoFeatureSerializer(sft) val out = new ByteArrayOutputStream() serializer.write(sf, out) val in = new ByteArrayInputStream(out.toByteArray) val deserialized = serializer.read(in) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } } "correctly serialize and deserialize null values" in { val spec = "a:Integer,b:Float,c:Double,d:Long,e:UUID,f:String,g:Boolean,l:List,m:Map," + "dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) "using byte arrays" >> { val serializer = KryoFeatureSerializer(sft) val serialized = serializer.write(sf) val deserialized = serializer.read(serialized) deserialized must not beNull; deserialized.getType mustEqual 
sf.getType deserialized.getAttributes.foreach(_ must beNull) deserialized.getAttributes mustEqual sf.getAttributes } "using streams" >> { val serializer = KryoFeatureSerializer(sft) val out = new ByteArrayOutputStream() serializer.write(sf, out) val in = new ByteArrayInputStream(out.toByteArray) val deserialized = serializer.read(in) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes.foreach(_ must beNull) deserialized.getAttributes mustEqual sf.getAttributes } } "correctly project features" in { val sft = SimpleFeatureTypes.createType("fullType", "name:String,*geom:Point,dtg:Date") val projectedSft = SimpleFeatureTypes.createType("projectedType", "*geom:Point") val sf = new ScalaSimpleFeature("testFeature", sft) sf.setAttribute("name", "foo") sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") "when serializing" >> { val serializer = KryoFeatureSerializer(sft, projectedSft, EncodingOptions.none) val deserializer = KryoFeatureSerializer(projectedSft) val serialized = serializer.write(sf) val deserialized = deserializer.read(serialized) deserialized.getID mustEqual sf.getID deserialized.getDefaultGeometry mustEqual sf.getDefaultGeometry deserialized.getAttributeCount mustEqual 1 } "when deserializing" >> { val serializer = KryoFeatureSerializer(sft) val deserializer = KryoFeatureSerializer(sft, projectedSft, EncodingOptions.none) val serialized = serializer.write(sf) val deserialized = deserializer.read(serialized) deserialized.getID mustEqual sf.getID deserialized.getDefaultGeometry mustEqual sf.getDefaultGeometry deserialized.getAttributeCount mustEqual 1 } } "be backwards compatible" in { val spec = "dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") val serializer = KryoFeatureSerializer(sft) // 
base64 encoded bytes from version 0 of the kryo feature serializer val version0SerializedBase64 = "AGZha2Vp5AEAAAE7+I60ABUAAAAAAUBGgAAAAAAAQEiAAAAAAAA=" val version0Bytes = Base64.decodeBase64(version0SerializedBase64) val deserialized = serializer.read(version0Bytes) deserialized must not beNull; deserialized.getType mustEqual sf.getType deserialized.getAttributes mustEqual sf.getAttributes } "be faster than old version" in { skipped("integration") val spec = "dtg:Date,*geom:Point:srid=4326" val sft = SimpleFeatureTypes.createType("testType", spec) val sf = new ScalaSimpleFeature("fakeid", sft) sf.setAttribute("dtg", "2013-01-02T00:00:00.000Z") sf.setAttribute("geom", "POINT(45.0 49.0)") val serializer0 = KryoFeatureSerializer(sft) val serializer1 = KryoFeatureSerializer(sft) val version0SerializedBase64 = "AGZha2Vp5AEAAAE7+I60ABUAAAAAAUBGgAAAAAAAQEiAAAAAAAA=" val version0Bytes = Base64.decodeBase64(version0SerializedBase64) val version1Bytes = serializer1.write(sf) // prime the serialization serializer0.read(version0Bytes) serializer1.read(version1Bytes) val start2 = System.currentTimeMillis() (0 until 1000000).foreach { _ => serializer0.read(version0Bytes) } println(s"took ${System.currentTimeMillis() - start2}ms") val start = System.currentTimeMillis() (0 until 1000000).foreach { _ => serializer1.read(version1Bytes) } println(s"took ${System.currentTimeMillis() - start}ms") val start3 = System.currentTimeMillis() (0 until 1000000).foreach { _ => serializer1.read(version1Bytes) } println(s"took ${System.currentTimeMillis() - start3}ms") val start4 = System.currentTimeMillis() (0 until 1000000).foreach { _ => serializer0.read(version0Bytes) } println(s"took ${System.currentTimeMillis() - start4}ms") println println success } } }
mmatz-ccri/geomesa
geomesa-feature/src/test/scala/org/locationtech/geomesa/feature/kryo/KryoFeatureSerializerTest.scala
Scala
apache-2.0
13,531
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.graphx.impl import scala.reflect.ClassTag import org.apache.spark.Logging import org.apache.spark.graphx._ import org.apache.spark.graphx.util.collection.PrimitiveKeyOpenHashMap import org.apache.spark.util.collection.BitSet private[graphx] object VertexPartition { def apply[VD: ClassTag](iter: Iterator[(VertexId, VD)]): VertexPartition[VD] = { val map = new PrimitiveKeyOpenHashMap[VertexId, VD] iter.foreach { case (k, v) => map(k) = v } new VertexPartition(map.keySet, map._values, map.keySet.getBitSet) } def apply[VD: ClassTag](iter: Iterator[(VertexId, VD)], mergeFunc: (VD, VD) => VD) : VertexPartition[VD] = { val map = new PrimitiveKeyOpenHashMap[VertexId, VD] iter.foreach { case (k, v) => map.setMerge(k, v, mergeFunc) } new VertexPartition(map.keySet, map._values, map.keySet.getBitSet) } } private[graphx] class VertexPartition[@specialized(Long, Int, Double) VD: ClassTag]( val index: VertexIdToIndexMap, val values: Array[VD], val mask: BitSet, /** A set of vids of active vertices. May contain vids not in index due to join rewrite. 
*/ private val activeSet: Option[VertexSet] = None) extends Logging { val capacity: Int = index.capacity def size: Int = mask.cardinality() /** Return the vertex attribute for the given vertex ID. */ def apply(vid: VertexId): VD = values(index.getPos(vid)) def isDefined(vid: VertexId): Boolean = { val pos = index.getPos(vid) pos >= 0 && mask.get(pos) } /** Look up vid in activeSet, throwing an exception if it is None. */ def isActive(vid: VertexId): Boolean = { activeSet.get.contains(vid) } /** The number of active vertices, if any exist. */ def numActives: Option[Int] = activeSet.map(_.size) /** * Pass each vertex attribute along with the vertex id through a map * function and retain the original RDD's partitioning and index. * * @tparam VD2 the type returned by the map function * * @param f the function applied to each vertex id and vertex * attribute in the RDD * * @return a new VertexPartition with values obtained by applying `f` to * each of the entries in the original VertexRDD. The resulting * VertexPartition retains the same index. */ def map[VD2: ClassTag](f: (VertexId, VD) => VD2): VertexPartition[VD2] = { // Construct a view of the map transformation val newValues = new Array[VD2](capacity) var i = mask.nextSetBit(0) while (i >= 0) { newValues(i) = f(index.getValue(i), values(i)) i = mask.nextSetBit(i + 1) } new VertexPartition[VD2](index, newValues, mask) } /** * Restrict the vertex set to the set of vertices satisfying the given predicate. * * @param pred the user defined predicate * * @note The vertex set preserves the original index structure which means that the returned * RDD can be easily joined with the original vertex-set. Furthermore, the filter only * modifies the bitmap index and so no new values are allocated. 
*/ def filter(pred: (VertexId, VD) => Boolean): VertexPartition[VD] = { // Allocate the array to store the results into val newMask = new BitSet(capacity) // Iterate over the active bits in the old mask and evaluate the predicate var i = mask.nextSetBit(0) while (i >= 0) { if (pred(index.getValue(i), values(i))) { newMask.set(i) } i = mask.nextSetBit(i + 1) } new VertexPartition(index, values, newMask) } /** * Hides vertices that are the same between this and other. For vertices that are different, keeps * the values from `other`. The indices of `this` and `other` must be the same. */ def diff(other: VertexPartition[VD]): VertexPartition[VD] = { if (index != other.index) { logWarning("Diffing two VertexPartitions with different indexes is slow.") diff(createUsingIndex(other.iterator)) } else { val newMask = mask & other.mask var i = newMask.nextSetBit(0) while (i >= 0) { if (values(i) == other.values(i)) { newMask.unset(i) } i = newMask.nextSetBit(i + 1) } new VertexPartition(index, other.values, newMask) } } /** Left outer join another VertexPartition. */ def leftJoin[VD2: ClassTag, VD3: ClassTag] (other: VertexPartition[VD2]) (f: (VertexId, VD, Option[VD2]) => VD3): VertexPartition[VD3] = { if (index != other.index) { logWarning("Joining two VertexPartitions with different indexes is slow.") leftJoin(createUsingIndex(other.iterator))(f) } else { val newValues = new Array[VD3](capacity) var i = mask.nextSetBit(0) while (i >= 0) { val otherV: Option[VD2] = if (other.mask.get(i)) Some(other.values(i)) else None newValues(i) = f(index.getValue(i), values(i), otherV) i = mask.nextSetBit(i + 1) } new VertexPartition(index, newValues, mask) } } /** Left outer join another iterator of messages. */ def leftJoin[VD2: ClassTag, VD3: ClassTag] (other: Iterator[(VertexId, VD2)]) (f: (VertexId, VD, Option[VD2]) => VD3): VertexPartition[VD3] = { leftJoin(createUsingIndex(other))(f) } /** Inner join another VertexPartition. 
*/ def innerJoin[U: ClassTag, VD2: ClassTag](other: VertexPartition[U]) (f: (VertexId, VD, U) => VD2): VertexPartition[VD2] = { if (index != other.index) { logWarning("Joining two VertexPartitions with different indexes is slow.") innerJoin(createUsingIndex(other.iterator))(f) } else { val newMask = mask & other.mask val newValues = new Array[VD2](capacity) var i = newMask.nextSetBit(0) while (i >= 0) { newValues(i) = f(index.getValue(i), values(i), other.values(i)) i = newMask.nextSetBit(i + 1) } new VertexPartition(index, newValues, newMask) } } /** * Inner join an iterator of messages. */ def innerJoin[U: ClassTag, VD2: ClassTag] (iter: Iterator[Product2[VertexId, U]]) (f: (VertexId, VD, U) => VD2): VertexPartition[VD2] = { innerJoin(createUsingIndex(iter))(f) } /** * Similar effect as aggregateUsingIndex((a, b) => a) */ def createUsingIndex[VD2: ClassTag](iter: Iterator[Product2[VertexId, VD2]]) : VertexPartition[VD2] = { val newMask = new BitSet(capacity) val newValues = new Array[VD2](capacity) iter.foreach { case (vid, vdata) => val pos = index.getPos(vid) if (pos >= 0) { newMask.set(pos) newValues(pos) = vdata } } new VertexPartition[VD2](index, newValues, newMask) } /** * Similar to innerJoin, but vertices from the left side that don't appear in iter will remain in * the partition, hidden by the bitmask. 
*/ def innerJoinKeepLeft(iter: Iterator[Product2[VertexId, VD]]): VertexPartition[VD] = { val newMask = new BitSet(capacity) val newValues = new Array[VD](capacity) System.arraycopy(values, 0, newValues, 0, newValues.length) iter.foreach { case (vid, vdata) => val pos = index.getPos(vid) if (pos >= 0) { newMask.set(pos) newValues(pos) = vdata } } new VertexPartition(index, newValues, newMask) } def aggregateUsingIndex[VD2: ClassTag]( iter: Iterator[Product2[VertexId, VD2]], reduceFunc: (VD2, VD2) => VD2): VertexPartition[VD2] = { val newMask = new BitSet(capacity) val newValues = new Array[VD2](capacity) iter.foreach { product => val vid = product._1 val vdata = product._2 val pos = index.getPos(vid) if (pos >= 0) { if (newMask.get(pos)) { newValues(pos) = reduceFunc(newValues(pos), vdata) } else { // otherwise just store the new value newMask.set(pos) newValues(pos) = vdata } } } new VertexPartition[VD2](index, newValues, newMask) } def replaceActives(iter: Iterator[VertexId]): VertexPartition[VD] = { val newActiveSet = new VertexSet iter.foreach(newActiveSet.add(_)) new VertexPartition(index, values, mask, Some(newActiveSet)) } /** * Construct a new VertexPartition whose index contains only the vertices in the mask. */ def reindex(): VertexPartition[VD] = { val hashMap = new PrimitiveKeyOpenHashMap[VertexId, VD] val arbitraryMerge = (a: VD, b: VD) => a for ((k, v) <- this.iterator) { hashMap.setMerge(k, v, arbitraryMerge) } new VertexPartition(hashMap.keySet, hashMap._values, hashMap.keySet.getBitSet) } def iterator: Iterator[(VertexId, VD)] = mask.iterator.map(ind => (index.getValue(ind), values(ind))) def vidIterator: Iterator[VertexId] = mask.iterator.map(ind => index.getValue(ind)) }
sryza/spark
graphx/src/main/scala/org/apache/spark/graphx/impl/VertexPartition.scala
Scala
apache-2.0
9,619
package opennlp.scalabha.tag import opennlp.scalabha.util.CollectionUtils._ import opennlp.scalabha.tag.hmm.HmmUtils /** * Tag sequences of symbols. * * @tparam Sym visible symbols in the sequences * @tparam Tag tags applied to symbols */ trait Tagger[Sym, Tag] { /** * Tag each sequence using this model. * * @param rawSequences unlabeled data to be tagged * @return sequences tagged by the model */ final def tag(rawSequences: Seq[IndexedSeq[Sym]]): Seq[IndexedSeq[(Sym, Tag)]] = (rawSequences zip tagAll(rawSequences)).mapt((ws, tagged) => tagged.getOrElse(throw new RuntimeException("could not tag sentence: '%s'".format(ws.mkString(" "))))) /** * Tag each sequence using this model. * * @param rawSequences unlabeled data to be tagged * @return sequences tagged by the model */ final def tagAll(rawSequences: Seq[IndexedSeq[Sym]]): Seq[Option[IndexedSeq[(Sym, Tag)]]] = (rawSequences zip tags(rawSequences)).map { case (ws, ts) => ts.map(ws zip _) } /** * Tag each sequence using this model. * * @param rawSequences unlabeled data to be tagged * @return sequences of tags returned from the model */ protected def tags(rawSequences: Seq[IndexedSeq[Sym]]): Seq[Option[IndexedSeq[Tag]]] = rawSequences.map(tagSequence) /** * Tag the sequence using this model. * * @param sequence a single sequence to be tagged * @return the tagging of the input sequence assigned by the model */ protected def tagSequence(sequence: IndexedSeq[Sym]): Option[IndexedSeq[Tag]] = sys.error("not implemented") } /** * Factory for training a Tagger directly from labeled data. * * @tparam Sym visible symbols in the sequences * @tparam Tag tags applied to symbols */ trait SupervisedTaggerTrainer[Sym, Tag] { /** * Train a Tagger directly from labeled data. 
* * @param taggedSequences labeled sequences to use for training the model * @param tagDict a tag dictionary * @return a trained Tagger */ final def train(taggedSequences: Iterable[IndexedSeq[(Sym, Tag)]]): Tagger[Sym, Tag] = { val (transitionCounts, emissionCounts) = HmmUtils.getCountsFromTagged(taggedSequences) makeTagger(transitionCounts, emissionCounts) } /** * Train a Tagger directly from counts. * * @param taggedSequences labeled sequences to use for training the model * @param tagDict a tag dictionary * @return a trained Tagger */ def makeTagger[N: Numeric](transitionCounts: Map[Option[Tag], Map[Option[Tag], N]], emissionCounts: Map[Option[Tag], Map[Option[Sym], N]]): Tagger[Sym, Tag] } /** * Factory for training a Tagger from unlabeled data. * * @tparam Sym visible symbols in the sequences * @tparam Tag tags applied to symbols */ trait TypesupervisedTaggerTrainer[Sym, Tag] { /** * Train a Tagger only on unlabeled data. Initialize a starting point * (initial tagging) from random uniformed draws from the tag dictionary. * * @param rawSequences unlabeled sequences to be used as unsupervised training data * @param tagDict a mapping from symbols to their possible tags * @return a trained Tagger */ def train( rawSequences: Iterable[IndexedSeq[Sym]], tagDict: TagDict[Sym, Tag]): Tagger[Sym, Tag] /** * Train a Tagger from a combination of unlabeled data and GOLD labeled data. * The labeled data is ONLY used for prior counts; it is NOT iterated over * like the raw data. The tagged data could even be EMPTY. 
* * @param rawSequences unlabeled sequences to be used as unsupervised training data * @param goldTaggedSequences labeled sequences to be used as supervised training data * @param tagDict a mapping from symbols to their possible tags * @return a trained Tagger */ def trainWithSomeGoldLabeled( rawSequences: Iterable[IndexedSeq[Sym]], goldTaggedSequences: Iterable[IndexedSeq[(Sym, Tag)]], tagDict: TagDict[Sym, Tag]): Tagger[Sym, Tag] /** * Train a Tagger from a combination of unlabeled data and NOISY labeled data. * The labeled data is NOT used for prior counts, NOR is it iterated over * like the raw data. The tagged data could even be EMPTY. * * @param rawSequences unlabeled sequences to be used as unsupervised training data * @param noisyTaggedSequences labeled sequences to be used as supervised training data * @param tagDict a mapping from symbols to their possible tags * @return a trained Tagger */ def trainWithSomeNoisyLabeled( rawSequences: Iterable[IndexedSeq[Sym]], noisyTaggedSequences: Iterable[IndexedSeq[(Sym, Tag)]], tagDict: TagDict[Sym, Tag]): Tagger[Sym, Tag] }
eponvert/Scalabha
src/main/scala/opennlp/scalabha/tag/Tagger.scala
Scala
apache-2.0
4,722
// Copyright (C) 2014 Fehmi Can Saglam (@fehmicans) and contributors. // See the LICENCE.txt file distributed with this work for additional // information regarding copyright ownership. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package reactivemongo.extensions.dao import reactivemongo.api.{ MongoDriver, DefaultDB } import reactivemongo.extensions.util.Misc.UUID import scala.concurrent.ExecutionContext.Implicits.global object MongoContext { val driver = new MongoDriver val connection = driver.connection(List("localhost")) def db: DefaultDB = connection("test-reactivemongo-extensions") def randomDb: DefaultDB = connection(UUID()) }
ReactiveMongo/ReactiveMongo-Extensions
core/src/test/scala/dao/MongoContext.scala
Scala
apache-2.0
1,161
/* * OpenVC, an open source VHDL compiler/simulator * Copyright (C) 2010 Christian Reisinger * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package at.jku.ssw.openvc /** * This object is the compiler that provides the `compile` method to compile a compilation unit. * @author <a href="mailto:chr_reisinger@yahoo.de">Christian Reisinger</a> */ object VHDLCompiler { import parser.SyntaxAnalyzer import semanticAnalyzer.{PreAnalyzerTransformer, SemanticAnalyzer} import backend.BackendPhase /** * compiles a compilation unit and returns the result * * @example {{{ * val unit = VHDLCompiler.compile(new CompilationUnit(SourceFile.fromFile("test.vhd"), configuration))) * unit.printMessages(new java.io.PrintWriter(System.out)) * }}} * @param unit the compilation unit to compile * @return a compilation unit with the [[at.jku.ssw.openvc.ast.ASTNode]] and the compiler messages */ def compile(unit: CompilationUnit): CompilationUnit = { import annotation.tailrec import unit.configuration @tailrec def run(phases: Seq[Phase], unit: CompilationUnit): CompilationUnit = phases match { case Seq() => unit case Seq(phase, xs@_*) => val phaseStart = System.currentTimeMillis val newUnit = phase(unit) val phaseEnd = System.currentTimeMillis - phaseStart if (configuration.XdebugCompiler) println(phase.name + " time:" + phaseEnd) run(xs, newUnit) } val phases = configuration.XrunOnlyToPhase match { case None => 
AllPhases case Some(phase) => AllPhases.take(AllPhases.indexWhere(_.name == phase) + 1) } run(phases, unit) } /** all phases needed by the compiler to completely compile one compilation unit */ val AllPhases = Seq(SyntaxAnalyzer, PreAnalyzerTransformer, SemanticAnalyzer, BackendPhase) }
chrreisinger/OpenVC
src/main/scala/at/jku/ssw/openvc/VHDLCompiler.scala
Scala
gpl-3.0
2,493
/** * Copyright 2013-2015 PayPal * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.paypal.cascade.common.tests.trys import org.specs2._ import org.scalacheck.Prop.{Exception => PropException, _} import org.scalacheck.Arbitrary._ import scala.util.Try import com.paypal.cascade.common.trys._ import com.paypal.cascade.common.tests.scalacheck._ /** * RichTry is a convenience wrapper for converting Try to Either */ class RichTrySpecs extends Specification with ScalaCheck { override def is = s2""" RichTry is an implicit wrapper for Try objects toEither should on a Try[A] Success, return an Either[Throwable, A] Right with the Success value ${ToEither.SuccessCase().ok} on a Try[A] Failure, return an Either[Throwable, A] Left with the Failure exception ${ToEither.FailureCase().fails} toEither[LeftT] should on a Try[A] Success, return an Either[LeftT, A] Right with the Success value ${ToEitherWithConversion.SuccessCase().ok} on a Try[A] Failure, return an Either[LeftT, A] Left with the converted Failure value ${ToEitherWithConversion.FailureCase().fails} toFuture should on a Try[A] Success, return a Future.successful(value) ${ToFuture.SuccessCase().ok} on a Try[A] Failure, return a Future.failed(value) ${ToFuture.FailureCase().fails} """ object ToEither { case class SuccessCase() { def ok = forAll(arbitrary[String]) { s => Try { s }.toEither must beRight.like { case v: String => v must beEqualTo(s) } } } case class FailureCase() { def fails = forAll(arbitrary[Exception]) { e => 
Try[String] { throw e }.toEither must beLeft.like { case v: Exception => v must beEqualTo(e) } } } } object ToEitherWithConversion { case class SuccessCase() { def ok = forAll(arbitrary[String]) { s => Try { s }.toEither[Int](_.getMessage.length) must beRight.like { case v: String => v must beEqualTo(s) } } } case class FailureCase() { def fails = forAll(arbitrary[Exception], arbitrary[Int]) { (e, i) => Try[String] { throw e }.toEither[Int](_ => i) must beLeft.like { case v: Int => v must beEqualTo(i) } } } } object ToFuture { case class SuccessCase() { def ok = forAll(arbitrary[String]) { s => val res = Try { s }.toFuture res.value.get must beASuccessfulTry } } case class FailureCase() { def fails = forAll(arbitrary[Exception]) { e => val res = Try { throw e }.toFuture res.value.get must beAFailedTry } } } }
2rs2ts/cascade
common/src/test/scala/com/paypal/cascade/common/tests/trys/RichTrySpecs.scala
Scala
apache-2.0
3,239
package stormlantern.consul.client.dao import java.util.UUID import org.parboiled.common.Base64 import spray.json._ import scala.util.control.NonFatal trait ConsulHttpProtocol extends DefaultJsonProtocol { implicit val uuidFormat = new JsonFormat[UUID] { override def read(json: JsValue): UUID = json match { case JsString(uuid) => try { UUID.fromString(uuid) } catch { case NonFatal(e) => deserializationError("Expected UUID, but got " + uuid) } case x => deserializationError("Expected UUID as JsString, but got " + x) } override def write(obj: UUID): JsValue = JsString(obj.toString) } implicit val binaryDataFormat = new JsonFormat[BinaryData] { override def read(json: JsValue): BinaryData = json match { case JsString(data) => try { BinaryData(Base64.rfc2045().decode(data)) } catch { case NonFatal(e) => deserializationError("Expected base64 encoded binary data, but got " + data) } case x => deserializationError("Expected base64 encoded binary data as JsString, but got " + x) } override def write(obj: BinaryData): JsValue = JsString(Base64.rfc2045().encodeToString(obj.data, false)) } implicit val serviceFormat = jsonFormat( (node: String, address: String, serviceId: String, serviceName: String, serviceTags: Option[Set[String]], serviceAddress: String, servicePort: Int) => ServiceInstance(node, address, serviceId, serviceName, serviceTags.getOrElse(Set.empty), serviceAddress, servicePort), "Node", "Address", "ServiceID", "ServiceName", "ServiceTags", "ServiceAddress", "ServicePort") implicit val httpCheckFormat = jsonFormat(HttpHealthCheck, "HTTP", "Interval") implicit val scriptCheckFormat = jsonFormat(ScriptHealthCheck, "Script", "Interval") implicit val ttlCheckFormat = jsonFormat(TTLHealthCheck, "TTL") implicit val checkWriter = lift { new JsonWriter[HealthCheck] { override def write(obj: HealthCheck): JsValue = obj match { case obj: ScriptHealthCheck => obj.toJson case obj: HttpHealthCheck => obj.toJson case obj: TTLHealthCheck => obj.toJson } } } implicit val 
serviceRegistrationFormat = jsonFormat(ServiceRegistration, "Name", "ID", "Tags", "Address", "Port", "Check") implicit val sessionCreationFormat = jsonFormat(SessionCreation, "LockDelay", "Name", "Node", "Checks", "Behavior", "TTL") implicit val keyDataFormat = jsonFormat(KeyData, "Key", "CreateIndex", "ModifyIndex", "LockIndex", "Flags", "Value", "Session") implicit val sessionInfoFormat = jsonFormat(SessionInfo, "LockDelay", "Checks", "Node", "ID", "CreateIndex", "Name", "Behavior", "TTL") }
derjust/reactive-consul
client/src/main/scala/stormlantern/consul/client/dao/ConsulHttpProtocol.scala
Scala
mit
2,692
package spire package syntax package std import spire.math.ConvertableTo trait IntSyntax { implicit def literalIntOps(n: Int): LiteralIntOps = new LiteralIntOps(n) implicit def intToA[A](n:Int)(implicit c:ConvertableTo[A]): A = c.fromInt(n) } trait LongSyntax { implicit def literalLongOps(n: Long): LiteralLongOps = new LiteralLongOps(n) } trait DoubleSyntax { implicit def literalDoubleOps(n: Double): LiteralDoubleOps = new LiteralDoubleOps(n) } trait BigIntSyntax { implicit def literalBigIntOps(b: BigInt): LiteralBigIntOps = new LiteralBigIntOps(b) } trait ArraySyntax { implicit def arrayOps[@sp A](lhs:Array[A]): ArrayOps[A] = new ArrayOps(lhs) } trait SeqSyntax { implicit def seqOps[@sp A, CC[A] <: Iterable[A]](lhs:CC[A]): SeqOps[A, CC] = new SeqOps[A, CC](lhs) implicit def indexedSeqOps[@sp A, CC[A] <: IndexedSeq[A]](lhs:CC[A]): IndexedSeqOps[A, CC] = new IndexedSeqOps[A, CC](lhs) }
non/spire
core/src/main/scala/spire/syntax/std/Syntax.scala
Scala
mit
922
package slick.lifted import slick.ast.{LiteralNode, IfThenElse, Node, BaseTypedType, OptionTypedType, TypedType} import slick.SlickException /** `Case` provides a DSL for conditional statements in the query language. * An arbitrary number of `If`...`Then` expressions can be chained, optionally * followed by `Else`, e.g.: * {{{ * Case If u.id < 3 Then "low" If u.id < 6 Then "medium" Else "high" * }}} * All result expressions have to be of compatible type (modulo nullability). * If at least one of them is an `Option` type or the `Else` branch is * missing, the result is also an `Option`. */ object Case { def If[C <: Rep[_] : CanBeQueryCondition](cond: C) = new UntypedWhen(cond.toNode) final class UntypedWhen(cond: Node) { def Then[P, B](res: Rep[P])(implicit om: OptionMapperDSL.arg[B, P]#to[B, P], bType: BaseTypedType[B]) = new TypedCase[B, P](Vector(cond, res.toNode))(bType, om.liftedType(bType)) } final class TypedCase[B : TypedType, T : TypedType](clauses: Vector[Node]) extends Rep.TypedRep[Option[B]] { def toNode = IfThenElse(clauses :+ LiteralNode(null)).nullExtend def If[C <: Rep[_] : CanBeQueryCondition](cond: C) = new TypedWhen[B,T](cond.toNode, clauses) def Else(res: Rep[T]): Rep[T] = Rep.forNode(IfThenElse(clauses :+ res.toNode).nullExtend) } final class TypedWhen[B : TypedType, T : TypedType](cond: Node, parentClauses: Vector[Node]) { def Then(res: Rep[T]) = new TypedCase[B,T](parentClauses ++ Vector(cond, res.toNode)) } }
lukasz-golebiewski/slick
slick/src/main/scala/slick/lifted/Case.scala
Scala
bsd-2-clause
1,521
package weld.expressions trait DescBuilder { def builder: StringBuilder def appendPrefix(p: String): DescBuilder = { newBuilder(p.length).append(p) } def append(b: String, sep: String, e: String, exprs: Seq[ExprLike]): DescBuilder = { val newBuilder = appendPrefix(b) var first = true exprs.foreach { expr => if (!first) { newBuilder.append(sep) } newBuilder.append(expr) first = false } newBuilder.append(e) } def append(e: ExprLike): DescBuilder = { e.buildDesc(this) this } def append(s: String): DescBuilder = { builder.append(s) this } def newLine(): DescBuilder def newBuilder(increment: Int): DescBuilder def desc: String = builder.toString() } case class IndentedDescBuilder(builder: StringBuilder = new StringBuilder, indent: Int = 0) extends DescBuilder { override def newBuilder(increment: Int): DescBuilder = copy(indent = indent + increment) override def newLine(): DescBuilder = append("\\n").append(" " * indent) } case class SimpleDescBuilder(builder: StringBuilder = new StringBuilder) extends DescBuilder { override def newBuilder(increment: Int): DescBuilder = this override def newLine(): DescBuilder = append(" ") }
hvanhovell/weld-java
src/main/scala/weld/expressions/DescBuilder.scala
Scala
bsd-3-clause
1,251
// Solution-1.scala // Solution to Exercise 1 in "Constructors" import com.atomicscala.AtomicTest._ class Coffee(val shots:Int = 2, val decaf:Int = 0, val milk:Boolean = false, val toGo:Boolean = false, val syrup:String = "") { var result = "" println(decaf, milk, toGo, syrup) def getCup():Unit = { if(toGo) { result += "ToGoCup " } else { result += "HereCup " } } def caf():Int = { shots - decaf } def pourShots():Unit = { for(s <- 0 until shots) { if(decaf > 0) { result += "decaf shot " } else { result += "shot " } } } def addMilk():Unit = { if(milk) { result += "milk " } } def addSyrup():Unit = { result += syrup } getCup() pourShots() addMilk() addSyrup() } val doubleHalfCaf = new Coffee(shots=2, decaf=1) val tripleHalfCaf = new Coffee(shots=3, decaf=2) doubleHalfCaf.decaf is 1 doubleHalfCaf.caf() is 1 doubleHalfCaf.shots is 2 tripleHalfCaf.decaf is 2 tripleHalfCaf.caf() is 1 tripleHalfCaf.shots is 3 /* OUTPUT_SHOULD_BE (1,1,false,false,) (1,2,false,false,) 1 1 2 2 1 3 */
P7h/ScalaPlayground
Atomic Scala/atomic-scala-solutions/25_Constructors/Solution-1.scala
Scala
apache-2.0
1,162
package aoc.day17 import io.IO object Part1 extends App { /* --- Day 17: No Such Thing as Too Much --- The elves bought too much eggnog again - 150 liters this time. To fit it all into your refrigerator, you'll need to move it into smaller containers. You take an inventory of the capacities of the available containers. For example, suppose you have containers of size 20, 15, 10, 5, and 5 liters. If you need to store 25 liters, there are four ways to do it: 15 and 10 20 and 5 (the first 5) 20 and 5 (the second 5) 15, 5, and 5 Filling all containers entirely, how many different combinations of containers can exactly fit all 150 liters of eggnog? */ def combinationsWithRepetition[T](l: List[T], size: Int): List[List[T]] = { (size, l) match { case (0, _) => // There is only one way to take 0 element from a list // It is to take the empty list. List(List()) case (_, List()) => // There is no way to take element from an empty list // (except by 0, dealt by the above case) List() case (s, head :: tail) => combinationsWithRepetition(tail, s - 1).map(ll => head :: ll) ++ combinationsWithRepetition(tail, s) } } val input = IO.getLines(_.toInt) var somme = 0 val solutions = for { i <- 1 to input.length potentialSol <- combinationsWithRepetition(input, i) if potentialSol.sum == 150 } { somme += 1 } println(s"The number of different combinations of containers that can exactly fit all 150 liters of eggnog is: $somme") }
GuillaumeDD/AdventOfCode2015
src/main/scala/aoc/day17/Part1.scala
Scala
gpl-3.0
1,587
import scala.quoted._ object Foo { inline def inspectBody(inline i: Int): String = ${ inspectBodyImpl('i) } def inspectBodyImpl(x: Expr[Int])(using qctx: QuoteContext) : Expr[String] = { import qctx.tasty._ def definitionString(sym: Symbol): Expr[String] = if sym.isClassDef || sym.isDefDef || sym.isValDef then Expr(sym.tree.showExtractors) else '{"NO DEFINTION"} x.unseal match { case Inlined(None, Nil, arg) => definitionString(arg.symbol) case arg => definitionString(arg.symbol) // TODO should all by name parameters be in an inline node } } }
som-snytt/dotty
tests/run-custom-args/Yretain-trees/tasty-load-tree-2/quoted_1.scala
Scala
apache-2.0
604
/* * Copyright (c) 2014-2021 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.internal.operators import monix.execution.exceptions.DummyException import monix.reactive.Observable import scala.concurrent.duration._ import scala.concurrent.duration.Duration.Zero object OnErrorRecoverWithSuite extends BaseOperatorSuite { def createObservable(sourceCount: Int) = Some { val fallback = Observable.range(0, 10) val source = Observable .range(0L, sourceCount.toLong) .endWithError(DummyException("expected")) val obs = source.onErrorHandleWith { case DummyException("expected") => fallback case other => Observable.raiseError(other) } val sum = sourceCount * (sourceCount - 1) / 2 + 9 * 5 Sample(obs, sourceCount + 10, sum, Zero, Zero) } def observableInError(sourceCount: Int, ex: Throwable) = if (sourceCount <= 1) None else Some { val fallback = Observable.range(0, 10) val source = Observable.range(0L, sourceCount.toLong).endWithError(ex) val obs = source.onErrorHandleWith { case DummyException("not happening") => fallback case other => Observable.raiseError(other) } val sum = sourceCount * (sourceCount - 1) / 2 Sample(obs, sourceCount, sum, Zero, Zero) } def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some { val source = Observable .range(0L, sourceCount.toLong) .endWithError(DummyException("expected")) val obs = source.onErrorHandleWith { case DummyException("expected") => 
throw ex case other => Observable.raiseError(other) } val sum = sourceCount * (sourceCount - 1) / 2 Sample(obs, sourceCount, sum, Zero, Zero) } override def cancelableObservables() = { val fallback = Observable.range(0, 10).delayOnNext(1.second) val sample = Observable .range(0, 10) .map(_ => 1L) .delayOnNext(1.second) .endWithError(DummyException("expected")) .onErrorHandleWith { case DummyException("expected") => fallback case other => Observable.raiseError(other) } Seq( Sample(sample, 0, 0, 0.seconds, 0.seconds), Sample(sample, 10, 10, 10.seconds, 0.seconds) ) } }
monixio/monix
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/OnErrorRecoverWithSuite.scala
Scala
apache-2.0
2,936
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package de.fuberlin.wiwiss.silk.linkagerule.similarity import math.min import de.fuberlin.wiwiss.silk.entity.Index /** * A simple similarity measure, which compares pairs of values. */ abstract class SimpleDistanceMeasure extends DistanceMeasure { /** * Computes the distance between two strings. * * @param value1 The first string. * @param value2 The second string. * @param limit If the expected distance between both strings exceeds this limit, this method may * return Double.PositiveInfinity instead of the actual distance in order to save computation time. * @return A positive number that denotes the computed distance between both strings. */ def evaluate(value1: String, value2: String, limit: Double = Double.PositiveInfinity): Double /** * Computes the index of a single value. */ def indexValue(value: String, limit: Double): Index = Index.default override final def apply(values1: Traversable[String], values2: Traversable[String], limit: Double): Double = { var minDistance = Double.MaxValue for (str1 <- values1; str2 <- values2 if minDistance > 0.0) { val distance = evaluate(str1, str2, min(limit, minDistance)) minDistance = min(minDistance, distance) } minDistance } override final def index(values: Set[String], limit: Double): Index = { if(values.isEmpty) indexValue("", limit) //We index an empty value, so that the index is empty but has the right size else values.map(indexValue(_, limit)).reduce(_ merge _) } }
fusepoolP3/p3-silk
silk-core/src/main/scala/de/fuberlin/wiwiss/silk/linkagerule/similarity/SimpleDistanceMeasure.scala
Scala
apache-2.0
2,118
/* The MIT License (MIT) Copyright (c) 2016 Tom Needham Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ package com.thomas.needham.neurophidea.examples.scala import java.io._ import java.util import org.neuroph.core.NeuralNetwork import org.neuroph.core.learning.{SupervisedTrainingElement, TrainingSet} import org.neuroph.nnet.MultiLayerPerceptron import org.neuroph.nnet.learning.BackPropagation import org.neuroph.util.TransferFunctionType object TestMultiLayerPerceptron { var inputSize: Int = 8 var outputSize: Int = 1 var network: NeuralNetwork = _ var trainingSet: TrainingSet[SupervisedTrainingElement] = _ var testingSet: TrainingSet[SupervisedTrainingElement] = _ var layers: Array[Int] = Array(8, 8, 1) def loadNetwork() { network = NeuralNetwork.load("D:/GitHub/Neuroph-Intellij-Plugin/TestMultiLayerPerceptron.nnet") } def trainNetwork() { val list = new util.ArrayList[Integer]() for (layer <- layers) { list.add(layer) } network = new MultiLayerPerceptron(list, TransferFunctionType.SIGMOID); trainingSet = new TrainingSet[SupervisedTrainingElement](inputSize, outputSize) trainingSet = TrainingSet.createFromFile("D:/GitHub/NeuralNetworkTest/Classroom Occupation Data.csv", inputSize, outputSize, ",").asInstanceOf[TrainingSet[SupervisedTrainingElement]] val learningRule = new BackPropagation(); network.setLearningRule(learningRule) network.learn(trainingSet) network.save("D:/GitHub/Neuroph-Intellij-Plugin/TestMultiLayerPerceptron.nnet") } def testNetwork() { var input = "" val fromKeyboard = new BufferedReader(new InputStreamReader(System.in)) val testValues = new util.ArrayList[Double]() var testValuesDouble: Array[Double] = null do { try { println("Enter test values or \\"\\": ") input = fromKeyboard.readLine() if (input == "") { //break } input = input.replace(" ", "") val stringVals = input.split(",") testValues.clear() for (value <- stringVals) { testValues.add(value.toDouble) } } catch { case ioe: IOException => ioe.printStackTrace(System.err) case nfe: NumberFormatException => nfe.printStackTrace(System.err) } testValuesDouble = 
Array.ofDim[Double](testValues.size) for (t <- testValuesDouble.indices) { testValuesDouble(t) = testValues.get(t).doubleValue() } network.setInput(testValuesDouble: _*) network.calculate() } while (input != "") } def testNetworkAuto(setPath: String) { var total: Double = 0.0 val list = new util.ArrayList[Integer]() val outputLine = new util.ArrayList[String]() for (layer <- layers) { list.add(layer) } testingSet = TrainingSet.createFromFile(setPath, inputSize, outputSize, ",").asInstanceOf[TrainingSet[SupervisedTrainingElement]] val count = testingSet.elements().size var averageDeviance = 0.0 var resultString = "" try { val file = new File("Results " + setPath) val fw = new FileWriter(file) val bw = new BufferedWriter(fw) for (i <- 0 until testingSet.elements().size) { var expected: Double = 0.0 var calculated: Double = 0.0 network.setInput(testingSet.elementAt(i).getInput: _*) network.calculate() calculated = network.getOutput()(0) expected = testingSet.elementAt(i).getIdealArray()(0) println("Calculated Output: " + calculated) println("Expected Output: " + expected) println("Deviance: " + (calculated - expected)) averageDeviance += math.abs(math.abs(calculated) - math.abs(expected)) total += network.getOutput()(0) resultString = "" for (cols <- testingSet.elementAt(i).getInputArray.indices) { resultString += testingSet.elementAt(i).getInputArray()(cols) + ", " } for (t <- network.getOutput.indices) { resultString += network.getOutput()(t) + ", " } resultString = resultString.substring(0, resultString.length - 2) resultString += "" bw.write(resultString) bw.flush() } println() println("Average: " + total / count) println("Average Deviance % : " + (averageDeviance / count) * 100) bw.flush() bw.close() } catch { case ex: IOException => ex.printStackTrace() } } }
06needhamt/Neuroph-Intellij-Plugin
neuroph-plugin/src/com/thomas/needham/neurophidea/examples/scala/TestMultiLayerPerceptron.scala
Scala
mit
5,417
package services import java.util.UUID import com.softwaremill.bootzooka.test.{FlatSpecWithDb, TestHelpersWithDb} import com.flowy.fomoapi.services.UserRegisterResult import com.flowy.common.models.User import org.scalatest.Matchers import scala.concurrent.Await import scala.concurrent.duration._ class UserServiceSpec extends FlatSpecWithDb with Matchers with TestHelpersWithDb { override protected def beforeEach() = { super.beforeEach() userDao.add(newUser("Admin", "first", "admin@sml.com", "pass", "salt")).futureValue userDao.add(newUser("Admin2", "second", "admin2@sml.com", "pass", "salt")).futureValue } "registerNewUser" should "add user with unique lowercase login info" in { // When val result = userService.registerNewUser("John", "Doe", "newUser@sml.com", "password").futureValue // Then result should be(UserRegisterResult.Success) val userOpt = userDao.findByEmail("newUser@sml.com").futureValue userOpt should be('defined) val user = userOpt.get user.firstName should be("John") user.lastName should be("Doe") emailService.wasEmailSentTo("newUser@sml.com") should be(true) } "registerNewUser" should "not register a user if a user with the given login/e-mail exists" in { // when val resultInitial = userService.registerNewUser("John", "Doe", "newUser@sml.com", "password").futureValue val resultSameLogin = userService.registerNewUser("John", "Doe", "newUser2@sml.com", "password").futureValue val resultSameEmail = userService.registerNewUser("John2", "Doe", "newUser@sml.com", "password").futureValue // then resultInitial should be(UserRegisterResult.Success) resultSameLogin should matchPattern { case UserRegisterResult.Success => } resultSameEmail should matchPattern { case UserRegisterResult.UserExists(_) => } } "registerNewUser" should "not schedule an email on existing login" in { // When userService.registerNewUser("Admin", "One", "admin3@sml.com", "password").futureValue // Then emailService.wasEmailSentTo("admin@sml.com") should be(false) } "changeEmail" should 
"change email for specified user" in { val userFuture = userDao.findByEmail("admin@sml.com") val userValue = Await.result(userFuture, 5 seconds) val newEmail = "new@email.com" userService.changeEmail(userValue.get.id, newEmail).futureValue should be('right) userDao.findByEmail(newEmail).futureValue match { case Some(cu) => // ok case None => fail("User not found. Maybe e-mail wasn't really changed?") } } "changeEmail" should "not change email if already used by someone else" in { userService.changeEmail(UUID.randomUUID(), "admin2@sml.com").futureValue should be('left) } "changePassword" should "change password if current is correct and new is present" in { // Given val user = userDao.findByEmail("admin@sml.com").futureValue.get val currentPassword = "pass" val newPassword = "newPass" // When val changePassResult = userService.changePassword(user.id, currentPassword, newPassword).futureValue // Then changePassResult should be('right) userDao.findByEmail("admin@sml.com").futureValue match { case Some(cu) => cu.passwordHash should be(User.encryptPassword(newPassword, cu.salt)) case None => fail("Something bad happened, maybe mocked Dao is broken?") } } "changePassword" should "not change password if current is incorrect" in { // Given val user = userDao.findByEmail("admin@sml.com").futureValue.get // When, Then userService.changePassword(user.id, "someillegalpass", "newpass").futureValue should be('left) } "changePassword" should "complain when user cannot be found" in { userService.changePassword(UUID.randomUUID(), "pass", "newpass").futureValue should be('left) } }
asciiu/fomo
api/src/test/scala/services/UserServiceSpec.scala
Scala
apache-2.0
3,894
package com.github.antidata.bootstrap import akka.actor._ import com.github.antidata.actors.{HtmMasterActor, HtmModelActor, HtmModelsClusterListener} import com.github.antidata.managers.HtmModelsManager import com.typesafe.config.ConfigFactory import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings} object Boot { var systemRef: ActorSystem = null def startup(ports: Seq[String]) = { ports foreach { port => // Override the configuration of the port val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port).withFallback(ConfigFactory.load()) // Create an Akka system val system = ActorSystem("HtmModelsSystem", config) val shardSettings = ClusterShardingSettings(system)//.withRole("") ClusterSharding(system).start( typeName = HtmModelActor.shardName, entityProps = HtmModelActor.props(), settings = shardSettings, extractEntityId = HtmModelActor.idExtractor, extractShardId = HtmModelActor.shardResolver ) systemRef = system } } def main(args: Array[String]): Unit = { HtmModelsManager.init() startup(Seq("2551"/*, "2552", "0"*/)) } def startupWeb(ports: Seq[String]) = { ports map { port => // Override the configuration of the port val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port).withFallback(ConfigFactory.load()) // Create an Akka system val system = ActorSystem("ClusterSystem", config) // Create an actor that handles cluster domain events system.actorOf(Props[HtmModelsClusterListener], name = "htmModelsClusterListener2") } } }
antidata/htm-moclu
src/main/scala/com/github/antidata/bootstrap/Boot.scala
Scala
agpl-3.0
1,673
package com.enkidu.lignum.parsers.java.v8 import com.enkidu.lignum.parsers.ast.expression.discardable.dimension._ import com.enkidu.lignum.parsers.ast.expression.types._ import com.enkidu.lignum.parsers.ast.expression.types.annotations.Annotation import com.enkidu.lignum.parsers.ast.expression.types.coupled.ChildOfAll import com.enkidu.lignum.parsers.ast.expression.types.primitives._ import com.enkidu.lignum.parsers.ast.expression.types.references._ import com.enkidu.lignum.parsers.ast.expression.types.templates._ import org.parboiled2.Rule1 abstract class JavaTypeParser extends JavaLiteralParser { protected def annotations: Rule1[Seq[Annotation]] def `type`: Rule1[Type] = rule { (primitiveType | classType) ~ optionalDims ~> { (t: Type, ds: Seq[Dimension]) => if (ds.size == 0) t else ArrayType(t, ds) } } protected def primitiveType: Rule1[PrimitiveType] = rule { annotations ~ { `byte` ~> BytePrimitive | `short` ~> ShortPrimitive | `float` ~> FloatPrimitive | `int` ~> IntegerPrimitive | `long` ~> LongPrimitive | `char` ~> CharPrimitive | `double` ~> DoublePrimitive | `boolean` ~> BooleanPrimitive | `void` ~> VoidPrimitive } } protected def referenceType: Rule1[ReferenceType] = rule { arrayType | classType } protected def classType: Rule1[ClassType] = rule { annotations ~ push(None) ~ identifier ~ optionalTypeArguments ~> ClassType ~ { zeroOrMore(dot ~ annotations ~ identifier ~ optionalTypeArguments ~> { (p: ClassType, as: Seq[Annotation], id: String, args: Seq[TemplateArgument]) => ClassType(as, Some(p), id, args) }) } } protected def arrayType: Rule1[ArrayType] = rule { (primitiveType | classType) ~ dims ~> ArrayType } protected def dims: Rule1[Seq[Dimension]] = rule { oneOrMore(annotations ~ `[` ~ `]` ~> AbstractDimension) } protected def optionalDims: Rule1[Seq[Dimension]] = rule { zeroOrMore(annotations ~ `[` ~ `]` ~> AbstractDimension) } protected def typeParameter: Rule1[TemplateParameter] = rule { annotations ~ identifier ~ { typeBound ~> 
BoundedParameterTemplate | MATCH ~> ParameterTemplate } } private def typeBound: Rule1[Type] = rule { `extends` ~ classType ~ zeroOrMore(additionalBound) ~> ((t: ClassType, is: Seq[ClassType]) => if (is.size == 0) t else ChildOfAll(t +: is)) } protected def additionalBound: Rule1[ClassType] = rule { `&` ~ classType } protected def optionalTypeArguments: Rule1[Seq[TemplateArgument]] = rule { `<` ~ (zeroOrMore(typeArgument) separatedBy comma) ~ `>` | push(Vector()) } protected def typeArguments: Rule1[Seq[TemplateArgument]] = rule { `<` ~ (zeroOrMore(typeArgument) separatedBy comma) ~ `>` } private def typeArgument: Rule1[TemplateArgument] = rule { referenceType ~> ArgumentTemplate | annotations ~ { `?` ~ `extends` ~ referenceType ~> AnySubClassTemplate | `?` ~ `super` ~ referenceType ~> AnyBaseClassTemplate | `?` ~> AnyTemplate } } }
marek1840/java-parser
src/main/scala/com/enkidu/lignum/parsers/java/v8/JavaTypeParser.scala
Scala
mit
3,117
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package iht.views.application.assets import iht.controllers.application.assets.routes import iht.forms.ApplicationForms._ import iht.models.application.basicElements.BasicEstateElement import iht.testhelpers.CommonBuilder import iht.views.application.ShareableElementInputViewBehaviour import iht.views.html.application.asset.money_owed import play.api.data.Form import play.twirl.api.HtmlFormat.Appendable class MoneyOwedViewTest extends ShareableElementInputViewBehaviour[BasicEstateElement] { lazy val regDetails = CommonBuilder.buildRegistrationDetails1 lazy val deceasedName = regDetails.deceasedDetails.fold("")(x => x.name) lazy val moneyOwedView: money_owed = app.injector.instanceOf[money_owed] override def form:Form[BasicEstateElement] = moneyOwedForm override def formToView:Form[BasicEstateElement] => Appendable = form => moneyOwedView(form, regDetails) override def pageTitle = messagesApi("iht.estateReport.assets.moneyOwed", deceasedName) override def browserTitle = messagesApi("iht.estateReport.assets.moneyOwed", messagesApi("iht.the.deceased")) override def questionTitle = messagesApi("page.iht.application.assets.moneyOwed.isOwned", deceasedName) override def valueQuestion = messagesApi("page.iht.application.assets.moneyOwed.inputLabel1", deceasedName) override def hasValueQuestionHelp = false override def valueQuestionHelp = "" override def returnLinkText = 
messagesApi("page.iht.application.return.to.assetsOf", deceasedName) override def returnLinkUrl = iht.controllers.application.assets.routes.AssetsOverviewController.onPageLoad().url override def formTarget =Some(routes.MoneyOwedController.onSubmit) override def linkHash = appConfig.AppSectionMoneyOwedID "Money Owed view" must { behave like yesNoValueView "show the correct guidance" in { messagesShouldBePresent(view, messagesApi("page.iht.application.assets.moneyOwed.description.p1", deceasedName), messagesApi("page.iht.application.assets.moneyOwed.description.p2", deceasedName), messagesApi("page.iht.application.assets.moneyOwed.description.p3", deceasedName)) } } }
hmrc/iht-frontend
test/iht/views/application/assets/MoneyOwedViewTest.scala
Scala
apache-2.0
2,751
package com.naughtyzombie.recipesearch import org.scalatra._ import org.scalatra.scalate.ScalateSupport trait RecipesearchStack extends ScalatraServlet with ScalateSupport { notFound { // remove content type in case it was set through an action contentType = null // Try to render a ScalateTemplate if no route matched findTemplate(requestPath) map { path => contentType = "text/html" layoutTemplate(path) } orElse serveStaticResource() getOrElse resourceNotFound() } }
pram/recipesearch
server/src/main/scala/com/naughtyzombie/recipesearch/RecipesearchStack.scala
Scala
mit
510
package im.actor.server.api.rpc.service import cats.data.Xor import im.actor.api.rpc._ import im.actor.api.rpc.messaging._ import im.actor.api.rpc.misc.ResponseSeq import im.actor.api.rpc.peers.{ ApiOutPeer, ApiPeer, ApiPeerType } import im.actor.server.acl.ACLUtils import im.actor.server.api.rpc.service.groups.{ GroupInviteConfig, GroupsServiceImpl } import im.actor.server.api.rpc.service.messaging.MessagingServiceImpl import im.actor.server._ import im.actor.server.dialog.{ DialogExtension, DialogGroupType } import scala.concurrent.{ Await, Future } import scala.concurrent.duration._ import scala.util.Random final class GroupedDialogsSpec extends BaseAppSuite with ImplicitAuthService with ImplicitSessionRegion with GroupsServiceHelpers with MessagingSpecHelpers { "LoadGroupedDialogs" should "load groups and privates" in loadGrouped "Hidden dialogs" should "appear on new message" in appearHidden it should "appear when peer sends message to dialog" in appearHidden2 it should "appear on show" in appearShown "Favourited dialogs" should "appear on favourite" in appearFavourite it should "not be in grouped dialogs if no favourites left" in noGroupInFavAbsent "Archived dialogs" should "be loaded by desc order" in archived "Deleted dialogs" should "not appear in dialog list, and should mark messages as deleted in db" in deleted private implicit lazy val groupsService = new GroupsServiceImpl(GroupInviteConfig("")) private implicit lazy val service = MessagingServiceImpl() def loadGrouped() = { val (user1, authId1, authSid1, _) = createUser() val (user2, _, _, _) = createUser() val (user3, _, _, _) = createUser() implicit val clientData = ClientData(authId1, 1, Some(AuthData(user1.id, authSid1, 42))) val group = createGroup("Some group", Set(user3.id)) val user2Peer = Await.result(ACLUtils.getOutPeer(ApiPeer(ApiPeerType.Private, user2.id), authId1), 5.seconds) val groupPeer = ApiOutPeer(ApiPeerType.Group, group.groupPeer.groupId, group.groupPeer.accessHash) 
whenReady(Future.sequence(Seq( service.handleSendMessage(user2Peer, Random.nextLong, ApiTextMessage("Hi there", Vector.empty, None), None, None), service.handleSendMessage(groupPeer, Random.nextLong, ApiTextMessage("Hi all there", Vector.empty, None), None, None) ))) { _ ⇒ whenReady(service.handleLoadGroupedDialogs(Vector.empty)) { resp ⇒ inside(resp) { case Ok(ResponseLoadGroupedDialogs(dgroups, users, groups, _, _, _, _)) ⇒ dgroups.length shouldBe 2 dgroups.map(_.key) should be(Seq( DialogExtension.groupKey(DialogGroupType.Groups), DialogExtension.groupKey(DialogGroupType.DirectMessages) )) val (gs, ps) = dgroups.foldLeft(IndexedSeq.empty[ApiDialogShort], IndexedSeq.empty[ApiDialogShort]) { case ((gs, ps), dg) ⇒ dg.key match { case "groups" ⇒ (dg.dialogs, ps) case "privates" ⇒ (gs, dg.dialogs) case unknown ⇒ throw new RuntimeException(s"Unknown dialog group key $unknown") } } inside(gs) { case Vector(g) ⇒ g.peer.id shouldBe group.groupPeer.groupId } inside(ps) { case Vector(p) ⇒ p.peer.id shouldBe user2.id } users.map(_.id).toSet shouldBe Set(user1.id, user2.id, user3.id) groups.map(_.id).toSet shouldBe Set(group.groupPeer.groupId) } } } } def appearHidden() = { val (alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, _, _, _) = createUser() val (eve, _, _, _) = createUser() implicit val clientData = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) // sendMessageToUser(bob.id, textMessage("Hi Bob!")) prepareDialogs(bob, eve) whenReady(service.handleHideDialog(bobPeer))(identity) inside(getDialogGroups(DialogGroupType.DirectMessages)) { case Vector(d) ⇒ d.peer.id should equal(eve.id) } sendMessageToUser(bob.id, textMessage("Hi Bob!")) getDialogGroups(DialogGroupType.DirectMessages).map(_.peer.id).toSet should equal(Set(eve.id, bob.id)) } def appearHidden2() = { val (alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, bobAuthId, bobAuthSid, _) = createUser() val (eve, _, _, _) = 
createUser() val aliceCD = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobCD = ClientData(bobAuthId, 1, Some(AuthData(bob.id, bobAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) { implicit val cd = aliceCD prepareDialogs(bob, eve) whenReady(service.handleHideDialog(bobPeer))(identity) inside(getDialogGroups(DialogGroupType.DirectMessages)) { case Vector(d) ⇒ d.peer.id should equal(eve.id) } } { implicit val cd = bobCD sendMessageToUser(alice.id, textMessage("Hi Alice!")) } { implicit val cd = aliceCD getDialogGroups(DialogGroupType.DirectMessages).map(_.peer.id).toSet should equal(Set(eve.id, bob.id)) } } def appearShown() = { val (alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, _, _, _) = createUser() val (eve, _, _, _) = createUser() implicit val clientData = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) prepareDialogs(bob, eve) whenReady(service.handleHideDialog(bobPeer))(identity) inside(getDialogGroups(DialogGroupType.DirectMessages)) { case Vector(d) ⇒ d.peer.id should equal(eve.id) } whenReady(service.handleShowDialog(bobPeer))(identity) getDialogGroups(DialogGroupType.DirectMessages).map(_.peer.id).toSet should equal(Set(eve.id, bob.id)) } def appearFavourite() = { val (alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, _, _, _) = createUser() implicit val clientData = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) sendMessageToUser(bob.id, textMessage("Hi Bob!")) prepareDialogs(bob) whenReady(service.handleFavouriteDialog(bobPeer))(identity) whenReady(service.handleLoadGroupedDialogs(Vector.empty)) { resp ⇒ resp.toOption.get.dialogs.map(_.key).head should be(DialogExtension.groupKey(DialogGroupType.Favourites)) } inside(getDialogGroups(DialogGroupType.Favourites)) { case Vector(d) ⇒ d.peer.id should equal(bob.id) } } def noGroupInFavAbsent() = { val 
(alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, _, _, _) = createUser() implicit val clientData = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) sendMessageToUser(bob.id, textMessage("Hi Bob!")) prepareDialogs(bob) whenReady(service.handleFavouriteDialog(bobPeer))(identity) getDialogGroups().get(DialogExtension.groupKey(DialogGroupType.Favourites)) should not be empty whenReady(service.handleUnfavouriteDialog(bobPeer))(identity) getDialogGroups().get(DialogExtension.groupKey(DialogGroupType.Favourites)) shouldBe empty } def archived() = { val (alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, _, _, _) = createUser() val (eve, _, _, _) = createUser() val (kira, _, _, _) = createUser() implicit val clientData = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) val evePeer = getOutPeer(eve.id, aliceAuthId) val kiraPeer = getOutPeer(kira.id, aliceAuthId) prepareDialogs(bob, eve, kira) whenReady(service.handleArchiveChat(bobPeer))(identity) whenReady(service.handleArchiveChat(evePeer))(identity) whenReady(service.handleArchiveChat(kiraPeer))(identity) val offset1 = whenReady(service.handleLoadArchived(None, 1, Vector.empty)) { resp ⇒ val okResp = resp.toOption.get okResp.dialogs.size shouldBe 1 okResp.dialogs.head.peer.id shouldBe kiraPeer.id okResp.nextOffset } val offset2 = whenReady(service.handleLoadArchived(offset1, 1, Vector.empty)) { resp ⇒ val okResp = resp.toOption.get okResp.dialogs.size shouldBe 1 okResp.dialogs.head.peer.id shouldBe evePeer.id okResp.nextOffset } whenReady(service.handleLoadArchived(offset2, 1, Vector.empty)) { resp ⇒ val okResp = resp.toOption.get okResp.dialogs.size shouldBe 1 okResp.dialogs.head.peer.id shouldBe bobPeer.id okResp.nextOffset } } def deleted() = { val (alice, aliceAuthId, aliceAuthSid, _) = createUser() val (bob, _, _, _) = createUser() val (charlie, _, _, _) = 
createUser() implicit val clientData = ClientData(aliceAuthId, 1, Some(AuthData(alice.id, aliceAuthSid, 42))) val bobPeer = getOutPeer(bob.id, aliceAuthId) val charliePeer = getOutPeer(charlie.id, aliceAuthId) prepareDialogs(bob, charlie) val mobileBefore = loadDialogs() mobileBefore should have length 2 val groupBefore = getDialogGroups() groupBefore("privates") should have length 2 whenReady(service.handleLoadHistory(charliePeer, 0L, None, 100, Vector.empty)) { resp ⇒ inside(resp) { case Xor.Right(histResp) ⇒ histResp.history should have length 1 } } whenReady(service.handleDeleteChat(charliePeer)) { resp ⇒ resp should matchPattern { case Ok(ResponseSeq(_, _)) ⇒ } } whenReady(service.handleLoadHistory(charliePeer, 0L, None, 100, Vector.empty)) { resp ⇒ inside(resp) { case Xor.Right(histResp) ⇒ histResp.history shouldBe empty } } val mobileAfter = loadDialogs() mobileAfter should have length 1 mobileAfter.head.peer.id shouldEqual bobPeer.id val groupAfter = getDialogGroups() groupAfter("privates") should have length 1 } }
EaglesoftZJ/actor-platform
actor-server/actor-tests/src/test/scala/im/actor/server/api/rpc/service/GroupedDialogsSpec.scala
Scala
agpl-3.0
10,175
/* Copyright 2012 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding import cascading.tuple.TupleEntry import cascading.tuple.{Tuple => CTuple} import scala.collection.breakOut /** Typeclass to represent converting from cascading TupleEntry to some type T. * The most common application is to convert to scala Tuple objects for use * with the Fields API. The typed API internally manually handles its mapping * to cascading Tuples, so the implicit resolution mechanism is not used. * * WARNING: if you are seeing issues with the singleConverter being found when you * expect something else, you may have an issue where the enclosing scope needs to * take an implicit TupleConverter of the correct type. * * Unfortunately, the semantics we want (prefer to flatten tuples, but otherwise * put everything into one postition in the tuple) are somewhat difficlut to * encode in scala. 
*/ trait TupleConverter[@specialized(Int,Long,Float,Double)T] extends java.io.Serializable with TupleArity { self => def apply(te : TupleEntry) : T def andThen[U](fn: T => U): TupleConverter[U] = new TupleConverter[U] { def apply(te: TupleEntry) = fn(self(te)) def arity = self.arity } } trait LowPriorityTupleConverters extends java.io.Serializable { implicit def singleConverter[@specialized(Int,Long,Float,Double)A](implicit g : TupleGetter[A]) = new TupleConverter[A] { def apply(tup : TupleEntry) = g.get(tup.getTuple, 0) def arity = 1 } } object TupleConverter extends GeneratedTupleConverters { /** Treat this TupleConverter as one for a superclass * We do this because we want to use implicit resolution invariantly, * but clearly, the operation is covariant */ def asSuperConverter[T,U>:T](tc: TupleConverter[T]): TupleConverter[U] = tc.asInstanceOf[TupleConverter[U]] def build[T](thisArity: Int)(fn: TupleEntry => T): TupleConverter[T] = new TupleConverter[T] { def apply(te: TupleEntry) = fn(te) def arity = thisArity } def fromTupleEntry[T](t: TupleEntry)(implicit tc: TupleConverter[T]): T = tc(t) def arity[T](implicit tc: TupleConverter[T]): Int = tc.arity def of[T](implicit tc: TupleConverter[T]): TupleConverter[T] = tc /** Copies the tupleEntry, since cascading may change it after the end of an * operation (and it is not safe to assume the consumer has not kept a ref * to this tuple) */ implicit lazy val TupleEntryConverter: TupleConverter[TupleEntry] = new TupleConverter[TupleEntry] { override def apply(tup : TupleEntry) = new TupleEntry(tup) override def arity = -1 } /** Copies the tuple, since cascading may change it after the end of an * operation (and it is not safe to assume the consumer has not kept a ref * to this tuple */ implicit lazy val CTupleConverter: TupleConverter[CTuple] = new TupleConverter[CTuple] { override def apply(tup : TupleEntry) = tup.getTupleCopy override def arity = -1 } /** In the case where you don't know the arity, prefer to use this. 
*/ implicit lazy val ProductTupleConverter: TupleConverter[Product] = new TupleConverter[Product] { def wrap(tup: CTuple): Product = new Product { def canEqual(that: Any) = that match { case p: Product => true case _ => false } def productArity = tup.size def productElement(idx: Int) = tup.getObject(idx) } override def apply(tup : TupleEntry) = wrap(tup.getTupleCopy) override def arity = -1 } implicit lazy val UnitConverter: TupleConverter[Unit] = new TupleConverter[Unit] { override def apply(arg : TupleEntry) = () override def arity = 0 } // Doesn't seem safe to make these implicit by default: /** Convert a TupleEntry to a List of CTuple, of length 2, with key, value * from the TupleEntry (useful for RichPipe.unpivot) */ object KeyValueList extends TupleConverter[List[CTuple]] { def apply(tupe : TupleEntry): List[CTuple] = { val keys = tupe.getFields (0 until keys.size).map { idx => new CTuple(keys.get(idx).asInstanceOf[Object], tupe.getObject(idx)) }(breakOut) } def arity = -1 } object ToMap extends TupleConverter[Map[String, AnyRef]] { def apply(tupe: TupleEntry): Map[String, AnyRef] = { val keys = tupe.getFields (0 until keys.size).map { idx => (keys.get(idx).toString, tupe.getObject(idx)) }(breakOut) } def arity = -1 } /** Utility to create a single item Tuple */ def tupleAt(idx: Int)(tup: CTuple): CTuple = { val obj = tup.getObject(idx) val res = CTuple.size(1) res.set(0, obj) res } }
vidyar/twitterscalding
scalding-core/src/main/scala/com/twitter/scalding/TupleConverter.scala
Scala
apache-2.0
5,146
/** * Copyright 2014-2015 Martin Cooper * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.martincooper.datatable.DataSetSpecs import com.github.martincooper.datatable.{ DataSet, DataTable } import org.scalatest.{ FlatSpec, Matchers } class DataSetAddTableSpec extends FlatSpec with Matchers { "A DataSet" should "allow a table to be added" in { val tableOne = DataTable("TableOne").get val tableTwo = DataTable("TableTwo").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableTwo)).get val tableThree = DataTable("TableThree").get val newDataSet = dataSet.add(tableThree) newDataSet.isSuccess should be(true) newDataSet.get.tables.length should be(3) newDataSet.get.tables.map(_.name) should be(Seq("TableOne", "TableTwo", "TableThree")) } it should "disallow a table with a duplicate name to be added" in { val tableOne = DataTable("TableOne").get val tableTwo = DataTable("TableTwo").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableTwo)).get val tableThree = DataTable("TableOne").get val newDataSet = dataSet.add(tableThree) newDataSet.isSuccess should be(false) newDataSet.failed.get.getMessage should be("Tables contain duplicate names.") } }
martincooper/scala-datatable
src/test/scala/com/github/martincooper/datatable/DataSetSpecs/DataSetAddTableSpec.scala
Scala
apache-2.0
1,780
/* * ____ ____ _____ ____ ___ ____ * | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R) * | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data * | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc. * |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved. * * This program is free software: you can redistribute it and/or modify it under the terms of the * GNU Affero General Public License as published by the Free Software Foundation, either version * 3 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License along with this * program. If not, see <http://www.gnu.org/licenses/>. * */ package com.precog.yggdrasil package jdbm3 import table._ import com.precog.common._ import com.precog.util._ import com.precog.util.BitSet import com.precog.util.BitSetUtil import com.precog.util.BitSetUtil.Implicits._ import org.joda.time.{DateTime, Period} import java.nio.ByteBuffer import scala.annotation.tailrec import scala.collection.mutable import scala.collection.mutable.ListBuffer import scala.{ specialized => spec } import scalaz._ trait ColumnEncoder { def encodeFromRow(row: Int): Array[Byte] } trait ColumnDecoder { def decodeToRow(row: Int, src: Array[Byte], offset: Int = 0): Unit } trait RowFormat { def columnRefs: Seq[ColumnRef] def ColumnEncoder(cols: Seq[Column]): ColumnEncoder def ColumnDecoder(cols: Seq[ArrayColumn[_]]): ColumnDecoder def encode(cValues: List[CValue]): Array[Byte] def decode(bytes: Array[Byte], offset: Int = 0): List[CValue] def compare(a: Array[Byte], b: Array[Byte]): Int = { val selectors = columnRefs map (_.selector) val aVals = selectors zip decode(a) groupBy (_._1) 
val bVals = selectors zip decode(b) groupBy (_._1) val cmp = selectors.distinct.iterator map { cPath => val a = aVals(cPath) find (_._2 != CUndefined) val b = bVals(cPath) find (_._2 != CUndefined) (a, b) match { case (None, None) => 0 case (None, _) => -1 case (_, None) => 1 case (Some((_, a)), Some((_, b))) => CValue.compareValues(a, b) } } find (_ != 0) getOrElse 0 cmp } } object RowFormat { val byteBufferPool = new ByteBufferPool() def forSortingKey(columnRefs: Seq[ColumnRef]): RowFormat = SortingKeyRowFormatV1(columnRefs) def forValues(columnRefs: Seq[ColumnRef]): RowFormat = ValueRowFormatV1(columnRefs) def forIdentities(columnRefs: Seq[ColumnRef]): RowFormat = IdentitiesRowFormatV1(columnRefs) case class ValueRowFormatV1(_columnRefs: Seq[ColumnRef]) extends ValueRowFormat with RowFormatCodecs { // This is really stupid, but required to work w/ JDBM. @transient lazy val columnRefs: Seq[ColumnRef] = _columnRefs map { ref => ref.copy(ctype = ref.ctype.readResolve()) } // TODO Get this from somewhere else? 
def pool = byteBufferPool } case class SortingKeyRowFormatV1(_columnRefs: Seq[ColumnRef]) extends RowFormatCodecs with SortingRowFormat { @transient lazy val columnRefs: Seq[ColumnRef] = _columnRefs map { ref => ref.copy(ctype = ref.ctype.readResolve()) } def pool = byteBufferPool } case class IdentitiesRowFormatV1(_columnRefs: Seq[ColumnRef]) extends IdentitiesRowFormat { @transient lazy val columnRefs: Seq[ColumnRef] = _columnRefs map { ref => ref.copy(ctype = ref.ctype.readResolve()) } } } trait RowFormatSupport { self: StdCodecs => import ByteBufferPool._ protected trait ColumnValueEncoder { def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] } protected trait SimpleColumnValueEncoder[A] extends ColumnValueEncoder { val codec: Codec[A] @tailrec protected final def writeMore(s: codec.S, pool: ByteBufferPool, buffers: List[ByteBuffer]): List[ByteBuffer] = { val buffer = pool.acquire codec.writeMore(s, buffer) match { case Some(s) => writeMore(s, pool, buffer :: buffers) case None => (buffer :: buffers).reverse } } } def getColumnEncoder(cType: CType, col: Column): ColumnValueEncoder = (cType, col) match { case (CLong, col: LongColumn) => new SimpleColumnValueEncoder[Long] { val codec = Codec[Long] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CDouble, col: DoubleColumn) => new SimpleColumnValueEncoder[Double] { val codec = Codec[Double] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CNum, col: NumColumn) => new SimpleColumnValueEncoder[BigDecimal] { val codec = Codec[BigDecimal] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { 
codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CBoolean, col: BoolColumn) => new SimpleColumnValueEncoder[Boolean] { val codec = Codec[Boolean] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CString, col: StrColumn) => new SimpleColumnValueEncoder[String] { val codec = Codec[String] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CDate, col: DateColumn) => new SimpleColumnValueEncoder[DateTime] { val codec = Codec[DateTime] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CPeriod, col: PeriodColumn) => new SimpleColumnValueEncoder[Period] { val codec = Codec[Period] def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { codec.writeInit(col(row), buffer) match { case Some(s) => Some(writeMore(s, pool, buffer :: Nil)) case None => None } } } case (CEmptyObject, col: EmptyObjectColumn) => new ColumnValueEncoder { def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = None } case (CEmptyArray, col: EmptyArrayColumn) => new ColumnValueEncoder { def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = None } case (CNull, col: NullColumn) => new ColumnValueEncoder { def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = None } case (cType, col) => sys.error( "Cannot create column encoder, columns of wrong type (expected %s, found %s)." 
format (cType, col.tpe)) } protected trait ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer): Unit } def getColumnDecoder(cType: CType, col: ArrayColumn[_]): ColumnValueDecoder = (cType, col) match { case (CLong, col: ArrayLongColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[Long].read(buf)) } case (CDouble, col: ArrayDoubleColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[Double].read(buf)) } case (CNum, col: ArrayNumColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[BigDecimal].read(buf)) } case (CBoolean, col: ArrayBoolColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[Boolean].read(buf)) } case (CString, col: ArrayStrColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[String].read(buf)) } case (CDate, col: ArrayDateColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[DateTime].read(buf)) } case (CPeriod, col: ArrayPeriodColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, Codec[Period].read(buf)) } case (CEmptyObject, col: MutableEmptyObjectColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, true) } case (CEmptyArray, col: MutableEmptyArrayColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, true) } case (CNull, col: MutableNullColumn) => new ColumnValueDecoder { def decode(row: Int, buf: ByteBuffer) = col.update(row, true) } case _ => sys.error("Cannot create column decoder, columns of wrong type.") } protected def encodeRow(row: Int, undefined: RawBitSet, encoders: Array[ColumnValueEncoder], init: ByteBuffer, pool: ByteBufferPool): Array[Byte] = { var buffer = init var filled: ListBuffer[ByteBuffer] = null @inline @tailrec def encodeAll(i: Int): Unit = if (i 
< encoders.length) { if (!RawBitSet.get(undefined, i)) { encoders(i).encode(row, buffer, pool) match { case Some(buffers) => if (filled == null) filled = new ListBuffer[ByteBuffer]() filled ++= buffers buffer = pool.acquire case None => } } encodeAll(i + 1) } encodeAll(0) if (filled != null) { filled += buffer val all = filled.toList val bytes = ByteBufferPool.getBytesFrom(filled.toList) all foreach { pool.release(_) } bytes } else { buffer.flip() val len = buffer.remaining() val bytes = new Array[Byte](len) buffer.get(bytes) pool.release(buffer) bytes } } } trait ValueRowFormat extends RowFormat with RowFormatSupport { self: StdCodecs => import ByteBufferPool._ def pool: ByteBufferPool def encode(cValues: List[CValue]) = getBytesFrom(RowCodec.writeAll(cValues)(pool.acquire _).reverse) def decode(bytes: Array[Byte], offset: Int): List[CValue] = RowCodec.read(ByteBuffer.wrap(bytes, offset, bytes.length - offset)) def ColumnEncoder(cols: Seq[Column]) = { require(columnRefs.size == cols.size) val colValueEncoders: Array[ColumnValueEncoder] = { (columnRefs zip cols).map({ case (ColumnRef(_, cType), col) => getColumnEncoder(cType, col) })(collection.breakOut) } new ColumnEncoder { val colsArray = cols.toArray def encodeFromRow(row: Int) = { val undefined = RawBitSet.create(colsArray.length) @inline @tailrec def definedCols(i: Int): Unit = if (i >= 0) { if (!colsArray(i).isDefinedAt(row)) RawBitSet.set(undefined, i) definedCols(i - 1) } definedCols(colsArray.length - 1) val init = pool.acquire Codec[RawBitSet].writeUnsafe(undefined, init) encodeRow(row, undefined, colValueEncoders, init, pool) } } } def ColumnDecoder(cols: Seq[ArrayColumn[_]]) = { require(columnRefs.size == cols.size) //val decoders: Seq[(ColumnValueDecoder, Int)] = // Seq[((Int, ByteBuffer) => Unit, Int)] = // (columnRefs zip cols map { case (ref, col) => getColumnDecoder(ref.ctype, col) }).zipWithIndex val decoders: List[ColumnValueDecoder] = (columnRefs zip cols).map { case (ref, col) => 
getColumnDecoder(ref.ctype, col) }(collection.breakOut) new ColumnDecoder { def decodeToRow(row: Int, src: Array[Byte], offset: Int = 0) { val buf = ByteBuffer.wrap(src, offset, src.length - offset) val undefined = Codec[RawBitSet].read(buf) @tailrec def helper(i: Int, decs: List[ColumnValueDecoder]) { decs match { case h :: t => if (!RawBitSet.get(undefined, i)) h.decode(row, buf) helper(i + 1, t) case Nil => } } helper(0, decoders) } } } case object RowCodec extends Codec[List[CValue]] { import Codec.{ StatefulCodec, wrappedWriteInit } // @transient lazy val bitSetCodec = Codec[BitSet] @transient lazy val rawBitSetCodec = Codec[RawBitSet] @transient private lazy val codecs: List[Codec[_ <: CValue]] = columnRefs.toList map { case ColumnRef(_, cType: CValueType[_]) => Codec.CValueCodec(cType)(codecForCValueType(cType)) case ColumnRef(_, cType: CNullType) => Codec.ConstCodec(cType) } type S = (Either[rawBitSetCodec.S, StatefulCodec#State], List[CValue]) private def undefineds(xs: List[CValue]): RawBitSet = { val bits = RawBitSet.create(xs.size) @inline @tailrec def rec(i: Int, xs: List[CValue]): Unit = xs match { case CUndefined :: xs => RawBitSet.set(bits, i); rec(i + 1, xs) case _ :: xs => rec(i + 1, xs) case Nil => } rec(0, xs) bits } def encodedSize(xs: List[CValue]) = xs.foldLeft(rawBitSetCodec.encodedSize(undefineds(xs))) { (acc, x) => acc + (x match { case x: CWrappedValue[_] => codecForCValueType(x.cType).encodedSize(x.value) case _ => 0 }) } override def maxSize(xs: List[CValue]) = xs.foldLeft(rawBitSetCodec.maxSize(undefineds(xs))) { (acc, x) => acc + (x match { case x: CWrappedValue[_] => codecForCValueType(x.cType).maxSize(x.value) case _ => 0 }) } def writeUnsafe(xs: List[CValue], sink: ByteBuffer) { rawBitSetCodec.writeUnsafe(undefineds(xs), sink) xs foreach { case x: CWrappedValue[_] => codecForCValueType(x.cType).writeUnsafe(x.value, sink) case _ => } } @tailrec private def writeCValues(xs: List[CValue], sink: ByteBuffer): Option[S] = xs match { case 
x :: xs => (x match { case CBoolean(x) => wrappedWriteInit[Boolean](x, sink) case CString(x) => wrappedWriteInit[String](x, sink) case CDate(x) => wrappedWriteInit[DateTime](x, sink) case CPeriod(x) => wrappedWriteInit[Period](x, sink) case CLong(x) => wrappedWriteInit[Long](x, sink) case CDouble(x) => wrappedWriteInit[Double](x, sink) case CNum(x) => wrappedWriteInit[BigDecimal](x, sink) case CArray(x, cType) => wrappedWriteInit(x, sink)(codecForCValueType(cType)) case _: CNullType => None }) match { case None => writeCValues(xs, sink) case Some(s) => Some((Right(s), xs)) } case Nil => None } def writeInit(xs: List[CValue], sink: ByteBuffer) = { rawBitSetCodec.writeInit(undefineds(xs), sink) match { case Some(s) => Some((Left(s), xs)) case None => writeCValues(xs, sink) } } def writeMore(more: S, sink: ByteBuffer) = more match { case (Left(s), xs) => rawBitSetCodec.writeMore(s, sink) map (s => (Left(s), xs)) orElse writeCValues(xs, sink) case (Right(s), xs) => s.more(sink) map (s => (Right(s), xs)) orElse writeCValues(xs, sink) } def read(src: ByteBuffer): List[CValue] = { val undefined = rawBitSetCodec.read(src) codecs.zipWithIndex collect { case (codec, i) if RawBitSet.get(undefined, i) => CUndefined case (codec, _) => codec.read(src) } } } } /** * This is a row format that is optimized for quickly comparing 2 encoded rows * (ie. byte arrays). 
*/ trait SortingRowFormat extends RowFormat with StdCodecs with RowFormatSupport { import SortingRowFormat._ def pool: ByteBufferPool override implicit def StringCodec = Codec.Utf8Codec @transient abstract override implicit lazy val BigDecimalCodec: Codec[BigDecimal] = Codec.CompositeCodec[Double, BigDecimal, BigDecimal](Codec[Double], super.BigDecimalCodec, bd => (bd.toDouble, bd), (_, bd) => bd) @transient lazy val selectors: List[(CPath, List[CType])] = { val refs: Map[CPath, Seq[ColumnRef]] = columnRefs.groupBy(_.selector) (columnRefs map (_.selector)).distinct.map(selector => (selector, refs(selector).map(_.ctype).toList))(collection.breakOut) } private def zipWithSelectors[A](xs: Seq[A]): List[(CPath, Seq[(A, CType)])] = { @tailrec def zip(zipped: List[(CPath, Seq[(A, CType)])], right: Seq[A], sels: List[(CPath, List[CType])]): List[(CPath, Seq[(A, CType)])] = sels match { case Nil => zipped.reverse case (path, cTypes) :: sels => val (head, tail) = right splitAt cTypes.size zip((path, head zip cTypes) :: zipped, tail, sels) } zip(Nil, xs, selectors) } def ColumnEncoder(cols: Seq[Column]) = { import ByteBufferPool._ val colValueEncoders: Array[ColumnValueEncoder] = zipWithSelectors(cols).map({ case (_, colsAndTypes) => val writers: Seq[ColumnValueEncoder] = colsAndTypes map { case (col, cType) => val writer = getColumnEncoder(cType, col) new ColumnValueEncoder { def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { val flag = SortingRowFormat.flagForCType(cType) if (buffer.remaining() > 0) { buffer.put(flag) writer.encode(row, buffer, pool) } else { val nextBuffer = pool.acquire nextBuffer.put(flag) writer.encode(row, nextBuffer, pool) match { case Some(buffers) => Some(buffer :: buffers) case None => Some(buffer :: nextBuffer :: Nil) } } } } } val selCols: Seq[Column] = colsAndTypes map (_._1) new ColumnValueEncoder { def encode(row: Int, buffer: ByteBuffer, pool: ByteBufferPool): Option[List[ByteBuffer]] = { (writers 
zip selCols) find (_._2.isDefinedAt(row)) map (_._1.encode(row, buffer, pool)) getOrElse { val flag = SortingRowFormat.flagForCType(CUndefined) if (buffer.remaining() > 0) { buffer.put(flag) None } else { val nextBuffer = pool.acquire nextBuffer.put(flag) Some(buffer :: nextBuffer :: Nil) } } } } })(collection.breakOut) new ColumnEncoder { val undefined = RawBitSet.create(0) def encodeFromRow(row: Int) = encodeRow(row, undefined, colValueEncoders, pool.acquire, pool) } } def ColumnDecoder(cols: Seq[ArrayColumn[_]])= { val decoders: List[Map[Byte, ColumnValueDecoder]] = zipWithSelectors(cols) map { case (_, colsWithTypes) => val decoders: Map[Byte, ColumnValueDecoder] = (for ((col, cType) <- colsWithTypes) yield { (flagForCType(cType), getColumnDecoder(cType, col)) })(collection.breakOut) decoders } new ColumnDecoder { def decodeToRow(row: Int, src: Array[Byte], offset: Int = 0) { val buf = ByteBuffer.wrap(src, offset, src.length - offset) @tailrec def decode(decoders: List[Map[Byte, ColumnValueDecoder]]): Unit = decoders match { case selDecoder :: decoders => val flag = buf.get() if (flag != FUndefined) { selDecoder(flag).decode(row, buf) } decode(decoders) case Nil => // Do nothing. 
} decode(decoders) } } } def encode(cValues: List[CValue]): Array[Byte] = { val cvals: List[CValue] = zipWithSelectors(cValues) map { case (_, cvals) => cvals map (_._1) find (_ != CUndefined) getOrElse CUndefined } import ByteBufferPool._ import scalaz.syntax.traverse._ import scalaz.std.list._ val writes: ByteBufferPoolS[List[Unit]] = cvals.map { case v: CNullValue => writeFlagFor(v.cType) case v: CWrappedValue[_] => for { _ <- writeFlagFor(v.cType) _ <- codecForCValueType(v.cType).write(v.value) } yield () }.sequence pool.run(for { _ <- writes bytes <- flipBytes _ <- release } yield bytes) } def decode(bytes: Array[Byte], offset: Int = 0): List[CValue] = { val buf = ByteBuffer.wrap(bytes) def readForSelector(cTypes: List[CType]): List[CValue] = { val cValue = cTypeForFlag(buf.get()) match { case cType: CValueType[_] => cType(codecForCValueType(cType).read(buf)) case cType: CNullType => cType } val cType = cValue.cType cTypes map { case `cType` => cValue case _ => CUndefined } } selectors.map { case (_, cTypes) => readForSelector(cTypes) }.flatten } override def compare(a: Array[Byte], b: Array[Byte]): Int = { val abuf = ByteBuffer.wrap(a) val bbuf = ByteBuffer.wrap(b) @inline def compareNext(): Int = { val aType = abuf.get() val bType = bbuf.get() if ((aType & 0xF0) == (bType & 0xF0)) { ((aType & 0xF0).toByte) match { case FUndefined => 0 case FBoolean => abuf.get() - bbuf.get() case FString => Codec.Utf8Codec.compare(abuf, bbuf) case FNumeric => aType match { case FLong => val a = Codec[Long].read(abuf) bType match { case FLong => NumericComparisons.compare(a, Codec[Long].read(bbuf)) case FDouble => NumericComparisons.compare(a, Codec[Double].read(bbuf)) case FBigDecimal => val b = Codec[Double].read(bbuf) NumericComparisons.approxCompare(a.toDouble, b) match { case 0 => BigDecimal(a) compare super.BigDecimalCodec.read(bbuf) case cmp => super.BigDecimalCodec.skip(bbuf) cmp } } case FDouble => val a = Codec[Double].read(abuf) bType match { case FLong => 
NumericComparisons.compare(a, Codec[Long].read(bbuf)) case FDouble => NumericComparisons.compare(a, Codec[Double].read(bbuf)) case FBigDecimal => val b = Codec[Double].read(bbuf) NumericComparisons.approxCompare(a, b) match { case 0 => BigDecimal(a) compare super.BigDecimalCodec.read(bbuf) case cmp => super.BigDecimalCodec.skip(bbuf) cmp } } case FBigDecimal => val a = Codec[Double].read(abuf) bType match { case FLong => val b = Codec[Long].read(bbuf) NumericComparisons.approxCompare(a, b.toDouble) match { case 0 => super.BigDecimalCodec.read(abuf) compare BigDecimal(b) case cmp => super.BigDecimalCodec.skip(abuf) cmp } case FDouble => val b = Codec[Double].read(bbuf) NumericComparisons.approxCompare(a, b) match { case 0 => super.BigDecimalCodec.read(abuf) compare BigDecimal(b) case cmp => super.BigDecimalCodec.skip(abuf) cmp } case FBigDecimal => val b = Codec[Double].read(bbuf) NumericComparisons.approxCompare(a, b) match { case 0 => super.BigDecimalCodec.read(abuf) compare super.BigDecimalCodec.read(bbuf) case cmp => super.BigDecimalCodec.skip(abuf) super.BigDecimalCodec.skip(bbuf) cmp } } } case FEmptyObject => 0 case FEmptyArray => 0 case FNull => 0 case FDate => math.signum(Codec[Long].read(abuf) - Codec[Long].read(bbuf)).toInt case FPeriod => math.signum(Codec[Long].read(abuf) - Codec[Long].read(bbuf)).toInt case x => sys.error("Match error for: " + x) } } else { (aType.toInt & 0xFF) - (bType.toInt & 0xFF) } } @tailrec def compare(cmp: Int): Int = if (cmp == 0) { if (abuf.remaining() > 0) compare(compareNext()) else 0 } else cmp compare(0) } } object SortingRowFormat { def writeFlagFor[M[+_]](cType: CType)(implicit M: ByteBufferMonad[M]): M[Unit] = { import scalaz.syntax.monad._ val flag = flagForCType(cType) for (buf <- M.getBuffer(1)) yield { buf.put(flag) () } } def flagForCType(cType: CType): Byte = cType match { case CBoolean => FBoolean case CString => FString case CLong => FLong case CDouble => FDouble case CNum => FBigDecimal case CDate => FDate case 
CPeriod => FPeriod case CEmptyObject => FEmptyObject case CEmptyArray => FEmptyArray case CNull => FNull case CUndefined => FUndefined } def cTypeForFlag(flag: Byte): CType = flag match { case FBoolean => CBoolean case FString => CString case FLong => CLong case FDouble => CDouble case FBigDecimal => CNum case FDate => CDate case FPeriod => CPeriod case FEmptyObject => CEmptyObject case FEmptyArray => CEmptyArray case FNull => CNull case FUndefined => CUndefined } private val FUndefined: Byte = 0x0.toByte private val FBoolean: Byte = 0x10.toByte private val FString: Byte = 0x20.toByte private val FNumeric: Byte = 0x40.toByte private val FLong: Byte = 0x41.toByte private val FDouble: Byte = 0x42.toByte private val FBigDecimal: Byte = 0x43.toByte private val FEmptyObject: Byte = 0x60.toByte private val FEmptyArray: Byte = 0x70.toByte private val FNull: Byte = 0x80.toByte private val FDate: Byte = 0x90.toByte private val FPeriod: Byte = 0x91.toByte } trait IdentitiesRowFormat extends RowFormat { lazy val identities: Int = columnRefs.size // FYI: This is here purely to ensure backwards compatiblity. Not used. // When we upgrade the serialization format, we can remove this. private final val codec = Codec.PackedLongCodec private final def packedSize(n: Long): Int = { @inline @tailrec def loop(size: Int, n: Long): Int = { val m = n >>> 7 if (m == 0) size + 1 else loop(size + 1, m) } loop(0, n) } // Packs the Long n into bytes, starting at offset, and returns the next free // position in bytes to store a Long. 
private final def packLong(n: Long, bytes: Array[Byte], offset: Int): Int = { @tailrec @inline def loop(i: Int, n: Long): Int = { val m = n >>> 7 val b = n & 0x7FL if (m != 0) { bytes(i) = (b | 0x80L).toByte loop(i + 1, m) } else { bytes(i) = b.toByte i + 1 } } loop(offset, n) } @inline private final def shiftIn(b: Byte, shift: Int, n: Long): Long = n | ((b.toLong & 0x7FL) << shift) @inline private final def more(b: Byte): Boolean = (b & 0x80) != 0 def encodeIdentities(xs: Array[Long]) = { @inline @tailrec def sumPackedSize(xs: Array[Long], i: Int, len: Int): Int = if (i < xs.length) { sumPackedSize(xs, i + 1, len + packedSize(xs(i))) } else { len } val bytes = new Array[Byte](sumPackedSize(xs, 0, 0)) @inline @tailrec def packAll(xs: Array[Long], i: Int, offset: Int) { if (i < xs.length) packAll(xs, i + 1, packLong(xs(i), bytes, offset)) } packAll(xs, 0, 0) bytes } def encode(cValues: List[CValue]): Array[Byte] = { @inline @tailrec def sumPackedSize(cvals: List[CValue], len: Int): Int = cvals match { case CLong(n) :: cvals => sumPackedSize(cvals, len + packedSize(n)) case cv :: _ => sys.error("Expecting CLong, but found: " + cv) case Nil => len } val bytes = new Array[Byte](sumPackedSize(cValues, 0)) @inline @tailrec def packAll(xs: List[CValue], offset: Int): Unit = xs match { case CLong(n) :: xs => packAll(xs, packLong(n, bytes, offset)) case _ => } packAll(cValues, 0) bytes } def decode(bytes: Array[Byte], offset: Int): List[CValue] = { val longs = new Array[Long](identities) @inline @tailrec def loop(offset: Int, shift: Int, n: Long, i: Int) { val lo = bytes(offset) val m = shiftIn(lo, shift, n) val nOffset = offset + 1 if (more(lo)) loop(nOffset, shift + 7, m, i) else { longs(i) = m if (nOffset < bytes.length) loop(nOffset, 0, 0L, i + 1) } } if (identities > 0) loop(offset, 0, 0L, 0) longs.map(CLong(_))(collection.breakOut) } def ColumnEncoder(cols: Seq[Column]) = { val longCols: Array[LongColumn] = cols.map({ case col: LongColumn => col case col => 
sys.error("Expecing LongColumn, but found: " + col) })(collection.breakOut) new ColumnEncoder { def encodeFromRow(row: Int): Array[Byte] = { @inline @tailrec def sumPackedSize(i: Int, len: Int): Int = if (i < longCols.length) { sumPackedSize(i + 1, len + packedSize(longCols(i)(row))) } else len val bytes = new Array[Byte](sumPackedSize(0, 0)) @inline @tailrec def packAll(i: Int, offset: Int): Unit = if (i < longCols.length) { packAll(i + 1, packLong(longCols(i)(row), bytes, offset)) } packAll(0, 0) bytes } } } def ColumnDecoder(cols: Seq[ArrayColumn[_]]) = { val longCols: Array[ArrayLongColumn] = cols.map({ case col: ArrayLongColumn => col case col => sys.error("Expecing ArrayLongColumn, but found: " + col) })(collection.breakOut) new ColumnDecoder { def decodeToRow(row: Int, src: Array[Byte], offset: Int = 0) { @inline @tailrec def loop(offset: Int, shift: Int, n: Long, col: Int) { val b = src(offset) val m = shiftIn(b, shift, n) val nOffset = offset + 1 if (more(b)) loop(nOffset, shift + 7, m, col) else { longCols(col).update(row, m) if (nOffset < src.length) loop(nOffset, 0, 0L, col + 1) } } if (src.length > 0) loop(0, 0, 0L, 0) } } } override def compare(a: Array[Byte], b: Array[Byte]): Int = { @inline @tailrec def loop(offset: Int, shift: Int, n: Long, m: Long): Int = { val b1 = a(offset) val b2 = b(offset) val n2 = shiftIn(b1, shift, n) val m2 = shiftIn(b2, shift, m) val nOffset = offset + 1 val moreA = more(b1) val moreB = more(b2) if (moreA && moreB) { loop(nOffset, shift + 7, n2, m2) } else if (moreA) { 1 } else if (moreB) { -1 } else if (n2 < m2) { -1 } else if (m2 < n2) { 1 } else if (nOffset < a.length) { loop(nOffset, 0, 0L, 0L) } else { 0 } } if (identities == 0) 0 else loop(0, 0, 0L, 0L) } }
precog/platform
yggdrasil/src/main/scala/com/precog/yggdrasil/jdbm3/RowFormat.scala
Scala
agpl-3.0
32,715
package ftl.akka.remote.serialization import java.util.concurrent.ConcurrentHashMap import akka.serialization.Serializer import com.google.protobuf.{ Message, Parser } /** * Created by kerr. */ class ProtobufSerializer extends Serializer { private val serializerBinding = new ConcurrentHashMap[Class[_], Parser[Message]]() override def identifier: Int = 17 override def includeManifest: Boolean = true override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = { manifest match { case Some(clazz) => val cachedParser = serializerBinding.get(clazz) if (cachedParser ne null) { cachedParser.parseFrom(bytes) } else { val parser = clazz.getField("PARSER").get(null).asInstanceOf[Parser[Message]] val previousParser = serializerBinding.putIfAbsent(clazz, parser) if (previousParser ne null) { previousParser.parseFrom(bytes) } else { parser.parseFrom(bytes) } } case None => throw new IllegalArgumentException("Need a protobuf message class to be able to serialize bytes using protobuf") } } override def toBinary(obj: AnyRef): Array[Byte] = obj match { case message: Message => message.toByteArray case _ => throw new IllegalArgumentException(s"Can't serialize a non-protobuf message using protobuf [$obj]") } }
hepin1989/akka-remote-transport-netty4
src/main/scala/ftl/akka/remote/serialization/ProtobufSerializer.scala
Scala
apache-2.0
1,398
package nasa.nccs.edas.kernels import com.google.common.reflect.ClassPath import nasa.nccs.utilities.Loggable import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ import scala.xml // TOUSE: Include dependency 'reflections' //class ReflectionTools { // import org.reflections._ // import org.reflections.scanners.{ResourcesScanner, SubTypesScanner} // import org.reflections.util.{ClasspathHelper, ConfigurationBuilder, FilterBuilder} // // val kernelsPackage = "nasa.nccs.cds2.modules" // val classLoadersList = List[ClassLoader](ClasspathHelper.contextClassLoader(), ClasspathHelper.staticClassLoader() ) // val configuration = new ConfigurationBuilder().setScanners(new SubTypesScanner(false), new ResourcesScanner()) // .setUrls(ClasspathHelper.forClassLoader(classLoadersList:_*)).filterInputsBy(new FilterBuilder().include(FilterBuilder.prefix( kernelsPackage ))) // // val reflections: Reflections = new Reflections(configuration) // //} object ClassInfoRec { def apply( classinfo: ClassPath.ClassInfo ): ClassInfoRec = new ClassInfoRec( classinfo.getPackageName.split('.').last, classinfo.getSimpleName, classinfo ) } class ClassInfoRec( val module: String, val name: String, val classinfo: ClassPath.ClassInfo ) { def getMapEntry = ( name.toLowerCase -> classinfo ) } object KernelModule extends Loggable { def apply( classInfoRecs: List[ClassInfoRec] ): KernelModule = { val kernelClassMap = Map(classInfoRecs.map(_.getMapEntry): _*) new KernelModule( classInfoRecs.head.module, kernelClassMap.mapValues(KernelModule.loadKernel(_) ) ) } def apply(moduleSpec: String): KernelModule = { val specToks = moduleSpec.split("[!]") val api = specToks(1) val module_name = (api + "." 
+ specToks(0)).toLowerCase() val kernelSpecs = specToks(2).split("[~]") val kernels = kernelSpecs.map(kspec => Kernel( module_name, kspec, api ) ) new KernelModule( module_name, Map( kernels.map( kernel => kernel.operation.toLowerCase -> Some(kernel)): _*) ) } def toXml( moduleSpec: String ): xml.Elem = { val specToks = moduleSpec.split("[!]") val kernelSpecs = specToks(2).split("[~]") <kernelModule name={specToks(0)} api={specToks(1)}> <kernels> { kernelSpecs.map( kernelSpec => getKernelXml(specToks(0),kernelSpec) ) } </kernels> </kernelModule> } def getKernelXml( modname: String, kernelSpec: String ): xml.Elem = { val specToks = kernelSpec.split("[;]") <kernel module={modname} name={specToks(0)} description={specToks(1)} inputs={specToks(1)} /> } def loadKernel( cls: ClassPath.ClassInfo ): Option[Kernel] = try { cls.load().getConstructor().newInstance() match { case kernel: Kernel => Some(kernel); case _ => logger.error( "Error loading Kernel class-> Can't cast to Kernel: " + cls.getName ) None } } catch { case err: Exception => logger.warn( "%s(%s) Can't construct Kernel from class: %s".format( err.getClass.getName, err.getMessage, cls.getName ) ) None } } class KernelModule( val name: String, val kernels: Map[String,Option[Kernel]] ) extends Loggable { def getKernel(name: String): Option[Kernel] = kernels.get(name).flatten def getKernels: Iterable[Kernel] = kernels.values.flatten def getKernelNames: List[String] = kernels.keys.toList def getName: String = name def empty: Boolean = ( kernels.size == 0 ) def nonEmpty: Boolean = ( kernels.size > 0 ) def filter( visibility: Int ): KernelModule = { new KernelModule( name, kernels.filter { case ( _, kerOpt ) => kerOpt.exists( _.status >= visibility ) } ) } def toXml: xml.Elem = { <kernelModule name={name}> <kernels> { kernels.values.flatMap( _.map( _.toXmlHeader ) ) } </kernels> </kernelModule> } }
nasa-nccs-cds/EDAS
src/main/scala/nasa/nccs/edas/kernels/compute.scala
Scala
gpl-2.0
3,799
package breeze.linalg import org.scalatest._ import org.scalatest.junit._ import org.scalatest.prop._ import spire.implicits._ /** * * * @author stucchio */ class splitTest extends FunSuite { test("split works on arrays with even multiple") { val start = DenseVector[Double](1,2,3,4,5,6,7,8,9,10,11,12) val expectedResult = Seq(DenseVector[Double](1,2,3,4), DenseVector[Double](5,6,7,8), DenseVector[Double](9,10,11,12)) assert(split(start, 3) == expectedResult) } test("throws exception when vector.size is not divisible by number of elements in split") { intercept[IllegalArgumentException]{ split(DenseVector[Double](1,2,3,4,5,6,7,8,9,10,11), 3) } } test("split works on arrays with sequence argument multiple") { val start = DenseVector[Double](1,2,3,4,5,6,7,8,9,10,11,12) val expectedResult = Seq(DenseVector[Double](1,2,3), DenseVector[Double](4,5,6,7,8), DenseVector[Double](9,10,11,12)) assert(split(start, Seq(3,8)) == expectedResult) } test("hsplit works on dense matrix") { val mbig = DenseMatrix( (0,1,2,3,4,5), (3,4,5,6,7,8), (3,4,5,6,7,8), (5,4,5,9,7,8) ) val expectedResult = List(DenseMatrix( (0,1,2), (3,4,5), (3,4,5), (5,4,5) ), DenseMatrix( (3,4,5), (6,7,8), (6,7,8), (9,7,8) )) assert(hsplit(mbig, 2) == expectedResult) } test("vsplit works on dense matrix") { val mbig = DenseMatrix( (0,1,2,3,4,5), (3,4,5,6,7,8), (3,4,5,6,7,8), (5,4,5,9,7,8) ) val expectedResult = List(DenseMatrix( (0,1,2,3,4,5), (3,4,5,6,7,8) ), DenseMatrix( (3,4,5,6,7,8), (5,4,5,9,7,8) )) assert(vsplit(mbig, 2) == expectedResult) } test("#459") { assert(split(DenseVector(0.0,1.0),2) === IndexedSeq(DenseVector(0.0),DenseVector(1.0))) } }
claydonkey/breeze
math/src/test/scala/breeze/linalg/splitTest.scala
Scala
apache-2.0
1,876
/****************************************************************************** * Copyright © 2016 Maxim Karpov * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * ******************************************************************************/ package ru.makkarpov.scalingua.test import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import ru.makkarpov.scalingua.{I18n, Language, Macros, OutputFormat} import scala.language.experimental.macros class CustomI18nTest extends AnyFlatSpec with Matchers { case class CStr(s: String) implicit val CStrFormat = new OutputFormat[CStr] { override def convert(s: String): CStr = CStr(s"C{$s}") override def escape(s: String): String = s"[$s]" } object CustomI18n extends I18n { def ct(msg: String, args: (String, Any)*)(implicit lang: Language, outputFormat: OutputFormat[CStr]): CStr = macro Macros.singular[CStr] } implicit val mockLang = new MockLang("") import CustomI18n._ it should "handle custom I18n classes via traits" in { t"Hello, world!" shouldBe "{s:Hello, world!}" } it should "handle custom methods in I18n classes" in { ct("Hello, world!").s shouldBe "C{{s:Hello, world!}}" ct("Hello, %(what)!", "what" -> "world").s shouldBe "C{{s:Hello, %(what)[[world]]!}}" """ ct("Hello, %(x)!", "y" -> 1) """ shouldNot compile } }
makkarpov/scalingua
scalingua/shared/src/test/scala/ru/makkarpov/scalingua/test/CustomI18nTest.scala
Scala
apache-2.0
2,335
package nest.sparkle.time.protocol import scala.concurrent.duration.DurationInt import scala.language.higherKinds import scala.util.{Failure, Success, Try} import nest.sparkle.datastream.{DataArray, TwoPartStream} import nest.sparkle.measure.{Detail, DummySpan, Span} import nest.sparkle.store.Event import nest.sparkle.util.{ObservableUtil, RecoverJsonFormat} import rx.lang.scala.Observable import spray.json._ /** returns an observable that produces one sequence of json arrays when the provided event stream completes */ object JsonEventWriter { /** returns an observable that produces a one item containing a sequence of json arrays * when the provided Event stream completes */ def fromObservableSingle[T: JsonWriter, U: JsonWriter](events: Observable[Event[T, U]]): Observable[Seq[JsArray]] = { events.map { eventToJsArray(_) }.toSeq // .toSeq returns an observable with a single item } /** returns an observable that produces multiple sequences of json arrays, when new data is available * on the incoming event stream */ def fromObservableMulti[T: JsonWriter, U: JsonWriter] // format: OFF (events: Observable[Event[T, U]], parentSpan:Option[Span] = None) : Observable[Seq[JsArray]] = { // format: ON // TODO should pass in Observable[Seq[Event]] rather than buffer // TODO untested val buffered = events.tumbling(50.milliseconds).flatMap { _.toSeq } val filtered = buffered.filterNot { _.isEmpty } fromObservableSeq(filtered, parentSpan) } /** return an Observable containing sequence-chunks of json data from an Observable containing sequence-chunks * of event data. 
*/ def fromObservableSeq[T: JsonWriter, U: JsonWriter] // format: OFF (observed: Observable[Seq[Event[T, U]]], parentSpan:Option[Span] = None) : Observable[Seq[JsArray]] = { // format: ON implicit val parent = parentSpan.getOrElse(DummySpan) observed.map { eventSeq => Span("JsonEventWriter", Detail).time { eventSeq map { event => eventToJsArray(event) } } } } /** return an Observable that contains blocks of json data, ready to encode into protocol * responses. The first block contains all of the initial data available at the time of * the initial data request. Subsequent blocks contain ongoing data arriving subsequent * to the initial request. */ def fromDataStream[K, V, S[_, _]] // format: OFF ( dataStream: TwoPartStream[K, V, S], parentSpan: Span) : Observable[Array[JsArray]] = { // format: ON def combineToJson(implicit keyWriter: JsonWriter[K], valueWriter: JsonWriter[V]) // format: OFF : Observable[Array[JsArray]] = { // format: ON val initialCombined = { val initialJsons = dataStream.mapInitial { toJsArray(_) } ObservableUtil.reduceSafe(initialJsons) { (a, b) => a ++ b } } // deliver empty array if initial result doesn't produce a value val initialEmptyIfNone = initialCombined.headOrElse(Array()) val ongoingJsons = dataStream.mapOngoing { toJsArray(_) } initialEmptyIfNone ++ ongoingJsons } jsonWriters(dataStream) match { case Success((keyWriter, valueWriter)) => combineToJson(keyWriter, valueWriter) case Failure(err) => Observable.error(err) } } private def jsonWriters[K, V, S[_, _]](dataStream: TwoPartStream[K, V, S]): Try[(JsonWriter[K], JsonWriter[V])] = { for { keyWriter <- RecoverJsonFormat.tryJsonFormat[K](dataStream.keyType) valueWriter <- RecoverJsonFormat.tryJsonFormat[V](dataStream.valueType) } yield { (keyWriter, valueWriter) } } private def toJsArray[K: JsonWriter, V: JsonWriter] // format: OFF ( DataArray:DataArray[K,V] ) : Array[JsArray] = { // format: ON DataArray.mapToArray { (key, value) => JsArray(key.toJson, value.toJson) } } /** return the 
JsArray for one event */ private def eventToJsArray[T: JsonWriter, U: JsonWriter](event: Event[T, U]): JsArray = { JsArray(event.key.toJson, event.value.toJson) } }
mighdoll/sparkle
protocol/src/main/scala/nest/sparkle/time/protocol/JsonEventWriter.scala
Scala
apache-2.0
4,117
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.connector import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, CreateTablePartitioningValidationSuite, ResolvedTable, TestRelation2, TestTable2, UnresolvedFieldName, UnresolvedFieldPosition} import org.apache.spark.sql.catalyst.plans.logical.{AddColumns, AlterColumn, AlterTableCommand, CreateTableAsSelect, DropColumns, LogicalPlan, QualifiedColType, RenameColumn, ReplaceColumns, ReplaceTableAsSelect} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.connector.catalog.Identifier import org.apache.spark.sql.connector.catalog.TableChange.ColumnPosition import org.apache.spark.sql.connector.expressions.Expressions import org.apache.spark.sql.execution.datasources.PreprocessTableCreation import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.{LongType, StringType} class V2CommandsCaseSensitivitySuite extends SharedSparkSession with AnalysisTest { import CreateTablePartitioningValidationSuite._ import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ private val table = ResolvedTable( catalog, Identifier.of(Array(), "table_name"), TestTable2, 
schema.toAttributes) override protected def extendedAnalysisRules: Seq[Rule[LogicalPlan]] = { Seq(PreprocessTableCreation(spark)) } test("CreateTableAsSelect: using top level field for partitioning") { Seq(true, false).foreach { caseSensitive => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { Seq("ID", "iD").foreach { ref => val plan = CreateTableAsSelect( catalog, Identifier.of(Array(), "table_name"), Expressions.identity(ref) :: Nil, TestRelation2, Map.empty, Map.empty, ignoreIfExists = false) if (caseSensitive) { assertAnalysisError(plan, Seq("Couldn't find column", ref), caseSensitive) } else { assertAnalysisSuccess(plan, caseSensitive) } } } } } test("CreateTableAsSelect: using nested column for partitioning") { Seq(true, false).foreach { caseSensitive => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { Seq("POINT.X", "point.X", "poInt.x", "poInt.X").foreach { ref => val plan = CreateTableAsSelect( catalog, Identifier.of(Array(), "table_name"), Expressions.bucket(4, ref) :: Nil, TestRelation2, Map.empty, Map.empty, ignoreIfExists = false) if (caseSensitive) { val field = ref.split("\\\\.") assertAnalysisError(plan, Seq("Couldn't find column", field.head), caseSensitive) } else { assertAnalysisSuccess(plan, caseSensitive) } } } } } test("ReplaceTableAsSelect: using top level field for partitioning") { Seq(true, false).foreach { caseSensitive => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { Seq("ID", "iD").foreach { ref => val plan = ReplaceTableAsSelect( catalog, Identifier.of(Array(), "table_name"), Expressions.identity(ref) :: Nil, TestRelation2, Map.empty, Map.empty, orCreate = true) if (caseSensitive) { assertAnalysisError(plan, Seq("Couldn't find column", ref), caseSensitive) } else { assertAnalysisSuccess(plan, caseSensitive) } } } } } test("ReplaceTableAsSelect: using nested column for partitioning") { Seq(true, false).foreach { caseSensitive => withSQLConf(SQLConf.CASE_SENSITIVE.key -> 
caseSensitive.toString) { Seq("POINT.X", "point.X", "poInt.x", "poInt.X").foreach { ref => val plan = ReplaceTableAsSelect( catalog, Identifier.of(Array(), "table_name"), Expressions.bucket(4, ref) :: Nil, TestRelation2, Map.empty, Map.empty, orCreate = true) if (caseSensitive) { val field = ref.split("\\\\.") assertAnalysisError(plan, Seq("Couldn't find column", field.head), caseSensitive) } else { assertAnalysisSuccess(plan, caseSensitive) } } } } } test("AlterTable: add column - nested") { Seq("POINT.Z", "poInt.z", "poInt.Z").foreach { ref => val field = ref.split("\\\\.") alterTableTest( AddColumns( table, Seq(QualifiedColType( Some(UnresolvedFieldName(field.init)), field.last, LongType, true, None, None))), Seq("Missing field " + field.head) ) } } test("AlterTable: add column resolution - positional") { Seq("ID", "iD").foreach { ref => alterTableTest( AddColumns( table, Seq(QualifiedColType( None, "f", LongType, true, None, Some(UnresolvedFieldPosition(ColumnPosition.after(ref)))))), Seq("reference column", ref) ) } } test("AlterTable: add column resolution - column position referencing new column") { alterTableTest( AddColumns( table, Seq(QualifiedColType( None, "x", LongType, true, None, Some(UnresolvedFieldPosition(ColumnPosition.after("id")))), QualifiedColType( None, "y", LongType, true, None, Some(UnresolvedFieldPosition(ColumnPosition.after("X")))))), Seq("Couldn't find the reference column for AFTER X at root") ) } test("AlterTable: add column resolution - nested positional") { Seq("X", "Y").foreach { ref => alterTableTest( AddColumns( table, Seq(QualifiedColType( Some(UnresolvedFieldName(Seq("point"))), "z", LongType, true, None, Some(UnresolvedFieldPosition(ColumnPosition.after(ref)))))), Seq("reference column", ref) ) } } test("AlterTable: add column resolution - column position referencing new nested column") { alterTableTest( AddColumns( table, Seq(QualifiedColType( Some(UnresolvedFieldName(Seq("point"))), "z", LongType, true, None, None), 
QualifiedColType( Some(UnresolvedFieldName(Seq("point"))), "zz", LongType, true, None, Some(UnresolvedFieldPosition(ColumnPosition.after("Z")))))), Seq("Couldn't find the reference column for AFTER Z at point") ) } test("SPARK-36372: Adding duplicate columns should not be allowed") { alterTableTest( AddColumns( table, Seq(QualifiedColType( Some(UnresolvedFieldName(Seq("point"))), "z", LongType, true, None, None), QualifiedColType( Some(UnresolvedFieldName(Seq("point"))), "Z", LongType, true, None, None))), Seq("Found duplicate column(s) in the user specified columns: `point.z`"), expectErrorOnCaseSensitive = false) } test("SPARK-36381: Check column name exist case sensitive and insensitive when add column") { alterTableTest( AddColumns( table, Seq(QualifiedColType( None, "ID", LongType, true, None, Some(UnresolvedFieldPosition(ColumnPosition.after("id")))))), Seq("Cannot add column, because ID already exists in root"), expectErrorOnCaseSensitive = false) } test("SPARK-36381: Check column name exist case sensitive and insensitive when rename column") { alterTableTest( RenameColumn(table, UnresolvedFieldName(Array("id")), "DATA"), Seq("Cannot rename column, because DATA already exists in root"), expectErrorOnCaseSensitive = false) } test("AlterTable: drop column resolution") { Seq(Array("ID"), Array("point", "X"), Array("POINT", "X"), Array("POINT", "x")).foreach { ref => alterTableTest( DropColumns(table, Seq(UnresolvedFieldName(ref))), Seq("Missing field " + ref.quoted) ) } } test("AlterTable: rename column resolution") { Seq(Array("ID"), Array("point", "X"), Array("POINT", "X"), Array("POINT", "x")).foreach { ref => alterTableTest( RenameColumn(table, UnresolvedFieldName(ref), "newName"), Seq("Missing field " + ref.quoted) ) } } test("AlterTable: drop column nullability resolution") { Seq(Array("ID"), Array("point", "X"), Array("POINT", "X"), Array("POINT", "x")).foreach { ref => alterTableTest( AlterColumn(table, UnresolvedFieldName(ref), None, Some(true), None, 
None), Seq("Missing field " + ref.quoted) ) } } test("AlterTable: change column type resolution") { Seq(Array("ID"), Array("point", "X"), Array("POINT", "X"), Array("POINT", "x")).foreach { ref => alterTableTest( AlterColumn(table, UnresolvedFieldName(ref), Some(StringType), None, None, None), Seq("Missing field " + ref.quoted) ) } } test("AlterTable: change column comment resolution") { Seq(Array("ID"), Array("point", "X"), Array("POINT", "X"), Array("POINT", "x")).foreach { ref => alterTableTest( AlterColumn(table, UnresolvedFieldName(ref), None, None, Some("comment"), None), Seq("Missing field " + ref.quoted) ) } } test("SPARK-36449: Replacing columns with duplicate name should not be allowed") { alterTableTest( ReplaceColumns( table, Seq(QualifiedColType(None, "f", LongType, true, None, None), QualifiedColType(None, "F", LongType, true, None, None))), Seq("Found duplicate column(s) in the user specified columns: `f`"), expectErrorOnCaseSensitive = false) } private def alterTableTest( alter: => AlterTableCommand, error: Seq[String], expectErrorOnCaseSensitive: Boolean = true): Unit = { Seq(true, false).foreach { caseSensitive => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { val expectError = if (expectErrorOnCaseSensitive) caseSensitive else !caseSensitive if (expectError) { assertAnalysisError(alter, error, caseSensitive) } else { assertAnalysisSuccess(alter, caseSensitive) } } } } }
chuckchen/spark
sql/core/src/test/scala/org/apache/spark/sql/connector/V2CommandsCaseSensitivitySuite.scala
Scala
apache-2.0
11,569
package com.sjsu.bikeshare.service import com.sjsu.bikeshare.domain.Notification import com.mongodb.util.JSON import com.mongodb.casbah.Imports._ import com.mongodb.util.JSON import java.util.{ List, ArrayList } import java.text.SimpleDateFormat; import java.util.Date; import java.text.DateFormat; import java.util.{ TimeZone, Formatter } object NotificationRepository { val dateFormat: DateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz"); dateFormat.setTimeZone(TimeZone.getTimeZone("GMT")) def save(notification: Notification): Notification = { var dbObject = MongoDBObject("collName" -> "userid") val fieldsMDBO = MongoDBObject("seq" -> 1, "_id" -> 0) val counterRes = MongoFactory.counterCollection.findAndModify(dbObject, update = $inc("seq" -> 1), upsert = true, fields = fieldsMDBO, sort = null, remove = false, returnNew = true).get val id = counterRes.get("seq").asInstanceOf[Int] var dbObject1 = MongoDBObject("noteId"->id,"ownerId" -> notification.getOwnerId, "requesterId" -> notification.requestorId, "fromDate" -> notification.fromDate,"toDate"->notification.toDate,"status" -> notification.status,"bikeId" -> notification.bikeId) MongoFactory.notificationCollection.insert(dbObject1,WriteConcern.Safe) println("Saved") notification } }
swathimr/Community-Bike-Share
src/main/scala/com/sjsu/bikeshare/service/NotificationRepository.scala
Scala
mit
1,346
package com.twitter.finagle.redis.protocol import com.twitter.finagle.redis.util.StringToChannelBuffer import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers} import scala.collection.immutable.WrappedString private[redis] object RedisCodec { object NilValue extends WrappedString("nil") { def getBytes(charset: String = "UTF_8") = Array[Byte]() def getBytes = Array[Byte]() } val STATUS_REPLY = '+' val ERROR_REPLY = '-' val INTEGER_REPLY = ':' val BULK_REPLY = '$' val MBULK_REPLY = '*' val ARG_COUNT_MARKER = '*' val ARG_SIZE_MARKER = '$' val TOKEN_DELIMITER = ' ' val EOL_DELIMITER = "\\r\\n" val NIL_VALUE = NilValue val NIL_VALUE_BA = ChannelBuffers.EMPTY_BUFFER val STATUS_REPLY_BA = StringToChannelBuffer("+") val ERROR_REPLY_BA = StringToChannelBuffer("-") val INTEGER_REPLY_BA = StringToChannelBuffer(":") val BULK_REPLY_BA = StringToChannelBuffer("$") val MBULK_REPLY_BA = StringToChannelBuffer("*") val ARG_COUNT_MARKER_BA = MBULK_REPLY_BA val ARG_SIZE_MARKER_BA = BULK_REPLY_BA val NIL_BULK_REPLY_BA = StringToChannelBuffer("$-1") val EMPTY_MBULK_REPLY_BA = StringToChannelBuffer("*0") val NIL_MBULK_REPLY_BA = StringToChannelBuffer("*-1") val TOKEN_DELIMITER_BA = StringToChannelBuffer(" ") val EOL_DELIMITER_BA = StringToChannelBuffer(EOL_DELIMITER) val POS_INFINITY_BA = StringToChannelBuffer("+inf") val NEG_INFINITY_BA = StringToChannelBuffer("-inf") def toUnifiedFormat(args: Seq[ChannelBuffer], includeHeader: Boolean = true) = { val header = includeHeader match { case true => Seq(ARG_COUNT_MARKER_BA, StringToChannelBuffer(args.length.toString), EOL_DELIMITER_BA) case false => Nil } val buffers = args.map({ arg => Seq( ARG_SIZE_MARKER_BA, StringToChannelBuffer(arg.readableBytes.toString), EOL_DELIMITER_BA, arg, EOL_DELIMITER_BA ) }).flatten ChannelBuffers.wrappedBuffer((header ++ buffers).toArray:_*) } } abstract class RedisMessage { def toChannelBuffer: ChannelBuffer def toByteArray: Array[Byte] = toChannelBuffer.array }
travisbrown/finagle
finagle-redis/src/main/scala/com/twitter/finagle/redis/protocol/Codec.scala
Scala
apache-2.0
2,249