code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.rackspace.prefs
import com.mchange.v2.c3p0.ComboPooledDataSource
import com.rackspace.prefs.model.InitDbTrait
import org.junit.runner.RunWith
import org.scalatest.FunSuiteLike
import org.scalatest.junit.JUnitRunner
import org.scalatra.test.scalatest.ScalatraSuite
import scala.slick.jdbc.JdbcBackend._
import scala.util.parsing.json.JSON
/**
* Created by shin4590 on 3/5/15.
*/
// Exercises the /status endpoint: a healthy service reports a positive
// "metadata-count"; the endpoint returns 500 when the Preferences Metadata
// table is missing.
@RunWith(classOf[JUnitRunner])
class StatusTest extends ScalatraSuite with FunSuiteLike with InitDbTrait {

  // Connection pool backed by c3p0 (configuration comes from the default c3p0 config).
  val db = Database.forDataSource(new ComboPooledDataSource)

  override def beforeAll {
    super.beforeAll
    clearData(db)
  }

  test("should get 200: GET /status") {
    addServlet(new PreferencesService(db), "/*")
    get("/status") {
      status should equal (200)
      // Parse JSON numbers as Ints (the default parser yields Doubles) so the
      // count below can be compared as an integer. `input.toInt` replaces the
      // deprecated `new Integer(input)` boxing constructor.
      JSON.perThreadNumberParser = { input: String => input.toInt }
      // Fail with descriptive messages instead of bare NoSuchElementExceptions
      // from Option.get when the body is not JSON or the key is absent.
      val statusMap = JSON.parseFull(body)
        .getOrElse(fail(s"GET /status did not return parseable JSON: $body"))
        .asInstanceOf[Map[String, Int]]
      statusMap.getOrElse("metadata-count", fail(s"'metadata-count' missing from: $body")) should be > 0
      info(body)
    }
  }

  // this needs to be the last test run, cuz it's going to drop
  // the Preferences Metadata table
  test("should get 500: GET /status when metadata table does not exist") {
    dropMetadata(db)
    try {
      get("/status") {
        status should equal(500)
        info(body)
      }
    } finally {
      createMetadata(db)
    }
  }
}
| VinnyQ/cloudfeeds-preferences-svc | app/src/test/scala/com/rackspace/prefs/StatusTest.scala | Scala | apache-2.0 | 1,572 |
package cc.ferreira.gcal2slack.calendar
import java.time.{LocalDate, LocalDateTime}
// Disallow direct access to the primary constructor: allDay is set only from the companion object
/**
 * An immutable calendar event spanning the closed interval [begin, end].
 *
 * The primary constructor is private on purpose: `allDay` may only be set by the
 * factory methods on the companion object, which guarantee it is consistent with
 * the interval.
 */
case class CalendarEvent private (title: String, begin: LocalDateTime, end: LocalDateTime, allDay: Boolean) {
  require(!begin.isAfter(end), s"Invalid event interval from $begin to $end")

  /** True when `time` lies within [begin, end], endpoints included. */
  def contains(time: LocalDateTime): Boolean =
    !(time.isBefore(begin) || time.isAfter(end))

  /** True when the event title mentions the given text. */
  def contains(text: String): Boolean = title.contains(text)
}

object CalendarEvent {

  /** Builds an all-day event covering `day` from midnight to the next midnight. */
  def apply(title: String, day: LocalDate): CalendarEvent = {
    val startOfDay = day.atStartOfDay
    new CalendarEvent(title, startOfDay, startOfDay.plusDays(1), allDay = true)
  }

  /** Builds a regular (timed) event from `start` to `end`. */
  def apply(title: String, start: LocalDateTime, end: LocalDateTime): CalendarEvent =
    new CalendarEvent(title, start, end, allDay = false)

  // Keep the four-argument factory private so that `allDay` can never be set
  // directly by callers; only the two public factories above decide its value.
  private def apply(title: String, start: LocalDateTime, end: LocalDateTime, allDay: Boolean): CalendarEvent =
    new CalendarEvent(title, start, end, allDay)
}
| hugocf/gcal-slack-update | src/main/scala/cc/ferreira/gcal2slack/calendar/CalendarEvent.scala | Scala | mit | 1,225 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.util.{Locale, TimeZone}
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.hive.test.TestHive._
import org.apache.spark.util.Utils
/**
* The test suite for window functions. To actually compare results with Hive,
* every test should be created by `createQueryTest`. Because we are reusing tables
* for different tests and there are a few properties needed to let Hive generate golden
* files, every `createQueryTest` calls should explicitly set `reset` to `false`.
*/
class HiveWindowFunctionQuerySuite extends HiveComparisonTest with BeforeAndAfter {
  private val originalTimeZone = TimeZone.getDefault
  private val originalLocale = Locale.getDefault
  private val testTempDir = Utils.createTempDir()

  // Creates the fixture tables (`part` and `over1k`) shared by every query test in
  // this suite, and applies the Hive settings needed to generate golden files.
  // Tests rely on these tables persisting, which is why `createQueryTest` calls
  // below all pass `reset = false`.
  override def beforeAll() {
    TestHive.cacheTables = true
    // Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
    TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
    // Add Locale setting
    Locale.setDefault(Locale.US)

    // Create the table used in windowing.q
    sql("DROP TABLE IF EXISTS part")
    sql(
      """
        |CREATE TABLE part(
        | p_partkey INT,
        | p_name STRING,
        | p_mfgr STRING,
        | p_brand STRING,
        | p_type STRING,
        | p_size INT,
        | p_container STRING,
        | p_retailprice DOUBLE,
        | p_comment STRING)
      """.stripMargin)
    val testData1 = TestHive.getHiveFile("data/files/part_tiny.txt").getCanonicalPath
    sql(
      s"""
        |LOAD DATA LOCAL INPATH '$testData1' overwrite into table part
      """.stripMargin)

    sql("DROP TABLE IF EXISTS over1k")
    sql(
      """
        |create table over1k(
        | t tinyint,
        | si smallint,
        | i int,
        | b bigint,
        | f float,
        | d double,
        | bo boolean,
        | s string,
        | ts timestamp,
        | dec decimal(4,2),
        | bin binary)
        |row format delimited
        |fields terminated by '|'
      """.stripMargin)
    val testData2 = TestHive.getHiveFile("data/files/over1k").getCanonicalPath
    sql(
      s"""
        |LOAD DATA LOCAL INPATH '$testData2' overwrite into table over1k
      """.stripMargin)

    // The following settings are used for generating golden files with Hive.
    // We have to use kryo to correctly let Hive serialize plans with window functions.
    // This is used to generate golden files.
    sql("set hive.plan.serialization.format=kryo")
    // Explicitly set fs to local fs.
    sql(s"set fs.default.name=file://$testTempDir/")
    // Ask Hive to run jobs in-process as a single map and reduce task.
    sql("set mapred.job.tracker=local")
  }

  // Restores the JVM-wide time zone and locale set in beforeAll() and resets
  // TestHive state so later suites are unaffected by this suite's fixtures.
  override def afterAll() {
    TestHive.cacheTables = false
    TimeZone.setDefault(originalTimeZone)
    Locale.setDefault(originalLocale)
    TestHive.reset()
  }

  /////////////////////////////////////////////////////////////////////////////
  // Tests based on windowing_multipartitioning.q
  // Results of the original query file are not deterministic.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing_multipartitioning.q (deterministic) 1",
    s"""
      |select s,
      |rank() over (partition by s order by si) r,
      |sum(b) over (partition by s order by si) sum
      |from over1k
      |order by s, r, sum;
    """.stripMargin, reset = false)

  /* timestamp comparison issue with Hive?
  createQueryTest("windowing_multipartitioning.q (deterministic) 2",
    s"""
      |select s,
      |rank() over (partition by s order by dec desc) r,
      |sum(b) over (partition by s order by ts desc) as sum
      |from over1k
      |where s = 'tom allen' or s = 'bob steinbeck'
      |order by s, r, sum;
    """.stripMargin, reset = false)
  */

  createQueryTest("windowing_multipartitioning.q (deterministic) 3",
    s"""
      |select s, sum(i) over (partition by s), sum(f) over (partition by si)
      |from over1k where s = 'tom allen' or s = 'bob steinbeck';
    """.stripMargin, reset = false)

  createQueryTest("windowing_multipartitioning.q (deterministic) 4",
    s"""
      |select s, rank() over (partition by s order by bo),
      |rank() over (partition by si order by bin desc) from over1k
      |where s = 'tom allen' or s = 'bob steinbeck';
    """.stripMargin, reset = false)

  createQueryTest("windowing_multipartitioning.q (deterministic) 5",
    s"""
      |select s, sum(f) over (partition by i), row_number() over (order by f)
      |from over1k where s = 'tom allen' or s = 'bob steinbeck';
    """.stripMargin, reset = false)

  createQueryTest("windowing_multipartitioning.q (deterministic) 6",
    s"""
      |select s, rank() over w1,
      |rank() over w2
      |from over1k
      |where s = 'tom allen' or s = 'bob steinbeck'
      |window
      |w1 as (partition by s order by dec),
      |w2 as (partition by si order by f) ;
    """.stripMargin, reset = false)

  /////////////////////////////////////////////////////////////////////////////
  // Tests based on windowing_navfn.q
  // Results of the original query file are not deterministic.
  // Also, the original query of
  // select i, lead(s) over (partition by bin order by d,i desc) from over1k ;
  // The Lag and Lead analytic functions can, within a single query, pull values
  // from the preceding N rows (Lag) and the following N rows (Lead) of the same
  // column as independent columns. This can replace a table self-join, and
  // LAG/LEAD are more efficient.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing_navfn.q (deterministic)",
    s"""
      |select s, row_number() over (partition by d order by dec) rn from over1k
      |order by s, rn desc;
      |select i, lead(s) over (partition by cast(bin as string) order by d,i desc) as l
      |from over1k
      |order by i desc, l;
      |select i, lag(dec) over (partition by i order by s,i,dec) l from over1k
      |order by i, l;
      |select s, last_value(t) over (partition by d order by f) l from over1k
      |order by s, l;
      |select s, first_value(s) over (partition by bo order by s) f from over1k
      |order by s, f;
      |select t, s, i, last_value(i) over (partition by t order by s)
      |from over1k where (s = 'oscar allen' or s = 'oscar carson') and t = 10;
    """.stripMargin, reset = false)

  /////////////////////////////////////////////////////////////////////////////
  // Tests based on windowing_ntile.q
  // Results of the original query file are not deterministic.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing_ntile.q (deterministic)",
    s"""
      |select i, ntile(10) over (partition by s order by i) n from over1k
      |order by i, n;
      |select s, ntile(100) over (partition by i order by s) n from over1k
      |order by s, n;
      |select f, ntile(4) over (partition by d order by f) n from over1k
      |order by f, n;
      |select d, ntile(1000) over (partition by dec order by d) n from over1k
      |order by d, n;
    """.stripMargin, reset = false)

  /////////////////////////////////////////////////////////////////////////////
  // Tests based on windowing_udaf.q
  // Results of the original query file are not deterministic.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing_udaf.q (deterministic)",
    s"""
      |select s, min(i) over (partition by s) m from over1k
      |order by s, m;
      |select s, avg(f) over (partition by si order by s) a from over1k
      |order by s, a;
      |select s, avg(i) over (partition by t, b order by s) a from over1k
      |order by s, a;
      |select max(i) over w m from over1k
      |order by m window w as (partition by f) ;
      |select s, avg(d) over (partition by t order by f) a from over1k
      |order by s, a;
    """.stripMargin, reset = false)

  /////////////////////////////////////////////////////////////////////////////
  // Tests based on windowing_windowspec.q
  // Results of the original query file are not deterministic.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing_windowspec.q (deterministic)",
    s"""
      |select s, sum(b) over (partition by i order by s,b rows unbounded preceding) as sum
      |from over1k order by s, sum;
      |select s, sum(f) over (partition by d order by s,f rows unbounded preceding) as sum
      |from over1k order by s, sum;
      |select s, sum(f) over
      |(partition by ts order by f range between current row and unbounded following) as sum
      |from over1k order by s, sum;
      |select s, avg(f)
      |over (partition by ts order by s,f rows between current row and 5 following) avg
      |from over1k order by s, avg;
      |select s, avg(d) over
      |(partition by t order by s,d desc rows between 5 preceding and 5 following) avg
      |from over1k order by s, avg;
      |select s, sum(i) over(partition by ts order by s) sum from over1k
      |order by s, sum;
      |select f, sum(f) over
      |(partition by ts order by f range between unbounded preceding and current row) sum
      |from over1k order by f, sum;
      |select s, i, round(avg(d) over (partition by s order by i) / 10.0 , 2) avg
      |from over1k order by s, i, avg;
      |select s, i, round((avg(d) over w1 + 10.0) - (avg(d) over w1 - 10.0),2) avg
      |from over1k
      |order by s, i, avg window w1 as (partition by s order by i);
    """.stripMargin, reset = false)

  /////////////////////////////////////////////////////////////////////////////
  // Tests based on windowing_rank.q
  // Results of the original query file are not deterministic.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing_rank.q (deterministic) 1",
    s"""
      |select s, rank() over (partition by f order by t) r from over1k order by s, r;
      |select s, dense_rank() over (partition by ts order by i,s desc) as r from over1k
      |order by s desc, r desc;
      |select s, cume_dist() over (partition by bo order by b,s) cd from over1k
      |order by s, cd;
      |select s, percent_rank() over (partition by dec order by f) r from over1k
      |order by s desc, r desc;
    """.stripMargin, reset = false)

  createQueryTest("windowing_rank.q (deterministic) 2",
    s"""
      |select ts, dec, rnk
      |from
      | (select ts, dec,
      | rank() over (partition by ts order by dec) as rnk
      | from
      | (select other.ts, other.dec
      | from over1k other
      | join over1k on (other.b = over1k.b)
      | ) joined
      | ) ranked
      |where rnk = 1
      |order by ts, dec, rnk;
    """.stripMargin, reset = false)

  createQueryTest("windowing_rank.q (deterministic) 3",
    s"""
      |select ts, dec, rnk
      |from
      | (select ts, dec,
      | rank() over (partition by ts order by dec) as rnk
      | from
      | (select other.ts, other.dec
      | from over1k other
      | join over1k on (other.b = over1k.b)
      | ) joined
      | ) ranked
      |where dec = 89.5
      |order by ts, dec, rnk;
    """.stripMargin, reset = false)

  createQueryTest("windowing_rank.q (deterministic) 4",
    s"""
      |select ts, dec, rnk
      |from
      | (select ts, dec,
      | rank() over (partition by ts order by dec) as rnk
      | from
      | (select other.ts, other.dec
      | from over1k other
      | join over1k on (other.b = over1k.b)
      | where other.t < 10
      | ) joined
      | ) ranked
      |where rnk = 1
      |order by ts, dec, rnk;
    """.stripMargin, reset = false)

  /////////////////////////////////////////////////////////////////////////////
  // Tests from windowing.q
  // We port tests in windowing.q to here because this query file contains too
  // many tests and the syntax of test "-- 7. testJoinWithWindowingAndPTF"
  // is not supported right now.
  /////////////////////////////////////////////////////////////////////////////
  createQueryTest("windowing.q -- 1. testWindowing",
    s"""
      |select p_mfgr, p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |sum(p_retailprice) over
      |(distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 2. testGroupByWithPartitioning",
    s"""
      |select p_mfgr, p_name, p_size,
      |min(p_retailprice),
      |rank() over(distribute by p_mfgr sort by p_name)as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz
      |from part
      |group by p_mfgr, p_name, p_size
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 3. testGroupByHavingWithSWQ",
    s"""
      |select p_mfgr, p_name, p_size, min(p_retailprice),
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz
      |from part
      |group by p_mfgr, p_name, p_size
      |having p_size > 0
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 4. testCount",
    s"""
      |select p_mfgr, p_name,
      |count(p_size) over(distribute by p_mfgr sort by p_name) as cd
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 5. testCountWithWindowingUDAF",
    s"""
      |select p_mfgr, p_name,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |count(p_size) over(distribute by p_mfgr sort by p_name) as cd,
      |p_retailprice, sum(p_retailprice) over (distribute by p_mfgr sort by p_name
      | rows between unbounded preceding and current row) as s1,
      |p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 6. testCountInSubQ",
    s"""
      |select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz
      |from (select p_mfgr, p_name,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |count(p_size) over(distribute by p_mfgr sort by p_name) as cd,
      |p_retailprice, sum(p_retailprice) over (distribute by p_mfgr sort by p_name
      | rows between unbounded preceding and current row) as s1,
      |p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz
      |from part
      |) sub1
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 8. testMixedCaseAlias",
    s"""
      |select p_mfgr, p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name, p_size desc) as R
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 9. testHavingWithWindowingNoGBY",
    s"""
      |select p_mfgr, p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |sum(p_retailprice) over (distribute by p_mfgr sort by p_name
      | rows between unbounded preceding and current row) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 10. testHavingWithWindowingCondRankNoGBY",
    s"""
      |select p_mfgr, p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |sum(p_retailprice) over (distribute by p_mfgr sort by p_name
      | rows between unbounded preceding and current row) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 11. testFirstLast",
    s"""
      |select p_mfgr,p_name, p_size,
      |sum(p_size) over (distribute by p_mfgr sort by p_name
      |rows between current row and current row) as s2,
      |first_value(p_size) over w1 as f,
      |last_value(p_size, false) over w1 as l
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 12. testFirstLastWithWhere",
    s"""
      |select p_mfgr,p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |sum(p_size) over (distribute by p_mfgr sort by p_name
      |rows between current row and current row) as s2,
      |first_value(p_size) over w1 as f,
      |last_value(p_size, false) over w1 as l
      |from part
      |where p_mfgr = 'Manufacturer#3'
      |window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 13. testSumWindow",
    s"""
      |select p_mfgr,p_name, p_size,
      |sum(p_size) over w1 as s1,
      |sum(p_size) over (distribute by p_mfgr sort by p_name
      |rows between current row and current row) as s2
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 14. testNoSortClause",
    s"""
      |select p_mfgr,p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 15. testExpressions",
    s"""
      |select p_mfgr,p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |cume_dist() over(distribute by p_mfgr sort by p_name) as cud,
      |percent_rank() over(distribute by p_mfgr sort by p_name) as pr,
      |ntile(3) over(distribute by p_mfgr sort by p_name) as nt,
      |count(p_size) over(distribute by p_mfgr sort by p_name) as ca,
      |avg(p_size) over(distribute by p_mfgr sort by p_name) as avg,
      |stddev(p_size) over(distribute by p_mfgr sort by p_name) as st,
      |first_value(p_size % 5) over(distribute by p_mfgr sort by p_name) as fv,
      |last_value(p_size) over(distribute by p_mfgr sort by p_name) as lv,
      |first_value(p_size) over w1 as fvW1
      |from part
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 16. testMultipleWindows",
    s"""
      |select p_mfgr,p_name, p_size,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |cume_dist() over(distribute by p_mfgr sort by p_name) as cud,
      |sum(p_size) over (distribute by p_mfgr sort by p_name
      |range between unbounded preceding and current row) as s1,
      |sum(p_size) over (distribute by p_mfgr sort by p_size
      |range between 5 preceding and current row) as s2,
      |first_value(p_size) over w1 as fv1
      |from part
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 17. testCountStar",
    s"""
      |select p_mfgr,p_name, p_size,
      |count(*) over(distribute by p_mfgr sort by p_name ) as c,
      |count(p_size) over(distribute by p_mfgr sort by p_name) as ca,
      |first_value(p_size) over w1 as fvW1
      |from part
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 18. testUDAFs",
    s"""
      |select p_mfgr,p_name, p_size,
      |sum(p_retailprice) over w1 as s,
      |min(p_retailprice) over w1 as mi,
      |max(p_retailprice) over w1 as ma,
      |avg(p_retailprice) over w1 as ag
      |from part
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 19. testUDAFsWithGBY",
    """
      |select p_mfgr,p_name, p_size, p_retailprice,
      |sum(p_retailprice) over w1 as s,
      |min(p_retailprice) as mi ,
      |max(p_retailprice) as ma ,
      |avg(p_retailprice) over w1 as ag
      |from part
      |group by p_mfgr,p_name, p_size, p_retailprice
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following);
    """.stripMargin, reset = false)

  // collect_set() output array in an arbitrary order, hence causes different result
  // when running this test suite under Java 7 and 8.
  // We change the original sql query a little bit for making the test suite passed
  // under different JDK
  createQueryTest("windowing.q -- 20. testSTATs",
    """
      |select p_mfgr,p_name, p_size, sdev, sdev_pop, uniq_data, var, cor, covarp
      |from (
      |select p_mfgr,p_name, p_size,
      |stddev(p_retailprice) over w1 as sdev,
      |stddev_pop(p_retailprice) over w1 as sdev_pop,
      |collect_set(p_size) over w1 as uniq_size,
      |variance(p_retailprice) over w1 as var,
      |corr(p_size, p_retailprice) over w1 as cor,
      |covar_pop(p_size, p_retailprice) over w1 as covarp
      |from part
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following)
      |) t lateral view explode(uniq_size) d as uniq_data
      |order by p_mfgr,p_name, p_size, sdev, sdev_pop, uniq_data, var, cor, covarp
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 21. testDISTs",
    """
      |select p_mfgr,p_name, p_size,
      |histogram_numeric(p_retailprice, 5) over w1 as hist,
      |percentile(p_partkey, 0.5) over w1 as per,
      |row_number() over(distribute by p_mfgr sort by p_name) as rn
      |from part
      |window w1 as (distribute by p_mfgr sort by p_mfgr, p_name
      | rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 24. testLateralViews",
    """
      |select p_mfgr, p_name,
      |lv_col, p_size, sum(p_size) over w1 as s
      |from (select p_mfgr, p_name, p_size, array(1,2,3) arr from part) p
      |lateral view explode(arr) part_lv as lv_col
      |window w1 as (distribute by p_mfgr sort by p_size, lv_col
      | rows between 2 preceding and current row)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 26. testGroupByHavingWithSWQAndAlias",
    """
      |select p_mfgr, p_name, p_size, min(p_retailprice) as mi,
      |rank() over(distribute by p_mfgr sort by p_name) as r,
      |dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
      |p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz
      |from part
      |group by p_mfgr, p_name, p_size
      |having p_size > 0
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 27. testMultipleRangeWindows",
    """
      |select p_mfgr,p_name, p_size,
      |sum(p_size) over (distribute by p_mfgr sort by p_size
      |range between 10 preceding and current row) as s2,
      |sum(p_size) over (distribute by p_mfgr sort by p_size
      |range between current row and 10 following ) as s1
      |from part
      |window w1 as (rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 28. testPartOrderInUDAFInvoke",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over (partition by p_mfgr order by p_name
      |rows between 2 preceding and 2 following) as s
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 29. testPartOrderInWdwDef",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s
      |from part
      |window w1 as (partition by p_mfgr order by p_name
      | rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 30. testDefaultPartitioningSpecRules",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s,
      |sum(p_size) over w2 as s2
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following),
      | w2 as (partition by p_mfgr order by p_name)
    """.stripMargin, reset = false)

  /* p_name is not a numeric column. What is Hive's semantic?
  createQueryTest("windowing.q -- 31. testWindowCrossReference",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s1,
      |sum(p_size) over w2 as s2
      |from part
      |window w1 as (partition by p_mfgr order by p_name
      | range between 2 preceding and 2 following),
      | w2 as w1
    """.stripMargin, reset = false)
  */

  /*
  createQueryTest("windowing.q -- 32. testWindowInheritance",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s1,
      |sum(p_size) over w2 as s2
      |from part
      |window w1 as (partition by p_mfgr order by p_name
      | range between 2 preceding and 2 following),
      | w2 as (w1 rows between unbounded preceding and current row)
    """.stripMargin, reset = false)
  */

  /* p_name is not a numeric column. What is Hive's semantic?
  createQueryTest("windowing.q -- 33. testWindowForwardReference",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s1,
      |sum(p_size) over w2 as s2,
      |sum(p_size) over w3 as s3
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name
      | range between 2 preceding and 2 following),
      | w2 as w3,
      | w3 as (distribute by p_mfgr sort by p_name
      | range between unbounded preceding and current row)
    """.stripMargin, reset = false)
  */

  /*
  createQueryTest("windowing.q -- 34. testWindowDefinitionPropagation",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s1,
      |sum(p_size) over w2 as s2,
      |sum(p_size) over (w3 rows between 2 preceding and 2 following) as s3
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name
      | range between 2 preceding and 2 following),
      | w2 as w3,
      | w3 as (distribute by p_mfgr sort by p_name
      | range between unbounded preceding and current row)
    """.stripMargin, reset = false)
  */

  /* Seems Hive evaluate SELECT DISTINCT before window functions?
  createQueryTest("windowing.q -- 35. testDistinctWithWindowing",
    """
      |select DISTINCT p_mfgr, p_name, p_size,
      |sum(p_size) over w1 as s
      |from part
      |window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following)
    """.stripMargin, reset = false)
  */

  createQueryTest("windowing.q -- 36. testRankWithPartitioning",
    """
      |select p_mfgr, p_name, p_size,
      |rank() over (partition by p_mfgr order by p_name ) as r
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 37. testPartitioningVariousForms",
    """
      |select p_mfgr,
      |round(sum(p_retailprice) over (partition by p_mfgr order by p_mfgr),2) as s1,
      |min(p_retailprice) over (partition by p_mfgr) as s2,
      |max(p_retailprice) over (distribute by p_mfgr sort by p_mfgr) as s3,
      |round(avg(p_retailprice) over (distribute by p_mfgr),2) as s4,
      |count(p_retailprice) over (cluster by p_mfgr ) as s5
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 38. testPartitioningVariousForms2",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_retailprice) over (partition by p_mfgr, p_name order by p_mfgr, p_name
      |rows between unbounded preceding and current row) as s1,
      |min(p_retailprice) over (distribute by p_mfgr, p_name sort by p_mfgr, p_name
      |rows between unbounded preceding and current row) as s2,
      |max(p_retailprice) over (partition by p_mfgr, p_name order by p_name) as s3
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 39. testUDFOnOrderCols",
    """
      |select p_mfgr, p_type, substr(p_type, 2) as short_ptype,
      |rank() over (partition by p_mfgr order by substr(p_type, 2)) as r
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 40. testNoBetweenForRows",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows unbounded preceding) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 41. testNoBetweenForRange",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_retailprice) over (distribute by p_mfgr sort by p_size range unbounded preceding) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 42. testUnboundedFollowingForRows",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_retailprice) over (distribute by p_mfgr sort by p_name
      |rows between current row and unbounded following) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 43. testUnboundedFollowingForRange",
    """
      |select p_mfgr, p_name, p_size,
      |sum(p_retailprice) over (distribute by p_mfgr sort by p_size
      |range between current row and unbounded following) as s1
      |from part
    """.stripMargin, reset = false)

  createQueryTest("windowing.q -- 44. testOverNoPartitionSingleAggregate",
    """
      |select p_name, p_retailprice,
      |round(avg(p_retailprice) over(),2)
      |from part
      |order by p_name
    """.stripMargin, reset = false)
}
// Runs the Hive compatibility query files related to window functions against
// Hive's golden answers, restricted to the whiteList below.
class HiveWindowFunctionQueryFileSuite
  extends HiveCompatibilitySuite with BeforeAndAfter {
  private val originalTimeZone = TimeZone.getDefault
  private val originalLocale = Locale.getDefault
  private val testTempDir = Utils.createTempDir()

  // Fixes the JVM-wide time zone and locale so query results match the golden files.
  override def beforeAll() {
    TestHive.cacheTables = true
    // Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
    TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
    // Add Locale setting
    Locale.setDefault(Locale.US)

    // The following settings are used for generating golden files with Hive.
    // We have to use kryo to correctly let Hive serialize plans with window functions.
    // This is used to generate golden files.
    // sql("set hive.plan.serialization.format=kryo")
    // Explicitly set fs to local fs.
    // sql(s"set fs.default.name=file://$testTempDir/")
    // Ask Hive to run jobs in-process as a single map and reduce task.
    // sql("set mapred.job.tracker=local")
  }

  // Restores the time zone and locale changed in beforeAll() and resets TestHive.
  override def afterAll() {
    TestHive.cacheTables = false
    TimeZone.setDefault(originalTimeZone)
    Locale.setDefault(originalLocale)
    TestHive.reset()
  }

  override def blackList: Seq[String] = Seq(
    // Partitioned table functions are not supported.
    "ptf*",
    // tests of windowing.q are in HiveWindowFunctionQuerySuite (defined above in this file)
    "windowing.q",

    // This one failed on the expression of
    // sum(lag(p_retailprice,1,0.0)) over w1
    // lag(p_retailprice,1,0.0) is a GenericUDF and the argument inspector of
    // p_retailprice created by HiveInspectors is
    // PrimitiveObjectInspectorFactory.javaDoubleObjectInspector.
    // However, seems Hive assumes it is
    // PrimitiveObjectInspectorFactory.writableDoubleObjectInspector, which introduces an error.
    "windowing_expressions",

    // Hive's results are not deterministic
    "windowing_multipartitioning",
    "windowing_navfn",
    "windowing_ntile",
    "windowing_udaf",
    "windowing_windowspec",
    "windowing_rank"
  )

  override def whiteList: Seq[String] = Seq(
    "windowing_udaf2",
    "windowing_columnPruning",
    "windowing_adjust_rowcontainer_sz"
  )

  // Only run those query tests in the realWhiteList (do not try other ignored query files).
  override def testCases: Seq[(String, File)] = super.testCases.filter {
    case (name, _) => realWhiteList.contains(name)
  }
}
| tophua/spark1.52 | sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveWindowFunctionQuerySuite.scala | Scala | apache-2.0 | 33,956 |
package dregex
import dregex.impl.RegexTree.{AbstractRange, CharRange, CharSet}
import dregex.impl.{UnicodeChar, Util}
import java.lang.Character.{UnicodeBlock, UnicodeScript}
import scala.collection.JavaConverters.mapAsScalaMapConverter
import dregex.impl.UnicodeChar.FromIntConversion
import scala.collection.immutable.SortedMap
// [CROSS-BUILD] For immutable collections in Scala < 2.13
import scala.collection.immutable.Seq
// [CROSS-BUILD] For mapValues in Scala < 2.13
import scala.collection.compat._
/**
 * This object will generate code with some Unicode information. We need code generation because the Java module
 * system disallows by default some cross-module reflection. This program will do the reflection when run with the
 * appropriate VM command-line arguments:
 *
 * --add-opens java.base/java.lang=ALL-UNNAMED
 */
object LiteralUnicodeGenerator {

  // (block, (firstCodePoint, lastCodePoint)) pairs, reconstructed from the JDK's private tables
  // via reflection. Unassigned blocks (null entries in the JDK array) are dropped.
  private val blockRanges: Seq[(UnicodeBlock, (Int, Int))] = {
    val blockStarts = Util.getPrivateStaticField[Array[Int]](classOf[UnicodeBlock], "blockStarts")
    val javaBlocks = Util.getPrivateStaticField[Array[UnicodeBlock]](classOf[UnicodeBlock], "blocks").toSeq
    blockStarts.indices.flatMap { i =>
      val from = blockStarts(i)
      val to =
        if (i == blockStarts.length - 1)
          UnicodeChar.max.codePoint
        else
          blockStarts(i + 1) - 1
      // skip unassigned blocks
      javaBlocks.map(Option(_)).apply(i).map { block =>
        (block, (from, to))
      }
    }
  }

  // All known names (canonical and aliases) for each Unicode block, from the JDK's private name map.
  private val blockAliases: Map[UnicodeBlock, Seq[String]] = {
    val alias =
      Util.getPrivateStaticField[java.util.Map[String, UnicodeBlock]](classOf[UnicodeBlock], "map").asScala.toMap
    alias
      .groupBy { case (_, v) => v }
      // [CROSS-BUILD] For immutable collections in Scala < 2.13
      .view
      .mapValues(v => v.keys.toIndexedSeq)
      .toMap
  }

  // Character-set (possibly multiple disjoint ranges) covered by each Unicode script,
  // reconstructed from the JDK's private tables. UNKNOWN (unassigned) is skipped.
  private val scriptRanges: Seq[(UnicodeScript, CharSet)] = {
    val scriptStarts = Util.getPrivateStaticField[Array[Int]](classOf[UnicodeScript], "scriptStarts")
    val javaScripts = Util.getPrivateStaticField[Array[UnicodeScript]](classOf[UnicodeScript], "scripts").toSeq
    // [CROSS-BUILD] Using immutable SortedMap as the immutable one was introduced in Scala 2.12
    var builder = SortedMap[UnicodeScript, CharSet]()
    for (i <- scriptStarts.indices) {
      val from = scriptStarts(i)
      val to =
        if (i == scriptStarts.length - 1)
          UnicodeChar.max.codePoint
        else
          scriptStarts(i + 1) - 1
      // skip unknown (unassigned) scripts
      if (javaScripts(i) != UnicodeScript.UNKNOWN) {
        // A script may appear in several non-contiguous ranges: accumulate them per script.
        val CharSet(existing) = builder.getOrElse(javaScripts(i), CharSet(Seq()))
        builder += javaScripts(i) -> CharSet(existing :+ CharRange(from.u, to.u))
      }
    }
    // [CROSS-BUILD] Ask for IndexedSeq to force immutable.Seq
    builder.toIndexedSeq
  }

  // Alias names for each Unicode script, from the JDK's private alias map.
  // NOTE(review): unlike blocks, the canonical name is NOT included here; callers prepend it.
  private val scriptAliases: Map[UnicodeScript, Seq[String]] = {
    val alias =
      Util.getPrivateStaticField[java.util.Map[String, UnicodeScript]](classOf[UnicodeScript], "aliases").asScala.toMap
    alias
      .groupBy { case (_, v) => v }
      // [CROSS-BUILD] For immutable collections in Scala < 2.13
      .view
      .mapValues(v => v.keys.toIndexedSeq)
      .toMap
  }

  // Formats a code point as a zero-padded hexadecimal literal, e.g. 0x004A.
  private def toHex(codePoint: Int): String = {
    String.format("0x%04X", Int.box(codePoint))
  }

  // Formats a range as a "(from, to)" tuple literal of hex code points.
  private def toHexPair(range: AbstractRange): String = {
    s"(${toHex(range.from.codePoint)}, ${toHex(range.to.codePoint)})"
  }

  // Formats all ranges of a char set as comma-separated "(from, to)" tuples.
  private def toHexPairs(set: CharSet): String = {
    set.ranges.map(toHexPair(_)).mkString(", ")
  }

  /**
   * Prints the generated Scala source (two Map literals: block ranges and script ranges)
   * to standard output. Run with `--add-opens java.base/java.lang=ALL-UNNAMED`.
   */
  def main(args: Array[String]): Unit = {
    println("val blocksRanges: Map[Seq[String], (Int, Int)] = Map(")
    for (((block, (from, to)), i) <- blockRanges.zipWithIndex) yield {
      val blocksAndAliases = blockAliases(block)
      print(
        s"  Seq(${blocksAndAliases.map(name => '"' + name + '"').mkString(", ")}) -> (${toHex(from)}, ${toHex(to)})")
      // No trailing comma after the last entry.
      if (i < blockRanges.size - 1) {
        print(",")
      }
      println()
    }
    println(")")
    println()
    println("val scriptRanges: Map[Seq[String], Seq[(Int, Int)]] = Map(")
    for (((script, charSet), i) <- scriptRanges.zipWithIndex) yield {
      val scriptsAndAliases = script.toString +: scriptAliases(script)
      print(s"  Seq(${scriptsAndAliases.map(name => '"' + name + '"').mkString(", ")}) -> Seq(${toHexPairs(charSet)})")
      // No trailing comma after the last entry.
      if (i < scriptRanges.size - 1) {
        print(",")
      }
      println()
    }
    println(")")
  }
}
| marianobarrios/dregex | src/test/scala/dregex/LiteralUnicodeGenerator.scala | Scala | bsd-2-clause | 4,528 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control.controls
import cats.syntax.option._
import org.orbeon.dom.Element
import org.orbeon.oxf.common.{OXFException, OrbeonLocationException}
import org.orbeon.oxf.util.CoreUtils._
import org.orbeon.oxf.xforms.XFormsContainingDocument
import org.orbeon.oxf.xforms.action.XFormsAPI
import org.orbeon.oxf.xforms.analysis.controls.SelectionControl
import org.orbeon.oxf.xforms.analysis.controls.SelectionControlTrait
import org.orbeon.oxf.xforms.control.XFormsControl.{ControlProperty, ImmutableControlProperty}
import org.orbeon.oxf.xforms.control._
import org.orbeon.oxf.xforms.event.events.{XFormsDeselectEvent, XFormsSelectEvent}
import org.orbeon.oxf.xforms.event.{Dispatch, XFormsEvent}
import org.orbeon.oxf.xforms.itemset.{Item, Itemset, ItemsetSupport, StaticItemsetSupport}
import org.orbeon.oxf.xforms.model.DataModel
import org.orbeon.oxf.xforms.state.ControlState
import org.orbeon.oxf.xforms.xbl.XBLContainer
import org.orbeon.oxf.xml.{SaxonUtils, XMLReceiverHelper}
import org.orbeon.oxf.xml.dom.XmlExtendedLocationData
import org.orbeon.saxon.om
import org.orbeon.scaxon.SimplePath._
import org.orbeon.xforms.XFormsNames._
import scala.util.control.NonFatal
/**
 * Runtime control for XForms single-selection controls (`xf:select1`).
 *
 * Holds the control's itemset, translates external (client) values into
 * `xforms-select`/`xforms-deselect` events, and knows how to serialize itemset
 * updates to the client as part of Ajax diffs.
 */
class XFormsSelect1Control(
  container : XBLContainer,
  parent    : XFormsControl,
  element   : Element,
  id        : String
) extends XFormsSingleNodeControl(
  container,
  parent,
  element,
  id
) with XFormsValueControl
  with SingleNodeFocusableTrait {

  selfControl =>

  override type Control <: SelectionControl

  // This is a var just for getBackCopy
  private[XFormsSelect1Control] var itemsetProperty: ControlProperty[Itemset] = new MutableItemsetProperty(selfControl)

  // Whether item values must be encoded before being exposed to the client.
  def mustEncodeValues: Boolean = XFormsSelect1Control.mustEncodeValues(containingDocument, staticControl)
  def isFullAppearance: Boolean = staticControl.isFull

  override def onCreate(restoreState: Boolean, state: Option[ControlState], update: Boolean): Unit = {
    super.onCreate(restoreState, state, update)
    // Evaluate itemsets only if restoring dynamic state
    // NOTE: This doesn't sound like it is the right place to do this, does it?
    if (restoreState)
      getItemset
  }

  // Return the custom group name if present, otherwise return the effective id
  def getGroupName: String =
    extensionAttributeValue(XXFORMS_GROUP_QNAME) getOrElse getEffectiveId

  override def hasJavaScriptInitialization: Boolean =
    staticControl.appearances contains XFORMS_COMPACT_APPEARANCE_QNAME

  override def markDirtyImpl(): Unit = {
    super.markDirtyImpl()
    itemsetProperty.handleMarkDirty()
  }

  // Get this control's itemset
  // This requires the control to be relevant.
  def getItemset: Itemset =
    try {
      // Non-relevant control doesn't have an itemset
      require(isRelevant)

      if (staticControl.isNorefresh)
        // Items are not automatically refreshed and stored globally
        // NOTE: Store them by prefixed id because the itemset might be different between XBL template instantiations
        containingDocument.controls.getConstantItems(getPrefixedId) getOrElse {
          val newItemset = ItemsetSupport.evaluateItemset(selfControl)
          containingDocument.controls.setConstantItems(getPrefixedId, newItemset)
          newItemset
        }
      else
        // Items are stored in the control
        itemsetProperty.value()
    } catch {
      // Wrap to attach this control's location data for better error reporting
      case NonFatal(t) =>
        throw OrbeonLocationException.wrapException(
          t,
          XmlExtendedLocationData(getLocationData, "evaluating itemset".some, element = Some(element))
        )
    }

  override def evaluateExternalValue(): Unit = {
    // If the control is relevant, its internal value and itemset must be defined
    getValue ensuring (_ ne null)
    getItemset ensuring (_ ne null)

    setExternalValue(
      if (! isStaticReadonly)
        findSelectedItem map (_.externalValue(mustEncodeValues)) orNull
      else
        findSelectedItem map (i => ItemsetSupport.htmlValue(i.label, getLocationData)) orNull // external value is the label
    )
  }

  // Q: In theory, multiple items could have the same value and therefore be selected, right?
  def findSelectedItems: List[Item.ValueNode] =
    findSelectedItem.toList

  // Find the first item of the itemset whose value matches the bound data value, if any.
  def findSelectedItem: Option[Item.ValueNode] =
    boundItemOpt map getCurrentItemValueFromData flatMap { current =>
      getItemset.ensuring(_ ne null).allItemsWithValueIterator(reverse = false) collectFirst {
        case (item, itemValue) if StaticItemsetSupport.compareSingleItemValues(
          dataValue                  = current,
          itemValue                  = itemValue,
          compareAtt                 = SaxonUtils.attCompare(boundNodeOpt, _),
          excludeWhitespaceTextNodes = staticControl.excludeWhitespaceTextNodesForCopy
        ) => item
      }
    }

  // The current value depends on whether we follow `xf:copy` or `xf:value` semantics
  def getCurrentItemValueFromData(boundItem: om.Item): Item.Value[om.NodeInfo] = {
    if (staticControl.useCopy)
      // `xf:copy`: the value is the list of child nodes of the bound node
      Right(
        boundItem match {
          case node: om.NodeInfo => (node child Node).toList
          case _                 => Nil
        }
      )
    else
      // `xf:value`: the value is the control's string value
      Left(getValue)
  }

  override def translateExternalValue(boundItem: om.Item, externalValue: String): Option[String] = {

    val (selectEvents, deselectEvents) =
      gatherEventsForExternalValue(getItemset, getCurrentItemValueFromData(boundItem), externalValue)

    // Dispatch deselects before selects
    for (currentEvent <- deselectEvents)
      Dispatch.dispatchEvent(currentEvent)

    for (currentEvent <- selectEvents)
      Dispatch.dispatchEvent(currentEvent)

    // Value is updated via `xforms-select`/`xforms-deselect` events
    // Q: Could/should this be the case for other controls as well?
    None
  }

  // We take selection controls to be visited as soon as a selection is made, without requiring the field to loose
  // the focus, as browsers implementation of focus events on selection controls is inconsistent, and considering the
  // field visited on selection is generally what is expected by form authors (see issue #5040)
  protected def markVisitedOnSelectDeselect(event: XFormsEvent): Unit =
    event match {
      case _: XFormsDeselectEvent | _: XFormsSelectEvent => visited = true
      case _ => // nop
    }

  override def performDefaultAction(event: XFormsEvent): Unit = {
    event match {
      case deselect: XFormsDeselectEvent =>
        boundNodeOpt match {
          case Some(boundNode) =>
            deselect.itemValue match {
              case Left(_) =>
                // `xf:value` semantics: deselecting clears the bound node's string value
                DataModel.setValueIfChangedHandleErrors(
                  eventTarget  = selfControl,
                  locationData = getLocationData,
                  nodeInfo     = boundNode,
                  valueToSet   = "",
                  source       = "select",
                  isCalculate  = false
                )
              case Right(v) =>
                // If the deselected value contains attributes, remove all of those from the bound node
                val (atts, other) = StaticItemsetSupport.partitionAttributes(v)
                if (atts.nonEmpty)
                  XFormsAPI.delete(
                    ref = atts flatMap (att => boundNode att (att.namespaceURI, att.localname))
                  )
                // If the deselected value contains a node that is not an attribute, then clear the element
                // content. We could clear the element content no matter what but this enables the use case
                // of selecting attributes independently from the element's content.
                if (other.nonEmpty)
                  XFormsAPI.delete(
                    ref = boundNode child Node
                  )
            }
          case None =>
            throw new OXFException("Control is no longer bound to a node. Cannot set external value.")
        }
      case select: XFormsSelectEvent =>
        boundNodeOpt match {
          case Some(boundNode) =>
            select.itemValue match {
              case Left(v) =>
                // `xf:value` semantics: selecting sets the bound node's string value
                DataModel.setValueIfChangedHandleErrors(
                  eventTarget  = selfControl,
                  locationData = getLocationData,
                  nodeInfo     = boundNode,
                  valueToSet   = v,
                  source       = "select",
                  isCalculate  = false
                )
              case Right(v) =>
                // `xf:copy` semantics: replace the bound node's content with the item's nodes
                XFormsAPI.delete(
                  ref = boundNode child Node
                )
                XFormsAPI.insert(
                  origin = v,
                  into   = List(boundNode)
                )
            }
          case None =>
            throw new OXFException("Control is no longer bound to a node. Cannot set external value.")
        }
      case _ =>
    }
    markVisitedOnSelectDeselect(event)
    super.performDefaultAction(event)
  }

  // For XFormsSelectControl
  // We should *not* use inheritance this way here!
  protected def valueControlPerformDefaultAction(event: XFormsEvent): Unit =
    super.performDefaultAction(event)

  // Compute the `xforms-select`/`xforms-deselect` events implied by a new external (client) value,
  // by comparing, for each item, its previous selection state against the new one.
  private def gatherEventsForExternalValue(
    itemset          : Itemset,
    dataValue        : Item.Value[om.NodeInfo],
    newExternalValue : String
  ): (List[XFormsSelectEvent], List[XFormsDeselectEvent]) =
    itemset.allItemsWithValueIterator(reverse = true).foldLeft((Nil: List[XFormsSelectEvent], Nil: List[XFormsDeselectEvent])) {
      case (result @ (selected, deselected), (item, itemValue)) =>

        val itemWasSelected =
          StaticItemsetSupport.compareSingleItemValues(
            dataValue                  = dataValue,
            itemValue                  = itemValue,
            compareAtt                 = SaxonUtils.attCompare(boundNodeOpt, _),
            excludeWhitespaceTextNodes = staticControl.excludeWhitespaceTextNodesForCopy
          )

        val itemIsSelected =
          item.externalValue(mustEncodeValues) == newExternalValue

        val getsSelected   = ! itemWasSelected && itemIsSelected
        val getsDeselected = itemWasSelected && ! itemIsSelected

        val newSelected =
          if (getsSelected)
            new XFormsSelectEvent(selfControl, itemValue) :: selected
          else
            selected

        val newDeselected =
          if (getsDeselected)
            new XFormsDeselectEvent(selfControl, itemValue) :: deselected
          else
            deselected

        if (getsSelected || getsDeselected)
          (newSelected, newDeselected)
        else
          result // optimization
    }

  override def getBackCopy: AnyRef = {
    val cloned = super.getBackCopy.asInstanceOf[XFormsSelect1Control]
    // If we have an itemset, make sure the computed value is used as basis for comparison
    cloned.itemsetProperty = new ImmutableControlProperty(itemsetProperty.value())
    cloned
  }

  override def compareExternalUseExternalValue(
    previousExternalValue : Option[String],
    previousControl       : Option[XFormsControl]
  ): Boolean =
    previousControl match {
      case Some(other: XFormsSelect1Control) =>
        ! mustSendItemsetUpdate(other) &&
        super.compareExternalUseExternalValue(previousExternalValue, previousControl)
      case _ => false
    }

  // Whether the client needs to receive a fresh itemset, given the previous state of this control.
  private def mustSendItemsetUpdate(otherSelect1Control: XFormsSelect1Control): Boolean =
    if (staticControl.staticItemset.isDefined) {
      // There is no need to send an update:
      //
      // 1. Items are static...
      // 2. ...and they have been output statically in the HTML page
      false
    } else if (isStaticReadonly) {
      // There is no need to send an update for static readonly controls
      false
    } else {
      // There is a possible change
      if (XFormsSingleNodeControl.isRelevant(otherSelect1Control) != isRelevant) {
        // Relevance changed
        // Here we decide to send an update only if we become relevant, as the client will know that the
        // new state of the control is non-relevant and can handle the itemset on the client as it wants.
        isRelevant
      } else if (! XFormsSingleNodeControl.isRelevant(selfControl)) {
        // We were and are non-relevant, no update
        false
      } else {
        // If the itemsets changed, then we need to send an update
        otherSelect1Control.getItemset != getItemset
      }
    }

  final override def outputAjaxDiffUseClientValue(
    previousValue   : Option[String],
    previousControl : Option[XFormsValueControl],
    content         : Option[XMLReceiverHelper => Unit])(implicit
    ch              : XMLReceiverHelper
  ): Unit = {

    val hasNestedContent =
      mustSendItemsetUpdate(previousControl map (_.asInstanceOf[XFormsSelect1Control]) orNull)

    // Serialize the current itemset as JSON inside an <xxf:itemset> element
    val outputNestedContent = (ch: XMLReceiverHelper) => {

      ch.startElement("xxf", XXFORMS_NAMESPACE_URI, "itemset", Array[String]())

      val itemset = getItemset
      if (itemset ne null) {
        val result = ItemsetSupport.asJSON(itemset, None, mustEncodeValues, staticControl.excludeWhitespaceTextNodesForCopy, getLocationData)
        if (result.nonEmpty)
          ch.text(result)
      }

      ch.endElement()
    }

    // Output regular diff
    super.outputAjaxDiffUseClientValue(
      previousValue,
      previousControl,
      hasNestedContent option outputNestedContent
    )
  }

  // https://github.com/orbeon/orbeon-forms/issues/3383
  override def findAriaByControlEffectiveIdWithNs: Option[String] =
    super.findAriaByControlEffectiveIdWithNs

  // Don't accept focus if we have the internal appearance
  override def isDirectlyFocusableMaybeWithToggle: Boolean =
    ! staticControl.appearances(XXFORMS_INTERNAL_APPEARANCE_QNAME) && super.isDirectlyFocusableMaybeWithToggle

  override def supportAjaxUpdates: Boolean =
    ! staticControl.appearances(XXFORMS_INTERNAL_APPEARANCE_QNAME)
}
object XFormsSelect1Control {

  /**
   * Get the itemset for a selection control, given either the live control or its static analysis.
   *
   * If the control exists and is relevant, ask the control itself (this also covers static itemsets).
   * Otherwise fall back to the static itemset, if any; this way static itemsets are available during
   * initialization even for non-relevant controls.
   */
  def getInitialItemset(control: XFormsSelect1Control, staticControl: SelectionControlTrait): Option[Itemset] = {
    val haveLiveRelevantControl = (control ne null) && control.isRelevant
    if (haveLiveRelevantControl)
      control.getItemset.some
    else
      staticControl.staticItemset
  }

  /** Whether values must be encoded, from the control's setting or else the document default. */
  def mustEncodeValues(containingDocument: XFormsContainingDocument, control: SelectionControlTrait): Boolean =
    control.mustEncodeValues.getOrElse(containingDocument.encodeItemValues)
}
package com.github.kompot.play2sec.authentication.providers
import play.api.mvc.Request
class MyUsernamePasswordAuthProviderShortLiving(app: play.Application) extends MyUsernamePasswordAuthProvider(app) {

  // Builds an auth user whose expiration is 1 ms in the future, i.e. effectively already expired.
  // NOTE(review): the tuple components are swapped when passed to the constructor, matching the
  // parent provider's convention — confirm against MyLoginUsernamePasswordAuthUser's signature.
  override protected def buildLoginAuthUser[A](login: (String, String), request: Request[A]) = {
    val (first, second) = login
    new MyLoginUsernamePasswordAuthUser(second, first, System.currentTimeMillis + 1)
  }
}
| kompot/play2sec | test/com/github/kompot/play2sec/authentication/providers/MyUsernamePasswordAuthProviderShortLiving.scala | Scala | apache-2.0 | 394 |
// Copyright (c) 2011-2015 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.paulbutcher.test.matchers
import org.scalamock._
import org.scalamock.matchers.MatchAny
import org.scalatest.FreeSpec
class MatchAnyTest extends FreeSpec {

  "MatchAny should match anything" in {
    // A MatchAny instance must compare equal to values of arbitrary types.
    val samples: Seq[Any] = Seq(1.0, "", (0, 42), List(1, 2, 3))
    samples foreach { sample =>
      assert(new MatchAny == sample)
    }
  }
}
| hypertino/ScalaMock | core_tests/shared/src/test/scala/com/paulbutcher/test/matchers/MatchAnyTest.scala | Scala | mit | 1,547 |
package scalacl
package impl
import scala.reflect.runtime.universe.{ typeTag, TypeTag }
/**
 * Code-generation descriptor for an inlined set-like collection.
 *
 * @param defs   supporting definitions to emit before the generated code
 * @param getter builds a code snippet from (inputName, outputName)
 * @param size   code expression for the collection size — presumably an element count; TODO confirm
 */
case class InlinedSet(defs: String, getter: (String, String) => String, size: String)
/**
 * Code-generation descriptor for an inlined map-like collection.
 *
 * @param defs   supporting definitions to emit before the generated code
 * @param getter builds a code snippet from (inputName, outputName, presenceOutputName)
 * @param size   code expression for the collection size — presumably an entry count; TODO confirm
 */
case class InlinedMap(defs: String, getter: (String, String, String) => String, size: String)
object InlinedCollections {

  /** C-style type name for a Scala type, e.g. Int -> "int", Long -> "long". */
  def convertType[A: TypeTag]: String = typeTag[A].tpe.toString.toLowerCase

  /** Literal C representation of a value (its toString). */
  def inline(v: Any): String = v.toString

  /**
   * Generates a C function implementing a lookup of `map` as a `switch` over its keys.
   *
   * The generated function has the shape `bool name(const KeyType key, ValueType *value)`:
   * it writes the mapped value through `value` and returns true on a hit, false otherwise.
   *
   * FIX: the out-parameter was previously declared `const ${convertType[A]} *value` — it used the
   * KEY type instead of the VALUE type, and the `const` qualifier made the generated assignments
   * `*value = ...` invalid C. It is now `${convertType[B]} *value`.
   */
  def inlineMapAsSwitch[A: TypeTag, B: TypeTag](mapName: String, map: Map[A, B]): String = {
    // Order the cases deterministically by comparing the (boxed) keys.
    implicit val comparator = new java.util.Comparator[Comparable[_]] {
      override def compare(a: java.lang.Comparable[_], b: java.lang.Comparable[_]): Int = {
        a.asInstanceOf[Comparable[AnyRef]].compareTo(b.asInstanceOf[AnyRef])
      }
    }
    val cases = for (key <- map.keys.map(_.asInstanceOf[java.lang.Comparable[_]]).toSeq.sorted) yield {
      val value = map(key.asInstanceOf[A])
      s"""case ${inline(key)}: *value = ${inline(value)}; break;"""
    }
    s"""
      bool $mapName(const ${convertType[A]} key, ${convertType[B]} *value) {
        switch (key) {
          ${cases.mkString("\n          ")}
          default:
            return false;
        }
        return true;
      }
    """
  }
}
| nativelibs4java/ScalaCL | src/main/scala/scalacl/impl/conversion/InlinedCollections.scala | Scala | bsd-3-clause | 1,362 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScPatternList
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScPatternListImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScPatternListStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 17.07.2009
*/
/**
 * Stub element type for `ScPatternList`. Serializes a single flag: whether all
 * patterns in the list are simple (plain reference patterns).
 */
class ScPatternListElementType[Func <: ScPatternList]
        extends ScStubElementType[ScPatternListStub, ScPatternList]("pattern list") {

  def serialize(stub: ScPatternListStub, dataStream: StubOutputStream): Unit = {
    dataStream.writeBoolean(stub.allPatternsSimple)
  }

  def createStubImpl[ParentPsi <: PsiElement](psi: ScPatternList, parentStub: StubElement[ParentPsi]): ScPatternListStub = {
    new ScPatternListStubImpl(parentStub, this, psi.allPatternsSimple)
  }

  def deserializeImpl(dataStream: StubInputStream, parentStub: Any): ScPatternListStub = {
    // Read in the same order as `serialize` wrote: a single boolean flag.
    // Use `()` since reading from the stream is a side effect.
    val patternsSimple = dataStream.readBoolean()
    new ScPatternListStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this, patternsSimple)
  }

  // Pattern lists are not indexed.
  def indexStub(stub: ScPatternListStub, sink: IndexSink): Unit = {}

  def createPsi(stub: ScPatternListStub): ScPatternList = {
    new ScPatternListImpl(stub)
  }
}
package play.api.mvc {
import play.api._
import play.api.http.{MediaRange, HeaderNames}
import play.api.i18n.Lang
import play.api.libs.iteratee._
import play.api.libs.Crypto
import scala.annotation._
import scala.util.control.NonFatal
/**
 * The HTTP request header. Note that it doesn't contain the request body yet.
 */
@implicitNotFound("Cannot find any HTTP Request Header here")
trait RequestHeader {

  /**
   * The request ID.
   */
  def id: Long

  /**
   * The request Tags.
   */
  def tags: Map[String, String]

  /**
   * The complete request URI, containing both path and query string.
   */
  def uri: String

  /**
   * The URI path.
   */
  def path: String

  /**
   * The HTTP method.
   */
  def method: String

  /**
   * The HTTP version.
   */
  def version: String

  /**
   * The parsed query string.
   */
  def queryString: Map[String, Seq[String]]

  /**
   * The HTTP headers.
   */
  def headers: Headers

  /**
   * The client IP address.
   *
   * If the `X-Forwarded-For` header is present, then this method will return the value in that header
   * if either the local address is 127.0.0.1, or if `trustxforwarded` is configured to be true in the
   * application configuration file.
   */
  def remoteAddress: String

  // -- Computed

  /**
   * Helper method to access a queryString parameter.
   */
  def getQueryString(key: String): Option[String] = queryString.get(key).flatMap(_.headOption)

  /**
   * The HTTP host (domain, optionally port)
   */
  lazy val host: String = headers.get(HeaderNames.HOST).getOrElse("")

  /**
   * The HTTP domain
   */
  lazy val domain: String = host.split(':').head

  /**
   * The Request Langs extracted from the Accept-Language header and sorted by preference (preferred first).
   */
  lazy val acceptLanguages: Seq[play.api.i18n.Lang] = {
    val langs = acceptHeader(HeaderNames.ACCEPT_LANGUAGE).map(item => (item._1, Lang.get(item._2)))
    // Unrecognized languages yield None and are dropped; sort ascending by q then reverse
    // so the highest quality comes first.
    langs.sortBy(_._1).flatMap(_._2).reverse
  }

  /**
   * @return The media types list of the request's Accept header, not sorted in any particular order.
   */
  @deprecated("Use acceptedTypes instead", "2.1")
  lazy val accept: Seq[String] = {
    for {
      acceptHeader <- headers.get(HeaderNames.ACCEPT).toSeq
      value <- acceptHeader.split(',')
      contentType <- value.split(';').headOption
    } yield contentType
  }

  /**
   * @return The media types list of the request's Accept header, sorted by preference (preferred first).
   */
  lazy val acceptedTypes: Seq[play.api.http.MediaRange] = {
    val mediaTypes = acceptHeader(HeaderNames.ACCEPT).map(item => (item._1, MediaRange(item._2)))
    mediaTypes.sorted.map(_._2).reverse
  }

  /**
   * @return The items of an Accept* header, with their q-value.
   */
  private def acceptHeader(headerName: String): Seq[(Double, String)] = {
    for {
      header <- headers.get(headerName).toSeq
      value0 <- header.split(',')
      value = value0.trim
    } yield {
      RequestHeader.qPattern.findFirstMatchIn(value) match {
        case Some(m) => (m.group(1).toDouble, m.before.toString)
        case None => (1.0, value) // "The default value is q=1."
      }
    }
  }

  /**
   * Check if this request accepts a given media type.
   * @return true if `mimeType` matches the Accept header, otherwise false
   */
  def accepts(mimeType: String): Boolean = {
    // An empty Accept header means "accept everything".
    acceptedTypes.isEmpty || acceptedTypes.exists(_.accepts(mimeType))
  }

  /**
   * The HTTP cookies.
   */
  lazy val cookies: Cookies = Cookies(headers.get(play.api.http.HeaderNames.COOKIE))

  /**
   * Parses the `Session` cookie and returns the `Session` data.
   */
  lazy val session: Session = Session.decodeFromCookie(cookies.get(Session.COOKIE_NAME))

  /**
   * Parses the `Flash` cookie and returns the `Flash` data.
   */
  lazy val flash: Flash = Flash.decodeFromCookie(cookies.get(Flash.COOKIE_NAME))

  /**
   * Returns the raw query string.
   */
  lazy val rawQueryString: String = uri.split('?').drop(1).mkString("?")

  /**
   * Returns the value of the Content-Type header (without the ;charset= part if exists)
   */
  lazy val contentType: Option[String] = headers.get(play.api.http.HeaderNames.CONTENT_TYPE).flatMap(_.split(';').headOption).map(_.toLowerCase)

  /**
   * Returns the charset of the request for text-based body
   */
  lazy val charset: Option[String] = headers.get(play.api.http.HeaderNames.CONTENT_TYPE).flatMap(_.split(';').tail.headOption).map(_.toLowerCase.trim).filter(_.startsWith("charset=")).flatMap(_.split('=').tail.headOption)

  /**
   * Copy the request.
   */
  def copy(
    id: Long = this.id,
    tags: Map[String, String] = this.tags,
    uri: String = this.uri,
    path: String = this.path,
    method: String = this.method,
    version: String = this.version,
    queryString: Map[String, Seq[String]] = this.queryString,
    headers: Headers = this.headers,
    remoteAddress: String = this.remoteAddress
  ): RequestHeader = {
    val (_id, _tags, _uri, _path, _method, _version, _queryString, _headers, _remoteAddress) = (id, tags, uri, path, method, version, queryString, headers, remoteAddress)
    new RequestHeader {
      val id = _id
      val tags = _tags
      val uri = _uri
      val path = _path
      val method = _method
      val version = _version
      val queryString = _queryString
      val headers = _headers
      val remoteAddress = _remoteAddress
    }
  }

  override def toString = {
    method + " " + uri
  }
}
object RequestHeader {
  // "The first "q" parameter (if any) separates the media-range parameter(s) from the accept-params."
  // Extracts the quality factor (q-value) from a single Accept* header item; the text before the
  // match is the media range itself.
  val qPattern = ";\\s*q=([0-9.]+)".r
}
/**
 * The complete HTTP request.
 *
 * @tparam A the body content type.
 */
@implicitNotFound("Cannot find any HTTP Request here")
trait Request[+A] extends RequestHeader {
  self =>

  /**
   * The body content.
   */
  def body: A

  /**
   * Transform the request body.
   *
   * Returns a new request sharing all header data with this one; the new body
   * is computed lazily from the original body.
   */
  def map[B](f: A => B): Request[B] = new Request[B] {
    def id = self.id
    def tags = self.tags
    def uri = self.uri
    def path = self.path
    def method = self.method
    def version = self.version
    def queryString = self.queryString
    def headers = self.headers
    def remoteAddress = self.remoteAddress
    lazy val body = f(self.body)
  }
}
object Request {

  /**
   * Builds a `Request[A]` from an existing request header and a body value.
   *
   * All header data is delegated to `rh`; the body is the given value.
   */
  def apply[A](rh: RequestHeader, a: A) = new Request[A] {
    def id = rh.id
    def tags = rh.tags
    def uri = rh.uri
    def path = rh.path
    def method = rh.method
    def version = rh.version
    def queryString = rh.queryString
    def headers = rh.headers
    lazy val remoteAddress = rh.remoteAddress
    // NOTE: a vestigial `def username = None` member was removed here: it is not part of the
    // `Request`/`RequestHeader` interface and was unreachable through the returned type.
    val body = a
  }
}
/**
 * Wrap an existing request. Useful to extend a request.
 *
 * Delegates every member to the wrapped request; subclasses override only what
 * they need to change.
 */
class WrappedRequest[A](request: Request[A]) extends Request[A] {
  def id = request.id
  def tags = request.tags
  def body = request.body
  def headers = request.headers
  def queryString = request.queryString
  def path = request.path
  def uri = request.uri
  def method = request.method
  def version = request.version
  def remoteAddress = request.remoteAddress
}
/**
 * The HTTP response.
 */
@implicitNotFound("Cannot find any HTTP Response here")
trait Response {

  /**
   * Handles a result.
   *
   * Depending on the result type, it will be sent synchronously or asynchronously.
   */
  def handle(result: Result): Unit
}
/**
 * Defines a `Call`, which describes an HTTP request and can be used to create links or fill redirect data.
 *
 * These values are usually generated by the reverse router.
 *
 * @param method the request HTTP method
 * @param url the request URL
 */
case class Call(method: String, url: String) extends play.mvc.Call {

  /**
   * Transform this call to an absolute URL, using the host of the current request.
   */
  def absoluteURL(secure: Boolean = false)(implicit request: RequestHeader) = {
    val scheme = if (secure) "https" else "http"
    scheme + "://" + request.host + this.url
  }

  /**
   * Transform this call to a WebSocket URL, using the host of the current request.
   */
  def webSocketURL(secure: Boolean = false)(implicit request: RequestHeader) = {
    val scheme = if (secure) "wss" else "ws"
    scheme + "://" + request.host + this.url
  }

  override def toString = url
}
/**
 * The HTTP headers set.
 *
 * Header names are compared case-insensitively (see `toMap`).
 */
trait Headers {

  /**
   * Optionally returns the first header value associated with a key.
   */
  def get(key: String): Option[String] = getAll(key).headOption

  /**
   * Retrieves the first header value which is associated with the given key.
   *
   * Fails with a runtime error if no header with that name exists; prefer `get` for safe access.
   */
  def apply(key: String): String = get(key).getOrElse(scala.sys.error("Header doesn't exist"))

  /**
   * Retrieve all header values associated with the given key.
   *
   * Returns an empty sequence if the header is absent.
   */
  def getAll(key: String): Seq[String] = toMap.getOrElse(key, Nil)

  /**
   * Retrieve all header keys
   */
  def keys: Set[String] = toMap.keySet

  /**
   * Transform the Headers to a Map
   */
  lazy val toMap: Map[String, Seq[String]] = {
    import collection.immutable.TreeMap
    import play.core.utils.CaseInsensitiveOrdered
    // TreeMap with a case-insensitive ordering gives case-insensitive lookups.
    TreeMap(data: _*)(CaseInsensitiveOrdered)
  }

  // Raw (name, values) pairs supplied by the concrete implementation.
  protected def data: Seq[(String, Seq[String])]

  /**
   * Transform the Headers to a Map by ignoring multiple values.
   */
  def toSimpleMap: Map[String, String] = keys.map { headerKey =>
    (headerKey, apply(headerKey))
  }.toMap

  override def toString = toMap.toString
}
/**
 * Trait that should be extended by the Cookie helpers.
 *
 * Serializes a typed value `T` to/from a single HTTP cookie, optionally
 * signing the payload so client-side tampering can be detected.
 */
trait CookieBaker[T <: AnyRef] {

  /**
   * The cookie name.
   */
  def COOKIE_NAME: String

  /**
   * Default cookie, returned in case of error or if missing in the HTTP headers.
   */
  def emptyCookie: T

  /**
   * `true` if the Cookie is signed. Defaults to false.
   */
  def isSigned: Boolean = false

  /**
   * `true` if the Cookie should have the httpOnly flag, disabling access from Javascript. Defaults to true.
   */
  def httpOnly = true

  /**
   * The cookie expiration date in seconds, `None` for a transient cookie
   */
  def maxAge: Option[Int] = None

  /**
   * The cookie domain. Defaults to None.
   */
  def domain: Option[String] = None

  /**
   * `true` if the Cookie should have the secure flag, restricting usage to https. Defaults to false.
   */
  def secure = false

  /**
   * The cookie path.
   */
  def path = "/"

  /**
   * Encodes the data as a `String`.
   *
   * Pairs are rendered as `key:value`, joined with the NUL character, then
   * URL-encoded. Keys containing ':' are silently dropped because ':' is the
   * key/value separator. When [[isSigned]] is true the result is prefixed
   * with `<signature>-`.
   */
  def encode(data: Map[String, String]): String = {
    val encoded = java.net.URLEncoder.encode(data.filterNot(_._1.contains(":")).map(d => d._1 + ":" + d._2).mkString("\u0000"), "UTF-8")
    if (isSigned)
      Crypto.sign(encoded) + "-" + encoded
    else
      encoded
  }

  /**
   * Decodes from an encoded `String`.
   *
   * Returns an empty map when the payload is corrupted, or (for signed
   * cookies) when the signature does not match.
   */
  def decode(data: String): Map[String, String] = {

    def urldecode(data: String) = java.net.URLDecoder.decode(data, "UTF-8").split("\u0000").map(_.split(":")).map(p => p(0) -> p.drop(1).mkString(":")).toMap

    // Do not change this unless you understand the security issues behind timing attacks.
    // This method intentionally runs in constant time if the two strings have the same length.
    // If it didn't, it would be vulnerable to a timing attack.
    def safeEquals(a: String, b: String) = {
      if (a.length != b.length) {
        false
      } else {
        var equal = 0
        for (i <- Array.range(0, a.length)) {
          equal |= a(i) ^ b(i)
        }
        equal == 0
      }
    }

    try {
      if (isSigned) {
        // Signed payload has the shape "<signature>-<message>"; the message may
        // itself contain '-', so everything after the first '-' is the message.
        val splitted = data.split("-")
        val message = splitted.tail.mkString("-")
        if (safeEquals(splitted(0), Crypto.sign(message)))
          urldecode(message)
        else
          Map.empty[String, String]
      } else urldecode(data)
    } catch {
      // fail gracefully if the session cookie is corrupted
      case NonFatal(_) => Map.empty[String, String]
    }
  }

  /**
   * Encodes the data as a `Cookie`.
   */
  def encodeAsCookie(data: T): Cookie = {
    val cookie = encode(serialize(data))
    Cookie(COOKIE_NAME, cookie, maxAge, path, domain, secure, httpOnly)
  }

  /**
   * Decodes the data from a `Cookie`.
   *
   * Falls back to [[emptyCookie]] when the cookie is absent or named differently.
   */
  def decodeFromCookie(cookie: Option[Cookie]): T = {
    cookie.filter(_.name == COOKIE_NAME).map(c => deserialize(decode(c.value))).getOrElse(emptyCookie)
  }

  /** The `DiscardingCookie` that instructs the client to remove this cookie. */
  def discard = DiscardingCookie(COOKIE_NAME, path, domain, secure)

  /**
   * Builds the cookie object from the given data map.
   *
   * @param data the data map to build the cookie object
   * @return a new cookie object
   */
  protected def deserialize(data: Map[String, String]): T

  /**
   * Converts the given cookie object into a data map.
   *
   * @param cookie the cookie object to serialize into a map
   * @return a new `Map` storing the key-value pairs for the given cookie
   */
  protected def serialize(cookie: T): Map[String, String]
}
/**
 * HTTP Session.
 *
 * Session data are encoded into an HTTP cookie, and can only contain simple
 * `String` values. Instances are immutable: `+` and `-` return new sessions.
 */
case class Session(data: Map[String, String] = Map.empty[String, String]) {

  /**
   * Optionally returns the session value associated with a key.
   */
  def get(key: String): Option[String] = data.get(key)

  /**
   * Returns `true` if this session is empty.
   */
  def isEmpty: Boolean = data.isEmpty

  /**
   * Adds a value to the session, and returns a new session.
   *
   * For example:
   * {{{
   * session + ("username" -> "bob")
   * }}}
   *
   * @param kv the key-value pair to add
   * @return the modified session
   */
  def +(kv: (String, String)): Session = Session(data + kv)

  /**
   * Removes any value from the session.
   *
   * For example:
   * {{{
   * session - "username"
   * }}}
   *
   * @param key the key to remove
   * @return the modified session
   */
  def -(key: String): Session = Session(data - key)

  /**
   * Retrieves the session value which is associated with the given key.
   */
  def apply(key: String): String = data(key)
}
/**
 * Helper utilities to manage the Session cookie.
 *
 * All parameters are read from the running application's configuration when
 * available; otherwise the documented defaults apply.
 */
object Session extends CookieBaker[Session] {
  // Cookie name, "PLAY_SESSION" unless overridden via `session.cookieName`.
  val COOKIE_NAME = Play.maybeApplication.flatMap(_.configuration.getString("session.cookieName")).getOrElse("PLAY_SESSION")
  val emptyCookie = new Session
  // Sessions are always signed so clients cannot tamper with their content.
  override val isSigned = true
  override def secure = Play.maybeApplication.flatMap(_.configuration.getBoolean("session.secure")).getOrElse(false)
  // No `session.maxAge` means a transient (browser-session) cookie.
  override val maxAge = Play.maybeApplication.flatMap(_.configuration.getInt("session.maxAge"))
  override val httpOnly = Play.maybeApplication.flatMap(_.configuration.getBoolean("session.httpOnly")).getOrElse(true)
  // Scope the cookie to the configured application context path, if any.
  override def path = Play.maybeApplication.flatMap(_.configuration.getString("application.context")).getOrElse("/")
  override def domain = Play.maybeApplication.flatMap(_.configuration.getString("session.domain"))
  def deserialize(data: Map[String, String]) = new Session(data)
  def serialize(session: Session) = session.data
}
/**
 * HTTP Flash scope.
 *
 * Flash data are encoded into an HTTP cookie, and can only contain simple
 * `String` values. Instances are immutable: `+` and `-` return new scopes.
 */
case class Flash(data: Map[String, String] = Map.empty[String, String]) {

  /**
   * Optionally returns the flash value associated with a key.
   */
  def get(key: String): Option[String] = data.get(key)

  /**
   * Returns `true` if this flash scope is empty.
   */
  def isEmpty: Boolean = data.isEmpty

  /**
   * Adds a value to the flash scope, and returns a new flash scope.
   *
   * For example:
   * {{{
   * flash + ("success" -> "Done!")
   * }}}
   *
   * @param kv the key-value pair to add
   * @return the modified flash scope
   */
  def +(kv: (String, String)): Flash = Flash(data + kv)

  /**
   * Removes a value from the flash scope.
   *
   * For example:
   * {{{
   * flash - "success"
   * }}}
   *
   * @param key the key to remove
   * @return the modified flash scope
   */
  def -(key: String): Flash = Flash(data - key)

  /**
   * Retrieves the flash value that is associated with the given key.
   */
  def apply(key: String): String = data(key)
}
/**
 * Helper utilities to manage the Flash cookie.
 *
 * Unlike the session cookie, the flash cookie is not signed and never
 * outlives the browser session (no max age is configured).
 */
object Flash extends CookieBaker[Flash] {
  // Cookie name, "PLAY_FLASH" unless overridden via `flash.cookieName`.
  val COOKIE_NAME = Play.maybeApplication.flatMap(_.configuration.getString("flash.cookieName")).getOrElse("PLAY_FLASH")
  // Scope the cookie to the configured application context path, if any.
  override val path = Play.maybeApplication.flatMap(_.configuration.getString("application.context")).getOrElse("/")
  val emptyCookie = new Flash
  def deserialize(data: Map[String, String]) = new Flash(data)
  def serialize(flash: Flash) = flash.data
}
/**
 * An HTTP cookie.
 *
 * @param name the cookie name
 * @param value the cookie value
 * @param maxAge the cookie expiration date in seconds, `None` for a transient cookie, or a value less than 0 to expire a cookie now
 * @param path the cookie path, defaulting to the root path `/`
 * @param domain the cookie domain
 * @param secure whether this cookie is secured, sent only for HTTPS requests
 * @param httpOnly whether this cookie is HTTP only, i.e. not accessible from client-side JavaScript code
 */
case class Cookie(name: String, value: String, maxAge: Option[Int] = None, path: String = "/", domain: Option[String] = None, secure: Boolean = false, httpOnly: Boolean = true)
/**
 * A cookie to be discarded. This contains only the data necessary for discarding a cookie.
 *
 * @param name the name of the cookie to discard
 * @param path the path of the cookie, defaults to the root path
 * @param domain the cookie domain
 * @param secure whether this cookie is secured
 */
case class DiscardingCookie(name: String, path: String = "/", domain: Option[String] = None, secure: Boolean = false) {
  /** The expired `Cookie` (empty value, negative max age) that makes the client drop this cookie. */
  def toCookie = Cookie(name = name, value = "", maxAge = Some(-1), path = path, domain = domain, secure = secure)
}
/**
 * The HTTP cookies set.
 */
trait Cookies {

  /**
   * Optionally returns the cookie associated with a key.
   */
  def get(name: String): Option[Cookie]

  /**
   * Retrieves the cookie that is associated with the given key,
   * failing when no such cookie exists.
   */
  def apply(name: String): Cookie = get(name) match {
    case Some(cookie) => cookie
    case None => scala.sys.error("Cookie doesn't exist")
  }
}
/**
 * Helper utilities to encode Cookies.
 */
object Cookies {

  import scala.collection.JavaConverters._

  // We use netty here but just as an API to handle cookies encoding
  import org.jboss.netty.handler.codec.http.{ CookieEncoder, CookieDecoder, DefaultCookie }

  /**
   * Extract cookies from the Set-Cookie header.
   *
   * When several cookies share the same name, only the first one is kept.
   * Decoding is lazy: it happens on first access.
   */
  def apply(header: Option[String]) = new Cookies {
    lazy val cookies: Map[String, Cookie] = header.map(Cookies.decode(_)).getOrElse(Seq.empty).groupBy(_.name).mapValues(_.head)
    def get(name: String) = cookies.get(name)
    override def toString = cookies.toString
  }

  /**
   * Encodes cookies as a proper HTTP header.
   *
   * @param cookies the Cookies to encode
   * @return a valid Set-Cookie header value
   */
  def encode(cookies: Seq[Cookie]): String = {
    val encoder = new CookieEncoder(true)
    val newCookies = cookies.map { c =>
      encoder.addCookie {
        val nc = new DefaultCookie(c.name, c.value)
        // Integer.MIN_VALUE is the sentinel for "no max age" (see decode below).
        nc.setMaxAge(c.maxAge.getOrElse(Integer.MIN_VALUE))
        nc.setPath(c.path)
        c.domain.map(nc.setDomain(_))
        nc.setSecure(c.secure)
        nc.setHttpOnly(c.httpOnly)
        nc
      }
      // NOTE(review): this assumes each encode() call emits only the cookie
      // just added (netty 3 behaviour) -- confirm against the netty version in use.
      encoder.encode()
    }
    newCookies.mkString("; ")
  }

  /**
   * Decodes a Set-Cookie header value as a proper cookie set.
   *
   * @param cookieHeader the Set-Cookie header value
   * @return decoded cookies
   */
  def decode(cookieHeader: String): Seq[Cookie] = {
    new CookieDecoder().decode(cookieHeader).asScala.map { c =>
      // Map netty's MIN_VALUE sentinel back to "no max age"; default path is "/".
      Cookie(c.getName, c.getValue, if (c.getMaxAge == Integer.MIN_VALUE) None else Some(c.getMaxAge), Option(c.getPath).getOrElse("/"), Option(c.getDomain), c.isSecure, c.isHttpOnly)
    }.toSeq
  }

  /**
   * Merges an existing Set-Cookie header with new cookie values
   *
   * @param cookieHeader the existing Set-Cookie header value
   * @param cookies the new cookies to encode
   * @return a valid Set-Cookie header value
   */
  def merge(cookieHeader: String, cookies: Seq[Cookie]): String = {
    encode(cookies ++ decode(cookieHeader))
  }
}
}
| noel-yap/setter-for-catan | play-2.1.1/framework/src/play/src/main/scala/play/api/mvc/Http.scala | Scala | apache-2.0 | 21,119 |
import sbt.Keys._
import sbt._
import Resolvers._
import scoverage.ScoverageKeys._
import org.scoverage.coveralls.Imports.CoverallsKeys._
/**
 * Shared sbt build configuration (versions, publishing, coverage) for the
 * slick-migration modules.
 */
object Settings {

  val organizationName = "com.github.itryapitsin"
  val productName = "slick-migration"
  val currentVersion = "2.1.0-SNAPSHOT"

  // Pin the JVM locale during tests so locale-sensitive formatting is deterministic.
  val locales = "-Duser.language=en" :: "-Duser.region=us" :: Nil

  // Scala versions used for cross-building.
  val scalaVersions = Version.scala :: "2.11.5" :: Nil

  val basicSettings = seq(
    version := currentVersion,
    crossScalaVersions := scalaVersions,
    javaOptions in Test ++= locales,
    libraryDependencies ++= Dependencies.common,
    scalaVersion := Version.scala,
    organization := Settings.organizationName,
    publishMavenStyle := true,
    // Test artifacts are never published (this setting was previously declared twice).
    publishArtifact in Test := false,
    coverageFailOnMinimum := false,
    coverageEnabled := true,
    coverageMinimum := 70,
    parallelExecution in Test := false,
    // sys.env.get yields None when the variable is absent, whereas the previous
    // Option(sys.env("COVERALL_TOKEN")) threw NoSuchElementException on any
    // machine where the variable was not set (sys.env is a Map; apply throws).
    coverallsToken := sys.env.get("COVERALL_TOKEN"),
    pomIncludeRepository := { _ => false },
    credentials ++= {
      // Only attach Sonatype credentials when both variables are present.
      (for {
        username <- sys.env.get("SONATYPE_USERNAME")
        password <- sys.env.get("SONATYPE_PASSWORD")
      } yield {
        Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", username, password)
      }).toSeq
    },
    // Scoverage highlighting is only supported on Scala 2.11.
    coverageHighlighting := scalaBinaryVersion.value == "2.11",
    resolvers ++= Seq(
      Resolvers.MavenRepository,
      SonatypeReleasesRepository,
      SonatypeSnapshotsRepository),
    // Snapshots and releases go to the matching Sonatype repository.
    publishTo := {
      if (isSnapshot.value)
        Some(SonatypeSnapshotsRepository)
      else
        Some(SonatypeReleasesRepository)
    },
    pomExtra :=
      <url>http://jsuereth.com/scala-arm</url>
        <licenses>
          <license>
            <name>Apache</name>
            <url>https://raw.githubusercontent.com/itryapitsin/slick-migration/master/LICENSE</url>
            <distribution>repo</distribution>
          </license>
        </licenses>
        <scm>
          <url>git@github.com:itryapitsin/slick-migration.git</url>
          <connection>scm:git:git@github.com:itryapitsin/slick-migration.git</connection>
        </scm>
        <developers>
          <developer>
            <id>itryapitsin</id>
            <name>Iliya Tryapitsin</name>
            <email>iliya.tryapitsin@gmail.com</email>
            <url>https://github.com/itryapitsin</url>
          </developer>
        </developers>
  )

  /** Combines the common settings with any module-specific overrides. */
  def getSettings(customSettings: Setting[_]*): Seq[Setting[_]] = basicSettings ++ customSettings
}
package flyweight
/**
 * Flyweight collaborator: a platform capable of running a shared `Code` instance.
 */
trait Platform {
  /** Executes the given code; implementations supply the platform-specific behaviour. */
  def execute(code: Code): Unit
}
| BBK-PiJ-2015-67/sdp-portfolio | exercises/week11/src/main/scala/flyweight/Platform.scala | Scala | unlicense | 70 |
package org.firedancer3d.scenegraph.sensors
/**
 * Scene-graph touch sensor.
 *
 * NOTE(review): currently an empty placeholder -- no state or behaviour yet.
 */
class TouchSensor {
}
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.csv
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import com.databricks.spark.csv.util.{ParserLibs, TextFile, TypeCast}
/**
 * Provides access to CSV data from pure SQL statements (i.e. for users of the
 * JDBC server).
 *
 * Options are passed as string parameters; malformed option values fail fast
 * with a descriptive exception.
 */
class DefaultSource
  extends RelationProvider with SchemaRelationProvider with CreatableRelationProvider {

  /** Extracts the mandatory 'path' parameter or fails. */
  private def checkPath(parameters: Map[String, String]): String = {
    parameters.getOrElse("path", sys.error("'path' must be specified for CSV data."))
  }

  /** Parses a "true"/"false" option value, failing with a descriptive message otherwise. */
  private def parseBooleanFlag(value: String, flagName: String): Boolean = value match {
    case "true" => true
    case "false" => false
    case _ => throw new Exception(s"$flagName can be true or false")
  }

  /**
   * Converts a possibly-null option value into a single character,
   * failing when more than one character is supplied.
   */
  private def toSingleChar(value: String, errorMessage: String): Character = {
    if (value == null) {
      null
    } else if (value.length == 1) {
      value.charAt(0)
    } else {
      throw new Exception(errorMessage)
    }
  }

  /**
   * Creates a new relation for data store in CSV given parameters.
   * Parameters have to include 'path' and optionally 'delimiter', 'quote', and 'header'
   */
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]) = {
    createRelation(sqlContext, parameters, null)
  }

  /**
   * Creates a new relation for data store in CSV given parameters and user supported schema.
   * Parameters have to include 'path' and optionally 'delimiter', 'quote', and 'header'
   */
  override def createRelation(
      sqlContext: SQLContext,
      parameters: Map[String, String],
      schema: StructType) = {
    val path = checkPath(parameters)
    val delimiter = TypeCast.toChar(parameters.getOrElse("delimiter", ","))

    val quote = parameters.getOrElse("quote", "\"")
    val quoteChar = if (quote.length == 1) {
      quote.charAt(0)
    } else {
      throw new Exception("Quotation cannot be more than one character.")
    }

    val escapeChar: Character = toSingleChar(
      parameters.getOrElse("escape", null),
      "Escape character cannot be more than one character.")

    val commentChar: Character = toSingleChar(
      parameters.getOrElse("comment", "#"),
      "Comment marker cannot be more than one character.")

    val parseMode = parameters.getOrElse("mode", "PERMISSIVE")

    val headerFlag = parseBooleanFlag(parameters.getOrElse("header", "false"), "Header flag")

    val parserLib = parameters.getOrElse("parserLib", ParserLibs.DEFAULT)

    // Whitespace trimming is only implemented by the Univocity parser, so a
    // "true" value with any other parser library is rejected.
    def whitespaceFlag(key: String): Boolean = {
      val flag = parseBooleanFlag(parameters.getOrElse(key, "false"), "Ignore white space flag")
      if (flag && !ParserLibs.isUnivocityLib(parserLib)) {
        throw new Exception("Ignore whitespace supported for the Univocity parser only")
      }
      flag
    }
    val ignoreLeadingWhiteSpaceFlag = whitespaceFlag("ignoreLeadingWhiteSpace")
    val ignoreTrailingWhiteSpaceFlag = whitespaceFlag("ignoreTrailingWhiteSpace")

    val charset = parameters.getOrElse("charset", TextFile.DEFAULT_CHARSET.name())
    // TODO validate charset?

    val inferSchemaFlag = parseBooleanFlag(
      parameters.getOrElse("inferSchema", "false"), "Infer schema flag")

    val lineParsingOpts = LineParsingOpts(parameters)
    val realNumParsingOpts = RealNumberParsingOpts(parameters)
    val intNumParsingOpts = IntNumberParsingOpts(parameters)
    val stringParsingOpts = StringParsingOpts(parameters)

    // The individual legacy options above are honored only when no grouped
    // "csvParsingOpts.*" key is present; otherwise the grouped form wins.
    val csvParsingOpts = if (!parameters.exists { case (k, v) =>
      k.startsWith("csvParsingOpts.")
    }) {
      CSVParsingOpts(delimiter = delimiter,
        quoteChar = quoteChar,
        escapeChar = escapeChar,
        ignoreLeadingWhitespace = ignoreLeadingWhiteSpaceFlag,
        ignoreTrailingWhitespace = ignoreTrailingWhiteSpaceFlag)
    } else {
      CSVParsingOpts(parameters)
    }

    CsvRelation(path,
      useHeader = headerFlag,
      csvParsingOpts = csvParsingOpts,
      lineExceptionPolicy = lineParsingOpts,
      realNumOpts = realNumParsingOpts,
      intNumOpts = intNumParsingOpts,
      stringParsingOpts = stringParsingOpts,
      parseMode = parseMode,
      parserLib = parserLib,
      userSchema = schema,
      comment = commentChar,
      charset = charset,
      inferCsvSchema = inferSchemaFlag)(sqlContext)
  }

  /**
   * Saves a DataFrame as CSV at the given path, honoring the requested
   * [[SaveMode]], then returns a relation over the written data.
   */
  override def createRelation(
      sqlContext: SQLContext,
      mode: SaveMode,
      parameters: Map[String, String],
      data: DataFrame): BaseRelation = {
    val path = checkPath(parameters)
    val filesystemPath = new Path(path)
    val fs = filesystemPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
    val doSave = if (fs.exists(filesystemPath)) {
      mode match {
        case SaveMode.Append =>
          sys.error(s"Append mode is not supported by ${this.getClass.getCanonicalName}")
        case SaveMode.Overwrite =>
          fs.delete(filesystemPath, true)
          true
        case SaveMode.ErrorIfExists =>
          sys.error(s"path $path already exists.")
        case SaveMode.Ignore => false
      }
    } else {
      true
    }
    if (doSave) {
      // Only save data when the save mode is not ignore.
      data.saveAsCsvFile(path, parameters)
    }
    createRelation(sqlContext, parameters, data.schema)
  }
}
| mohitjaggi/spark-csv | src/main/scala/com/databricks/spark/csv/DefaultSource.scala | Scala | apache-2.0 | 6,766 |
package com.github.gdefacci.briscola
import com.github.gdefacci.bdd.Features
import com.github.gdefacci.bdd.Feature
/**
 * BDD feature definitions covering how players start, accept, and decline
 * competitions, and when a game actually begins.
 */
object CompetitionFeatures extends Features with CompetitionSteps {

  // A player cannot be the sole opponent of their own competition.
  lazy val `a player can't start a competition with (her/his)self` = scenario(
    `given an initial application state`
      And `create player`("name", "password")
      Then cant(`player starts match`("name", Seq("name"))))

  // Starting a competition notifies the invited player.
  lazy val `a player can start a competition` = scenario(
    `given an initial application state`
      And `create player`("name", "password")
      And `create player`("name1", "password")
      Then can(`player starts match`("name", Seq("name1")))
      And `player received message`("name1", `a valid created competition event`))

  // Accepting notifies the competition creator.
  lazy val `a player can accept a competition` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1", "name2"))
      Then `player starts match`("name", Seq("name1", "name2"))
      And `player accept the competition`("name1")
      And `player received message`("name", `an accepted competition event`))

  // With only one invitee, a single acceptance starts the game for everyone.
  lazy val `when every one has accepted the competition the game starts` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1"))
      And `player starts match`("name", Seq("name1"))
      Then can(`player accept the competition`("name1"))
      And `all players received message`(`a game started event`) )

  // Acceptance is idempotent only in the sense that repeating it is rejected.
  lazy val `a player can't accept an already accepted competition` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1", "name2"))
      Then `player starts match`("name", Seq("name1", "name2"))
      And `player accept the competition`("name1")
      But cant(`player accept the competition`("name1")))

  // Declining notifies the competition creator.
  lazy val `a player can decline a competition` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1", "name2"))
      Then `player starts match`("name", Seq("name1", "name2"))
      And can(`player decline the competition`("name1"))
      And `player received message`("name", `a declined competition event`))

  // Repeating a decline is rejected, like repeating an acceptance.
  lazy val `a player can't decline an already declined competition` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1", "name2"))
      Then `player starts match`("name", Seq("name1", "name2"))
      And can(`player decline the competition`("name1"))
      And cant(`player decline the competition`("name1")))

  // In "on player count" competitions players may change their mind either way.
  lazy val `a player can decline and later accept a competition` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1", "name2"))
      Then `player starts a "on player count" match`("name", Seq("name1", "name2"), playerCount = 2)
      And can(`player decline the competition`("name1"))
      And can(`player accept the competition`("name1")))

  // NOTE(review): this scenario's steps decline first then accept, despite its
  // name suggesting the opposite order -- confirm whether the steps or the
  // name should change.
  lazy val `a player can accept and later decline a competition` = scenario(
    `given an initial application state`
      And `given initial players`(List("name", "name1", "name2"))
      Then `player starts a "on player count" match`("name", Seq("name1", "name2"), playerCount = 2)
      And can(`player decline the competition`("name1"))
      And can(`player accept the competition`("name1")))

  // All scenarios are grouped under a single feature.
  lazy val features = new Feature("game features",
    `a player can start a competition`,
    `a player can't start a competition with (her/his)self`,
    `a player can accept a competition`,
    `when every one has accepted the competition the game starts`,
    `a player can decline a competition`,
    `a player can't accept an already accepted competition`,
    `a player can't decline an already declined competition`,
    `a player can accept and later decline a competition`,
    `a player can decline and later accept a competition`) :: Nil
}
// -*- mode: Scala;-*-
// Filename: Membrane.scala
// Authors: lgm
// Creation: Mon Apr 25 10:20:38 2011
// Copyright: Not supplied
// Description:
// ------------------------------------------------------------------------
package com.biosimilarity.lift.lib.monad
/**
 * Adapts an arbitrary monadic `Shape` to Scala's for-notation by wrapping
 * values in an Option-like `Membrane` structure.
 *
 * The mixer must supply monadic (`BMonad`) and filtering (`MonadFilter`)
 * interpretations for `Shape` (see the self-type).
 */
trait ForNotationShiv[Shape[_], A] {
  self: BMonad[Shape] with MonadFilter[Shape] =>

  /** The concrete trampoline type surfaced to clients. */
  type ForNotationTrampoline[A] <: Membrane[A] with Filter[A]

  // One approach to trampolining to Scala's for-notation is
  // presented below. We provide an Option-like structure, called
  // Membrane, which represents the basic interface to
  // for-notation. Then we provide the monadic layer in terms of
  // this.

  /** Minimal interface required by for-comprehensions (map/flatMap/foreach). */
  trait Membrane[+A] {
    def flatMap[B](f: A => Membrane[B]): Membrane[B]
    def foreach(f: A => Unit): Unit
    def map[B](f: A => B): Membrane[B]
  }

  // For adding for-notation filter behavior to the mix
  trait Filter[+A] {
    self: Membrane[A] =>
    def withFilter(pred: A => Boolean): Membrane[A] with Filter[A]
    def filter(pred: A => Boolean): Membrane[A] with Filter[A] = {
      withFilter(pred)
    }
  }

  /** The empty membrane (analogous to `None`); absorbs every operation. */
  case object Open extends Membrane[Nothing] with Filter[Nothing] {
    override def flatMap[B](f: Nothing => Membrane[B]): Membrane[B] = {
      this
    }
    override def foreach(f: Nothing => Unit): Unit = {
    }
    override def map[B](f: Nothing => B): Membrane[B] = {
      this
    }
    override def withFilter(
      pred: Nothing => Boolean
    ): Membrane[Nothing] with Filter[Nothing] = {
      this
    }
  }

  /** A membrane holding a single immediate value (analogous to `Some`). */
  case class Cell[+A](a: A) extends Membrane[A] with Filter[A] {
    override def flatMap[B](f: A => Membrane[B]): Membrane[B] = {
      for (b <- f(a)) yield { b }
    }
    override def foreach(f: A => Unit): Unit = {
      f(a)
    }
    override def map[B](f: A => B): Membrane[B] = {
      Cell(f(a))
    }
    def withFilter(pred: A => Boolean): Membrane[A] with Filter[A] = {
      if (pred(a)) this else Open
    }
  }

  // Up to verification of the monad laws, this verifies that Membrane
  // defines a monad, itself. In some sense all monads factor through
  // some kind of wrapper-like structure, i.e. they reflect
  // computation into a datum.
  class MembraneMonad[A]() extends BMonad[Membrane] {
    override def unit[S](s: S): Membrane[S] = Cell[S](s)
    override def bind[S, T](
      ms: Membrane[S],
      f: S => Membrane[T]
    ): Membrane[T] = {
      for (s <- ms; t <- f(s)) yield { t }
    }
  }

  // Now, we special case the wrapping in Membrane's of Shape's -- for
  // which we have provided a monadic interpretation -- as witnessed
  // by the self-type above.

  /** A membrane around a structured/effectful value `Shape[A]`. */
  class SCell[A](val sa: Shape[A]) extends Membrane[A] with Filter[A] {
    override def flatMap[B](f: A => Membrane[B]): Membrane[B] = {
      SCell[B](
        bind[A, B](
          sa,
          (a: A) => {
            f(a) match {
              case Open => throw new Exception("Encountered open cell")
              // NOTE: the Shape[B] ascription below is unchecked (type erasure).
              case SCell(sb: Shape[B]) => sb
              case Cell(b) => unit[B](b)
            }
          }
        )
      )
    }
    override def foreach(f: A => Unit): Unit = {
      bind[A, Unit](sa, (a: A) => unit(f(a)))
    }
    override def map[B](f: A => B): Membrane[B] = {
      SCell[B](
        bind[Shape[A], B](
          unit[Shape[A]](sa),
          fmap[A, B](f)
        )
      )
    }
    def withFilter(pred: A => Boolean): Membrane[A] with Filter[A] = {
      SCell[A](mfilter[A](sa, pred))
    }
    override def equals(o: Any): Boolean = {
      o match {
        case that: SCell[A] => {
          sa.equals(that.sa)
        }
        case _ => false
      }
    }
    override def hashCode(): Int = {
      37 * sa.hashCode
    }
  }

  object SCell {
    /**
     * Wraps `sa` in a new SCell.
     *
     * Fixed: the original body was `SCell[A](sa)`, which resolves to this very
     * `apply` and recursed forever (StackOverflowError on first use). It must
     * invoke the class constructor instead.
     */
    def apply[A](sa: Shape[A]): SCell[A] = {
      new SCell[A](sa)
    }
    def unapply[A](sca: SCell[A]): Option[(Shape[A])] = {
      Some((sca.sa))
    }
  }
}
/** Factory view of [[ForNotationShiv]]: builds a trampoline from a raw `Shape[A]`. */
trait ForNotationApplyShiv[Shape[_],A] {
  self : ForNotationShiv[Shape,A] =>
  // Abstract factory; concrete mixins decide the trampoline representation.
  def apply [A] ( sa : Shape[A] ) : ForNotationTrampoline[A]
}
/** Standard factory: the trampoline representation is simply `SCell`. */
trait ForNotationStdApplyShiv[Shape[_],A] {
  self : ForNotationShiv[Shape,A] =>
  type ForNotationTrampoline[A] = SCell[A]
  // Construct the SCell directly via its class constructor.
  def apply [A] ( sa : Shape[A] ) : ForNotationTrampoline[A] = {
    new self.SCell[A]( sa )
  }
}
/**
 * Implicit conversions between `Shape[A]` and its membrane enclosure, so that
 * `Shape` values can be used directly in for-comprehensions.
 */
trait ForNotationImplicitsShiv[Shape[_],A] {
  self : ForNotationShiv[Shape,A] =>
  type ForNotationTrampoline[A] = SCell[A]
  // Next, we provide some useful implicits:
  // One to enclose Shape's in Membrane's ...
  implicit def toMembrane [A] (
    s : Shape[A]
  ) : ForNotationTrampoline[A] = {
    self.SCell[A]( s )
  }
  // ... and one to open the enclosure
  // NOTE(review): only SCell enclosures can be reopened; a Cell or Open
  // reaching this conversion throws "value escaping enclosure".
  implicit def toShape [A] ( s : Membrane[A] ) : Shape[A] = {
    s match {
      case self.SCell( sa : Shape[A] ) => sa
      case _ => throw new Exception( "value escaping enclosure" )
    }
  }
}
/**
 * Convenience bundle: a for-notation shiv together with its implicit
 * conversions, for mixers that already interpret `Shape` monadically.
 */
trait ForNotationAdapter[Shape[_],A]
  extends ForNotationShiv[Shape,A]
  with ForNotationImplicitsShiv[Shape,A] {
  self : BMonad[Shape] with MonadFilter[Shape] =>
}
/**
 * A monad transformer whose transformed monad `TM` also supports
 * for-notation via [[ForNotationAdapter]].
 */
trait FNMonadT[T[M[_],_],M[_],A]
  extends MonadT[T,M] {
  // The transformed monad with for-notation, filtering and plus structure.
  trait TMSMA[A]
    extends ForNotationAdapter[TM,A]
    with BMonad[TM]
    with MonadFilter[TM]
    with MonadPlus[TM]
  // Abstract witness; implementers provide the concrete instance.
  def tmsma : TMSMA[A]
}
| leithaus/strategies | src/main/scala/com/biosimilarity/lib/monad/Membrane.scala | Scala | cc0-1.0 | 5,421 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.feature.text
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
/**
 * Unit tests for the Tokenizer and Normalizer text transformers.
 */
class TokenizerSpec extends FlatSpec with Matchers with BeforeAndAfter {

  // Fixture: mixed case, punctuation ("friend,") and a digit suffix ("text2").
  val text = "Hello my friend, please annotate my text2"
  val feature = TextFeature(text)

  "Tokenizer and Normalizer" should "work properly" in {
    // Tokenizer splits on whitespace only: punctuation and digits survive.
    val tokenizer = Tokenizer()
    val tokenized = tokenizer.transform(feature)
    require(tokenized.keys().contains("tokens"))
    require(tokenized.getTokens.sameElements(Array("Hello", "my",
      "friend,", "please", "annotate", "my", "text2")))
    // Normalizer lower-cases tokens and strips punctuation/digits.
    val normalizer = Normalizer()
    val normalized = normalizer.transform(tokenized)
    require(normalized.keys().contains("tokens"))
    require(normalized.getTokens.sameElements(Array("hello", "my",
      "friend", "please", "annotate", "my", "text")))
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/feature/text/TokenizerSpec.scala | Scala | apache-2.0 | 1,452 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
package classpath
import java.lang.ref.{Reference, SoftReference, WeakReference}
import java.io.File
import java.net.{URI, URL, URLClassLoader}
import java.util.Collections
import scala.collection.{mutable, JavaConversions, Set}
import mutable.{HashSet, ListBuffer}
import IO.{createTemporaryDirectory, write}
object ClasspathUtilities
{
def toLoader(finder: PathFinder): ClassLoader = toLoader(finder, rootLoader)
def toLoader(finder: PathFinder, parent: ClassLoader): ClassLoader = new URLClassLoader(finder.getURLs, parent)
def toLoader(paths: Seq[File]): ClassLoader = toLoader(paths, rootLoader)
def toLoader(paths: Seq[File], parent: ClassLoader): ClassLoader = new URLClassLoader(Path.toURLs(paths), parent)
def toLoader(paths: Seq[File], parent: ClassLoader, resourceMap: Map[String,String]): ClassLoader =
new URLClassLoader(Path.toURLs(paths), parent) with RawResources { override def resources = resourceMap }
def toLoader(paths: Seq[File], parent: ClassLoader, resourceMap: Map[String,String], nativeTemp: File): ClassLoader =
new URLClassLoader(Path.toURLs(paths), parent) with RawResources with NativeCopyLoader {
override def resources = resourceMap
override val config = new NativeCopyConfig(nativeTemp, paths, javaLibraryPaths)
}
def javaLibraryPaths: Seq[File] = IO.parseClasspath(System.getProperty("java.library.path"))
lazy val rootLoader =
{
def parent(loader: ClassLoader): ClassLoader =
{
val p = loader.getParent
if(p eq null) loader else parent(p)
}
parent(getClass.getClassLoader)
}
final val AppClassPath = "app.class.path"
final val BootClassPath = "boot.class.path"
def createClasspathResources(classpath: Seq[File], instance: ScalaInstance): Map[String,String] =
createClasspathResources(classpath ++ instance.jars, instance.jars)
def createClasspathResources(appPaths: Seq[File], bootPaths: Seq[File]): Map[String, String] =
{
def make(name: String, paths: Seq[File]) = name -> Path.makeString(paths)
Map( make(AppClassPath, appPaths), make(BootClassPath, bootPaths) )
}
def makeLoader[T](classpath: Seq[File], instance: ScalaInstance): ClassLoader =
makeLoader(classpath, instance.loader, instance)
def makeLoader[T](classpath: Seq[File], parent: ClassLoader, instance: ScalaInstance): ClassLoader =
toLoader(classpath, parent, createClasspathResources(classpath, instance))
def makeLoader[T](classpath: Seq[File], parent: ClassLoader, instance: ScalaInstance, nativeTemp: File): ClassLoader =
toLoader(classpath, parent, createClasspathResources(classpath, instance), nativeTemp)
private[sbt] def printSource(c: Class[_]) =
println(c.getName + " loader=" +c.getClassLoader + " location=" + IO.classLocationFile(c))
def isArchive(file: File): Boolean = isArchiveName(file.getName)
def isArchiveName(fileName: String) = fileName.endsWith(".jar") || fileName.endsWith(".zip")
// Partitions the given classpath into (jars, directories)
private[sbt] def separate(paths: Iterable[File]): (Iterable[File], Iterable[File]) = paths.partition(isArchive)
// Partitions the given classpath into (jars, directories)
private[sbt] def buildSearchPaths(classpath: Iterable[File]): (collection.Set[File], collection.Set[File]) =
{
val (jars, dirs) = separate(classpath)
(linkedSet(jars ++ extraJars), linkedSet(dirs ++ extraDirs))
}
private[sbt] def onClasspath(classpathJars: collection.Set[File], classpathDirectories: collection.Set[File], file: File): Boolean =
{
val f = file.getCanonicalFile
if(ClasspathUtilities.isArchive(f))
classpathJars(f)
else
classpathDirectories.toList.find(Path.relativize(_, f).isDefined).isDefined
}
	/** Returns all entries in 'classpath' that correspond to a compiler plugin.*/
	private[sbt] def compilerPlugins(classpath: Seq[File]): Iterable[File] =
	{
		import collection.JavaConversions._
		// A compiler plugin is identified by a top-level scalac-plugin.xml resource;
		// the implicit conversion turns the returned java Enumeration into a Scala collection.
		val loader = new URLClassLoader(Path.toURLs(classpath))
		loader.getResources("scalac-plugin.xml").toList.flatMap(asFile(true))
	}
	/** Converts the given URL to a File. If the URL is for an entry in a jar, the File for the jar is returned. */
	private[sbt] def asFile(url: URL): List[File] = asFile(false)(url)
private[sbt] def asFile(jarOnly: Boolean)(url: URL): List[File] =
{
try
{
url.getProtocol match
{
case "file" if !jarOnly=> IO.toFile(url) :: Nil
case "jar" =>
val path = url.getPath
val end = path.indexOf('!')
new File(new URI(if(end == -1) path else path.substring(0, end))) :: Nil
case _ => Nil
}
}
catch { case e: Exception => Nil }
}
	// Jars and directories that the Scala runner itself puts on the search path:
	// the boot classpath (split into jars/directories) plus every archive found
	// recursively under the configured extension directories.
	private lazy val (extraJars, extraDirs) =
	{
		import scala.tools.nsc.GenericRunnerCommand
		// An empty GenericRunnerCommand exposes the compiler's default settings.
		val settings = (new GenericRunnerCommand(Nil, message => error(message))).settings
		val bootPaths = IO.pathSplit(settings.bootclasspath.value).map(p => new File(p)).toList
		val (bootJars, bootDirs) = separate(bootPaths)
		val extJars =
		{
			val buffer = new ListBuffer[File]
			// Recursively collects all archives under `dir` into `buffer`.
			// NOTE(review): File.listFiles can return null if `dir` disappears between the
			// isDirectory check and this call — presumably acceptable here; confirm.
			def findJars(dir: File)
			{
				buffer ++= dir.listFiles(new SimpleFileFilter(isArchive))
				for(dir <- dir.listFiles(DirectoryFilter))
					findJars(dir)
			}
			for(path <- IO.pathSplit(settings.extdirs.value); val dir = new File(path) if dir.isDirectory)
				findJars(dir)
			buffer.readOnly.map(_.getCanonicalFile)
		}
		(linkedSet(extJars ++ bootJars), linkedSet(bootDirs))
	}
	// Copies `s` into a java LinkedHashSet wrapped as a Scala set: constant-time
	// membership tests while preserving the original insertion order.
	private def linkedSet[T](s: Iterable[T]): Set[T] =
	{
		val set: mutable.Set[T] = JavaConversions.asScalaSet(new java.util.LinkedHashSet[T])
		set ++= s
		set
	}
}
| kuochaoyi/xsbt | util/classpath/ClasspathUtilities.scala | Scala | bsd-3-clause | 5,570 |
/*
*
* o o o o o
* | o | |\\ /| | /
* | o-o o--o o-o oo | | O | oo o-o OO o-o o o
* | | | | | | | | | | | | | | | | \\ | | \\ /
* O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
* |
* o--o
* o--o o o--o o o
* | | | | o | |
* O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
* | \\ | | | | | | | | | | | | | |-' | | | \\
* o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
*
* Logical Markov Random Fields (LoMRF).
*
*
*/
package lomrf.mln.learning.structure
import lomrf.logic.{ AtomSignature, Clause, FALSE, TRUE }
import lomrf.mln.grounding.MRFBuilder
import lomrf.mln.inference.ILP
import lomrf.mln.learning.structure.ClauseConstructor.ClauseType
import lomrf.mln.learning.structure.ClauseConstructor.ClauseType._
import lomrf.mln.learning.structure.hypergraph.HyperGraph
import lomrf.mln.model.mrf.{ MRF, MRFState }
import lomrf.mln.model._
import lomrf.logic.AtomSignatureOps._
import lomrf.mln.learning.TrainingEvidence
import lomrf.util.time._
import lomrf.util.logging.Implicits._
import optimus.optimization.enums.SolverLib
import scala.util.{ Failure, Success }
/**
* This is an implementation of online structure learning algorithm for structure and parameter estimation in
* Markov Logic Networks. Details about the online algorithm for MLNs can be found in the following publications:
*
* <ul>
* <li> Tuyen N. Huynh and Raymond J. Mooney. Online Structure Learning for Markov Logic Networks (2011)
* In Proceedings of the European Conference on Machine Learning and Principles and Practice of Knowledge Discovery
* in Databases (ECML-PKDD 2011), Vol. 2, pp. 81-96, September 2011.
* The paper can be found in [[http://www.cs.utexas.edu/users/ai-lab/?huynh:ecml11]]
* </li>
*
* @param kb knowledge base definition used for learning clauses
* @param constants constant domain of the knowledge base
* @param nonEvidenceAtoms set of non evidence atoms
* @param modes mode declarations to guide the search
* @param maxLength maximum length of a path
* @param allowFreeVariables allow learned clauses to have free variables e.g. variables appearing only once
* @param threshold evaluation threshold for each new clause produced
* @param clauseType types of clauses to be produced [[lomrf.mln.learning.structure.ClauseConstructor.ClauseType]]
* @param ilpSolver solver type selection option for ILP inference
* @param lossAugmented use loss augmented inference
* @param lambda regularization parameter for AdaGrad online learner
* @param eta learning rate parameter for AdaGrad online learner
* @param delta delta parameter for AdaGrad (should be positive or equal zero)
* @param printLearnedWeightsPerIteration print learned weights for each iteration
* @param backgroundClauses existing background theory (initial set of clauses)
*/
final class OSL private (kb: KB, constants: ConstantsDomain, nonEvidenceAtoms: Set[AtomSignature],
                         modes: ModeDeclarations, maxLength: Int, allowFreeVariables: Boolean,
                         threshold: Int, clauseType: ClauseType, ilpSolver: SolverLib, lossAugmented: Boolean,
                         lambda: Double, eta: Double, delta: Double, printLearnedWeightsPerIteration: Boolean,
                         backgroundClauses: Vector[Clause]) extends StructureLearner {
  // Current training step
  private var step: Int = 0
  // Initially learned clauses are only the background clauses, it can be empty
  private var learnedClauses: Vector[Clause] = backgroundClauses
  // Initially all weights for the background clauses are zero
  private var weights: Array[Double] = Array.fill[Double](backgroundClauses.length)(0.0)
  // Sum of square gradients for each clause over all steps (AdaGrad accumulator)
  private var sumSquareGradients = Array.fill[Int](backgroundClauses.length)(0)
  // Previous inferred MRF state
  private var previousMRFState: Option[MRFState] = None
  // Knowledge base definition used for learning clauses
  override protected val knowledgeBase: KB = kb
  // Tolerance threshold for discarding poor clauses at the end of learning
  override protected val tolerance: Double = 0.0
  /**
   * @return a vector of learned clauses
   */
  override def getLearnedClauses: Vector[Clause] = learnedClauses
  /**
   * @return a vector of learned weights
   */
  override def getLearnedWeights: Array[Double] = weights
  /**
   * Find and return all misclassified true ground atoms as false in the previous
   * inferred state. At the initial step of the algorithm assume that everything in
   * the state is false.
   *
   * @param annotationDB annotation over the non evidence atoms
   *
   * @return all misclassified true ground atoms as false
   */
  private def calculateError(annotationDB: EvidenceDB): Vector[Int] = {
    var totalError = 0.0
    var misclassifiedTrueAtomIDs = Vector[Int]()
    val numberOfExamples = annotationDB.values.map(db => db.identity.indices.length).sum
    logger.info("Calculating misclassified loss...")
    previousMRFState match {
      // A previous state exists: compare every inferred atom against its annotation.
      // Only atoms annotated TRUE but inferred false are collected; any mismatch counts as error.
      case Some(state) =>
        val atoms = state.mrf.atoms
        assert(numberOfExamples == atoms.size)
        val iterator = atoms.iterator()
        while (iterator.hasNext) {
          iterator.advance()
          val atom = iterator.value()
          val annotation = annotationDB(atom.id.signature(state.mrf.mln))(atom.id)
          if ((atom.state && annotation == FALSE) || (!atom.state && annotation == TRUE)) {
            if (annotation == TRUE) misclassifiedTrueAtomIDs :+= atom.id
            totalError += 1.0
          }
        }
      // First step (no state yet): treat every annotated-TRUE atom as misclassified.
      case None =>
        misclassifiedTrueAtomIDs =
          annotationDB.values.flatMap(db => db.identity.indices.filter(db.get(_) == TRUE)).toVector
        totalError = misclassifiedTrueAtomIDs.length
    }
    logger.info("Total inferred error: " + totalError + "/" + numberOfExamples)
    misclassifiedTrueAtomIDs
  }
  /**
   * Perform inference using the ILP solver and return the inferred state.
   * Constraint weights are refreshed from the current weight vector first.
   */
  @inline private def infer(mrf: MRF, annotationDB: EvidenceDB): MRFState = {
    mrf.updateConstraintWeights(weights)
    val solver = if (lossAugmented) ILP(mrf, ilpSolver, annotationDB = Some(annotationDB)) else ILP(mrf, ilpSolver)
    solver.infer
  }
  /**
   * Should revise the current theory and return clauses learned for this
   * training evidence as a vector of clauses.
   *
   * Pipeline: ground current theory -> infer -> collect misclassified atoms ->
   * relational pathfinding over a hypergraph -> construct candidate clauses ->
   * evaluate candidates -> AdaGrad weight update (with L1-style truncation).
   *
   * @param trainingEvidence the training evidence (includes annotation)
   *
   * @return a vector of learned clauses for the given training evidence
   */
  override def reviseTheory(trainingEvidence: TrainingEvidence): Vector[Clause] = {
    // Increment training step
    step += 1
    var trueCounts = Array[Int]()
    var inferredCounts = Array[Int]()
    //val (mln, annotationDB) = MLN.forLearning(kb.schema, trainingEvidence, nonEvidenceAtoms, learnedClauses)
    val mln = MLN(kb.schema, trainingEvidence.getEvidence, nonEvidenceAtoms, learnedClauses)
    val annotationDB = trainingEvidence.getAnnotation
    logger.info(s"AnnotationDB: \\n\\tAtoms with annotations: ${annotationDB.keys.mkString(",")}")
    logger.info(mln.toString)
    // In case there is no initial set of clauses
    logger.info("Creating MRF...")
    previousMRFState =
      if (mln.clauses.nonEmpty) {
        val mrf = new MRFBuilder(mln, createDependencyMap = true).buildNetwork
        val state = MRFState(mrf)
        // Count clause groundings satisfied by the annotated (true) state...
        state.setAnnotatedState(annotationDB)
        trueCounts ++= state.countTrueGroundings
        logger.debug("True Counts: [" + trueCounts.deep.mkString(", ") + "]")
        // ...and by the inferred state; the difference is the subgradient per clause.
        val inferredState = infer(mrf, annotationDB)
        inferredCounts ++= inferredState.countTrueGroundings
        logger.debug("Inferred Counts: [" + inferredCounts.deep.mkString(", ") + "]")
        Some(inferredState)
      } else {
        logger.warn("MRF cannot be created, because no clauses were found!")
        None
      }
    val misclassifiedTrueAtomIDs = calculateError(annotationDB)
    logger.info(s"Total misclassified true ground atoms as false: ${misclassifiedTrueAtomIDs.length}")
    // Construct hypergraph
    val HG = HyperGraph(mln, mln.evidence.db, annotationDB, modes)
    logger.info(s"Hypergraph has ${HG.numberOfNodes} nodes (constants) and ${HG.numberOfEdges} edges (true ground atoms)")
    logger.debug(s"Hypergraph Structure:\\n$HG")
    // Search for paths using relational pathfinding, starting from the misclassified atoms
    val (pathFindingRuntime, paths) = measureTime { HG.findPaths(misclassifiedTrueAtomIDs, maxLength, allowFreeVariables) }
    logger.info(s"'Relational Pathfinding': ${paths.size} paths found in ${msecTimeToText(pathFindingRuntime)}")
    logger.debug(s"Paths:\\n${paths.map(_.toText(mln)).mkString("\\n")}")
    // Create clauses from paths
    val (createClausesRuntime, resultedClauses) = measureTime {
      ClauseConstructor.clauses(paths, mln.schema.predicates, modes, mln.evidence, clauseType, learnedClauses)
    }
    val clauses = resultedClauses match {
      case Success(result) =>
        logger.info(s"'Clause Creation': ${result.size} clause(s) extracted from paths in ${msecTimeToText(createClausesRuntime)}")
        logger.info(s"Extracted Clauses:\\n${result.map(_.toText()).mkString("\\n")}")
        result
      // logger.fatal presumably aborts (returns Nothing) so this branch never yields — confirm in logging Implicits
      case Failure(exception) => logger.fatal(exception.getMessage)
    }
    // Evaluate clauses: keep only candidates whose error reduction passes `threshold`
    val (goodClauses, subgradientsOfGoodClauses) =
      Evaluator.evaluateClauses(clauses, mln.schema, mln.space, mln.evidence, annotationDB, threshold, previousMRFState)
    logger.info(s"'Clause Evaluation': ${goodClauses.size} clause(s) remained")
    logger.info(s"Remained Clauses:\\n${goodClauses.map(_.toText()).mkString("\\n")}")
    /*
     * Update weights of the already learned clauses
     */
    // Subgradient per soft clause = inferred count - true count (hard clauses stay 0).
    val subgradientsOfLearnedClauses = Array.fill[Int](mln.clauses.size)(0)
    for (clauseIdx <- mln.clauses.indices) if (!mln.clauses(clauseIdx).isHard) {
      subgradientsOfLearnedClauses(clauseIdx) = inferredCounts(clauseIdx) - trueCounts(clauseIdx)
    }
    // AdaGrad step with L1-style truncation: shrink each updated weight by
    // lambda * coefficient and clamp to zero when the magnitude is exceeded.
    var clauseIdx = 0
    while (clauseIdx < learnedClauses.length) {
      sumSquareGradients(clauseIdx) += subgradientsOfLearnedClauses(clauseIdx) * subgradientsOfLearnedClauses(clauseIdx)
      val coefficient = eta / (this.delta + math.sqrt(sumSquareGradients(clauseIdx)))
      val value = weights(clauseIdx) - coefficient * subgradientsOfLearnedClauses(clauseIdx)
      val difference = math.abs(value) - (lambda * coefficient)
      if (difference > 0)
        weights(clauseIdx) = if (value >= 0) difference else -difference
      else weights(clauseIdx) = 0.0
      clauseIdx += 1
    }
    /*
     * Learn weights for the remained good clauses.
     */
    // New clauses start from weight 0; their accumulators are appended after the existing ones.
    clauseIdx = 0
    while (clauseIdx < goodClauses.length) {
      weights :+= 0.0
      sumSquareGradients :+= subgradientsOfGoodClauses(clauseIdx) * subgradientsOfGoodClauses(clauseIdx)
      val coefficient = eta / (this.delta + math.sqrt(sumSquareGradients(learnedClauses.length + clauseIdx)))
      val value = -coefficient * subgradientsOfGoodClauses(clauseIdx)
      val difference = math.abs(value) - (lambda * coefficient)
      if (difference > 0)
        weights(learnedClauses.length + clauseIdx) = if (value >= 0) difference else -difference
      clauseIdx += 1
    }
    // Append all the remained good clauses to the learned clauses
    learnedClauses ++= goodClauses
    if (printLearnedWeightsPerIteration) {
      // NOTE(review): `step` was already incremented at the top of this method, so
      // this prints step + 1 rather than the current step number — confirm intent.
      logger.info("Learned weights on step " + (step + 1) + ":\\n" +
        "\\t" + weights.deep.mkString("[", ", ", "]"))
    }
    goodClauses
  }
}
/**
* Factory for OSL algorithm
*/
object OSL {

  /**
   * Creates an OSL learner from an initial knowledge base, the non evidence atoms
   * and the OSL parameters. The initial (background) theory handed to the learner
   * is obtained by converting every formula of the knowledge base into CNF over
   * the given constants domain.
   *
   * @param kb knowledge base definition used for learning clauses
   * @param constants constant domain of the knowledge base
   * @param nonEvidenceAtoms set of non evidence atoms
   * @param modes mode declarations to guide the search
   * @param maxLength maximum length of a path
   * @param allowFreeVariables allow learned clauses to have free variables e.g. variables appearing only once
   * @param threshold evaluation threshold for each new clause produced
   * @param clauseType types of clauses to be produced [[lomrf.mln.learning.structure.ClauseConstructor.ClauseType]]
   * @param ilpSolver solver type selection option for ILP inference (default is LpSolve)
   * @param lossAugmented use loss augmented inference (default is false)
   * @param lambda regularization parameter for AdaGrad online learner (default is 0.01)
   * @param eta learning rate parameter for AdaGrad online learner (default is 1.0)
   * @param delta delta parameter for AdaGrad (should be positive or equal zero, default is 1.0)
   * @param printLearnedWeightsPerIteration print learned weights for each iteration (default is false)
   *
   * @return an instance of OSL learner
   */
  def apply(kb: KB, constants: ConstantsDomain, nonEvidenceAtoms: Set[AtomSignature], modes: ModeDeclarations,
            maxLength: Int, allowFreeVariables: Boolean, threshold: Int, clauseType: ClauseType = ClauseType.BOTH,
            ilpSolver: SolverLib = SolverLib.LpSolve, lossAugmented: Boolean = false, lambda: Double = 0.01,
            eta: Double = 1.0, delta: Double = 1.0, printLearnedWeightsPerIteration: Boolean = false): OSL = {

    // Background theory: the CNF of all formulas in the knowledge base.
    val backgroundClauses = kb.formulas.flatMap(_.toCNF(constants)).toVector

    new OSL(
      kb, constants, nonEvidenceAtoms, modes, maxLength, allowFreeVariables,
      threshold, clauseType, ilpSolver, lossAugmented, lambda, eta, delta,
      printLearnedWeightsPerIteration, backgroundClauses)
  }
}
| anskarl/LoMRF | src/main/scala/lomrf/mln/learning/structure/OSL.scala | Scala | apache-2.0 | 13,717 |
package org.sisioh.aws4s.eb.model
import com.amazonaws.services.elasticbeanstalk.model.{ EnvironmentResourcesDescription, LoadBalancerDescription }
import org.sisioh.aws4s.PimpedType
/** Factory wrapping the AWS SDK's no-arg constructor. */
object EnvironmentResourcesDescriptionFactory {

  /** Creates an empty [[EnvironmentResourcesDescription]]. */
  def create(): EnvironmentResourcesDescription =
    new EnvironmentResourcesDescription()
}
/**
 * Allocation-free (value class) enrichment of the AWS SDK's
 * [[EnvironmentResourcesDescription]], exposing its nullable load-balancer
 * property through an `Option`.
 */
class RichEnvironmentResourcesDescription(val underlying: EnvironmentResourcesDescription)
    extends AnyVal
    with PimpedType[EnvironmentResourcesDescription] {

  /** The load balancer, `None` when the SDK getter returns null. */
  def loadBalancerOpt: Option[LoadBalancerDescription] =
    Option(underlying.getLoadBalancer)

  /** Mutates the underlying SDK object; `None` clears the value (sets null). */
  def loadBalancerOpt_=(value: Option[LoadBalancerDescription]): Unit =
    underlying.setLoadBalancer(value.orNull)

  /** Fluent variant: sets the value and returns the underlying SDK object. */
  def withLoadBalancerOpt(value: Option[LoadBalancerDescription]): EnvironmentResourcesDescription =
    underlying.withLoadBalancer(value.orNull)
}
| sisioh/aws4s | aws4s-eb/src/main/scala/org/sisioh/aws4s/eb/model/RichEnvironmentResourcesDescription.scala | Scala | mit | 861 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.domain.policies
import com.normation.utils.HashcodeCaching
import com.normation.cfclerk.domain.TechniqueVersion
import com.normation.cfclerk.domain.TechniqueName
import com.normation.cfclerk.domain.SectionSpec
/**
* That file define "diff" objects between directives.
*/
/** Root of all "diff" objects describing a change applied to a directive. */
sealed trait DirectiveDiff
// For change requests: adds a type tag carrying the directive and its technique name.
sealed trait ChangeRequestDirectiveDiff {
  def techniqueName : TechniqueName
  def directive : Directive
}
/** Deletion of an existing directive. */
final case class DeleteDirectiveDiff(
    techniqueName: TechniqueName
  , directive : Directive
) extends DirectiveDiff with HashcodeCaching with ChangeRequestDirectiveDiff
// Add and modify are grouped under a common "save" diff type.
sealed trait DirectiveSaveDiff extends DirectiveDiff
/** Creation of a new directive. */
final case class AddDirectiveDiff(
    techniqueName: TechniqueName
  , directive : Directive
) extends DirectiveSaveDiff with HashcodeCaching with ChangeRequestDirectiveDiff
/**
 * Field-by-field modification of an existing directive: each `mod*` member is
 * `Some(SimpleDiff(old, new))` when that field changed, `None` otherwise.
 */
final case class ModifyDirectiveDiff(
    techniqueName : TechniqueName
  , id : DirectiveId
  , name : String //keep the name around to be able to display it as it was at that time
  , modName : Option[SimpleDiff[String]] = None
  , modTechniqueVersion: Option[SimpleDiff[TechniqueVersion]] = None
  , modParameters : Option[SimpleDiff[SectionVal]] = None
  , modShortDescription: Option[SimpleDiff[String]] = None
  , modLongDescription : Option[SimpleDiff[String]] = None
  , modPriority : Option[SimpleDiff[Int]] = None
  , modIsActivated : Option[SimpleDiff[Boolean]] = None
  , modIsSystem : Option[SimpleDiff[Boolean]] = None
) extends DirectiveSaveDiff with HashcodeCaching
/**
 * Modification expressed as the full target state of the directive (rather than
 * field deltas), together with the technique's root section specification.
 */
final case class ModifyToDirectiveDiff(
    techniqueName: TechniqueName
  , directive : Directive
  , rootSection : SectionSpec
) extends DirectiveDiff with HashcodeCaching with ChangeRequestDirectiveDiff
| jooooooon/rudder | rudder-core/src/main/scala/com/normation/rudder/domain/policies/DirectiveDiff.scala | Scala | agpl-3.0 | 3,654 |
package co.blocke.scalajack
import co.blocke.scalajack.json.JsonFlavor
import model._
import yaml.YamlFlavor
import delimited.DelimitedFlavor
import json4s.Json4sFlavor
import org.json4s.JValue
import json._
import yaml._
import delimited._
object Converters:
  //------
  // Note: Json, Json4s, and Yaml do NOT have a 'toDelimited' option because delimited format is *ordered*, while
  // other 3 are intrinsically un-ordered, so there's no guarantee the delimited fields will be rendered in the
  // 'correct' order.
  //------
  // All converters follow the same pattern: read the input as T with the source
  // flavor, optionally transform it, then render with the target flavor.
  // JSON mappers
  extension (j: JSON)
    inline def mapJsonTo[T, S](toFlavor: JackFlavor[S])(fn: T => T)(implicit sjJ: JackFlavor[JSON]): S = toFlavor.render[T](fn(sjJ.read[T](j)))
  extension (j: JSON)
    inline def jsonToYaml[T](implicit sjY: JackFlavor[YAML], sjJ: JackFlavor[JSON]): YAML = sjY.render( sjJ.read[T](j) )
    inline def jsonToJson4s[T](implicit sjV: JackFlavor[JValue], sjJ: JackFlavor[JSON]): JValue = sjV.render( sjJ.read[T](j) )
    inline def fromJson[T](implicit sjJ: JackFlavor[JSON]): T = sjJ.read[T](j)
    inline def mapJson[T](fn: T => T)(implicit sjJ: JackFlavor[JSON]): JSON = sjJ.render[T](fn(sjJ.read[T](j)))
  // YAML mappers
  extension (y: YAML)
    inline def mapYamlTo[T, S](toFlavor: JackFlavor[S])(fn: T => T)(implicit sjY: JackFlavor[YAML]): S = toFlavor.render[T](fn(sjY.read[T](y)))
  extension (y: YAML)
    inline def yamlToJson[T](implicit sjJ: JackFlavor[JSON], sjY: JackFlavor[YAML]): JSON = sjJ.render( sjY.read[T](y) )
    inline def yamlToJson4s[T](implicit sjV: JackFlavor[JValue], sjY: JackFlavor[YAML]): JValue = sjV.render( sjY.read[T](y) )
    inline def fromYaml[T](implicit sjY: JackFlavor[YAML]): T = sjY.read[T](y)
    inline def mapYaml[T](fn: T => T)(implicit sjY: JackFlavor[YAML]): YAML = sjY.render[T](fn(sjY.read[T](y)))
  // DELIMITED mappers
  extension (d: DELIMITED)
    inline def mapDelimitedTo[T, S](toFlavor: JackFlavor[S])(fn: T => T)(implicit sjD: JackFlavor[DELIMITED]): S = toFlavor.render[T](fn(sjD.read[T](d)))
  extension (d: DELIMITED)
    inline def delimitedToJson[T](implicit sjJ: JackFlavor[JSON], sjD: JackFlavor[DELIMITED]): JSON = sjJ.render( sjD.read[T](d) )
    inline def delimitedToJson4s[T](implicit sjV: JackFlavor[JValue], sjD: JackFlavor[DELIMITED]): JValue = sjV.render( sjD.read[T](d) )
    inline def delimitedToYaml[T](implicit sjY: JackFlavor[YAML], sjD: JackFlavor[DELIMITED]): YAML = sjY.render( sjD.read[T](d) )
    inline def fromDelimited[T](implicit sjD: JackFlavor[DELIMITED]): T = sjD.read[T](d)
    inline def mapDelimited[T](fn: T => T)(implicit sjD: JackFlavor[DELIMITED]): DELIMITED = sjD.render[T](fn(sjD.read[T](d)))
  // Json4s mappers
  extension (j: JValue)
    inline def mapJson4sTo[T, S](toFlavor: JackFlavor[S])(fn: T => T)(implicit sjV: JackFlavor[JValue]): S = toFlavor.render[T](fn(sjV.read[T](j)))
  extension (j: JValue)
    inline def json4sToYaml[T](implicit sjY: JackFlavor[YAML], sjV: JackFlavor[JValue]): YAML = sjY.render( sjV.read[T](j) )
    inline def json4sToJson[T](implicit sjJ: JackFlavor[JSON], sjV: JackFlavor[JValue]): JSON = sjJ.render( sjV.read[T](j) )
    inline def fromJson4s[T](implicit sjV: JackFlavor[JValue]): T = sjV.read[T](j)
    inline def mapJson4s[T](fn: T => T)(implicit sjV: JackFlavor[JValue]): JValue = sjV.render[T](fn(sjV.read[T](j)))
  // Render any value with the implicitly available flavor for the target format.
  extension[T] (a: T)
    inline def toJson(implicit sjJ: JackFlavor[JSON]): JSON = sjJ.render(a)
    inline def toYaml(implicit sjY: JackFlavor[YAML]): YAML = sjY.render(a)
    inline def toDelimited(implicit sjD: JackFlavor[DELIMITED]): DELIMITED = sjD.render(a)
    inline def toJson4s(implicit sjV: JackFlavor[JValue]): JValue = sjV.render(a)
| gzoller/ScalaJack | core/src/main/scala/co.blocke.scalajack/Converters.scala | Scala | mit | 3,995 |
package eu.timepit.refined
package examples
import eu.timepit.refined.implicits._
import eu.timepit.refined.numeric._
import shapeless.tag.@@
object PostErasureAnyVal {
  // Refined wrapper: boxes the value — stored as java.lang.Integer after erasure
  // (see the -Xprint:posterasure dump in the comment below).
  val x: Int Refined Positive = 1
  // shapeless tag: erases to a primitive Int, no boxing (per the same dump).
  val y: Int @@ Positive = 1
  // Plain Int, for comparison.
  val z: Int = 1
}
/*
This is what scalac 2.11.7 outputs with -Xprint:posterasure:
package eu.timepit.refined {
package examples {
object PostErasureAnyVal extends Object {
def <init>(): eu.timepit.refined.examples.PostErasureAnyVal.type = {
PostErasureAnyVal.super.<init>();
()
};
private[this] val x: Integer = (scala.Int.box(1).$asInstanceOf[Integer](): Integer);
<stable> <accessor> def x(): Integer = PostErasureAnyVal.this.x;
private[this] val y: Int = (unbox(scala.Int.box(1)): Int);
<stable> <accessor> def y(): Int = PostErasureAnyVal.this.y;
private[this] val z: Int = 1;
<stable> <accessor> def z(): Int = PostErasureAnyVal.this.z
}
}
}
*/
| beni55/refined | shared/src/test/scala/eu/timepit/refined/examples/PostErasureAnyVal.scala | Scala | mit | 969 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.nn.abstractnn.TensorCriterion
import com.intel.analytics.bigdl.dllib.tensor._
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* Creates a criterion that optimizes a two-class classification logistic loss
* between input x (a Tensor of dimension 1) and output y (which is a tensor
* containing either 1s or -1s).
*
* loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x:nElement()
*
* @param sizeAverage The normalization by the number of elements in the input
* can be disabled by setting
*/
@SerialVersionUID(7573077918688542348L)
class SoftMarginCriterion[@specialized(Float, Double) T: ClassTag](var sizeAverage: Boolean = true)
  (implicit ev: TensorNumeric[T])
  extends TensorCriterion[T] {

  /** @return true when the loss is normalized by the number of input elements. */
  def isSizeAverage: Boolean = sizeAverage

  /** Enables/disables normalization by the number of input elements. */
  def setSizeAverage(sizeAverage: Boolean): this.type = {
    this.sizeAverage = sizeAverage
    this
  }

  // TODO: replace apply for performance optimization
  /**
   * Computes loss(x, y) = sum_i log(1 + exp(-y[i] * x[i])), divided by the number
   * of elements when `sizeAverage` is set. `target` must hold +1/-1 labels and
   * have the same size as `input`.
   */
  override def updateOutput(input: Tensor[T], target: Tensor[T]): T = {
    require(input.isSameSizeAs(target), "The input should have the same size as target" +
      s"input size ${input.nElement()}, target size ${target.nElement()}")
    var sum = ev.zero
    val func2 = new TensorFunc4[T] {
      override def apply(in: Array[T], index1: Int, tar: Array[T], index2: Int): Unit = {
        // Per-element logistic loss: log(1 + exp(-y * x)).
        val z = ev.log(ev.plus(ev.one, ev.exp(ev.negative(ev.times(in(index1), tar(index2))))))
        sum = ev.plus(sum, z)
      }
    }
    DenseTensorApply.apply2[T](input, target, func2)
    if (sizeAverage) {
      sum = ev.divide(sum, ev.fromType[Int](input.nElement()))
    }
    output = sum
    output
  }

  // TODO: replace apply for performance optimization
  /**
   * Gradient of the loss: d/dx log(1 + exp(-y*x)) = -y * exp(-y*x) / (1 + exp(-y*x)),
   * scaled by 1/nElement when `sizeAverage` is set.
   */
  override def updateGradInput(input: Tensor[T], target: Tensor[T]): Tensor[T] = {
    require(input.isSameSizeAs(target), "The input should have the same size as target" +
      s"input size ${input.nElement()}, target size ${target.nElement()}")
    val norm = if (sizeAverage) {
      ev.divide(ev.one, ev.fromType[Int](input.nElement()))
    } else {
      ev.one
    }
    gradInput.resizeAs(input)
    val func = new TensorFunc6[T] {
      override def apply (gradInput: Array[T], offset1: Int, input: Array[T],
        offset2: Int, target: Array[T], offset3: Int): Unit = {
        // BUG FIX: read `target` with its own offset (offset3), not gradInput's
        // offset (offset1). The original mixed offsets, which is only correct
        // when all three tensors happen to share identical element offsets.
        val z = ev.exp(ev.negative(ev.times(target(offset3), input(offset2))))
        gradInput(offset1) = ev.divide(
          ev.negative(ev.times(norm, ev.times(target(offset3), z))), ev.plus(ev.one, z))
      }
    }
    DenseTensorApply.apply3[T](gradInput, input, target, func)
    gradInput
  }

  override def canEqual(other: Any): Boolean = other.isInstanceOf[SoftMarginCriterion[T]]

  // NOTE(review): equality includes the mutable gradInput buffer — confirm this
  // is intended before relying on it as a value comparison.
  override def equals(other: Any): Boolean = other match {
    case that: SoftMarginCriterion[T] =>
      (that canEqual this) &&
        gradInput == that.gradInput &&
        sizeAverage == that.sizeAverage
    case _ => false
  }

  override def hashCode(): Int = {
    def getHashcode(state: Any): Int = if (state == null) 0 else state.hashCode()
    val state = Seq(gradInput, sizeAverage)
    state.map(getHashcode).foldLeft(0)((a, b) => 31 * a + b)
  }
}
/** Factory for [[SoftMarginCriterion]]. */
object SoftMarginCriterion {

  /** @param sizeAverage normalize the loss by the number of elements (default true) */
  def apply[@specialized(Float, Double) T: ClassTag](sizeAverage: Boolean = true)
    (implicit ev: TensorNumeric[T]): SoftMarginCriterion[T] = {
    new SoftMarginCriterion(sizeAverage)
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/SoftMarginCriterion.scala | Scala | apache-2.0 | 4,107 |
package com.softwaremill.thegarden.json4s.serializers
import org.json4s.{DefaultFormats, Formats}
import org.scalatest.{Matchers, FlatSpec}
/**
 * Verifies that adding CamelCaseFieldNameDeserializer to the json4s formats maps
 * snake_case JSON field names (e.g. "post_id") onto camelCase case-class fields
 * (postId) during deserialization.
 */
class CamelCaseFieldNameDeserializerSpec extends FlatSpec with Matchers {

  // Formats under test: the defaults plus the custom field-name deserializer.
  implicit private val formats: Formats = new DefaultFormats {} + CamelCaseFieldNameDeserializer

  // Snake_case payload; BlogPost (defined elsewhere) exposes camelCase fields.
  val SerializedBlogPost = """{"post_id":10,"text":"foo bar"}"""

  import org.json4s.jackson.Serialization.read

  // NOTE(review): the spec name reads awkwardly ("underscores fields"), but it is
  // a runtime string and therefore left unchanged here.
  it should "change the serialization format in a way that field names with underscores fields are converted to camelCase" in {
    val post = read[BlogPost](SerializedBlogPost)
    post.postId shouldEqual 10
    post.text shouldEqual "foo bar"
  }
}
| maciej/the-garden | garden-json4s/src/test/scala/com/softwaremill/thegarden/json4s/serializers/CamelCaseFieldNameDeserializerSpec.scala | Scala | mit | 680 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.entity
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.language.postfixOps
import scala.util.Try
import spray.json.DefaultJsonProtocol
import spray.json.DefaultJsonProtocol._
import spray.json._
import whisk.common.TransactionId
import whisk.core.database.DocumentFactory
import whisk.core.entity.types.EntityStore
/**
* WhiskPackagePut is a restricted WhiskPackage view that eschews properties
* that are auto-assigned or derived from URI: namespace and name.
*/
case class WhiskPackagePut(
    binding: Option[Binding] = None,
    parameters: Option[Parameters] = None,
    version: Option[SemVer] = None,
    publish: Option[Boolean] = None,
    annotations: Option[Parameters] = None) {

    /**
     * Resolves the binding if it contains the default namespace; every other
     * property is carried over unchanged.
     */
    protected[core] def resolve(namespace: EntityName): WhiskPackagePut =
        copy(binding = binding map { _.resolve(namespace) })
}
/**
* A WhiskPackage provides an abstraction of the meta-data for a whisk package
* or package binding.
*
* The WhiskPackage object is used as a helper to adapt objects between
* the schema used by the database and the WhiskPackage abstraction.
*
* @param namespace the namespace for the action
* @param name the name of the action
* @param binding an optional binding, None for provider, Some for binding
* @param parameters the set of parameters to bind to the action environment
* @param version the semantic version
* @param publish true to share the action or false otherwise
* @param annotation the set of annotations to attribute to the package
* @throws IllegalArgumentException if any argument is undefined
*/
@throws[IllegalArgumentException]
case class WhiskPackage(
    namespace: EntityPath,
    override val name: EntityName,
    binding: Option[Binding] = None,
    parameters: Parameters = Parameters(),
    version: SemVer = SemVer(),
    publish: Boolean = false,
    annotations: Parameters = Parameters())
    extends WhiskEntity(name) {

    // BUG FIX: was `binding != null || ...`, which always passed for a non-null
    // binding (short-circuit) and threw NullPointerException — not the documented
    // IllegalArgumentException — when binding was null. The intent is: the Option
    // itself must be non-null AND, when defined, must not wrap null.
    require(binding != null && (binding map { _ != null } getOrElse true), "binding undefined")

    /**
     * Merges parameters into existing set of parameters for package.
     * Existing parameters supersede those in p.
     */
    def inherit(p: Parameters): WhiskPackage = copy(parameters = p ++ parameters).revision[WhiskPackage](rev)

    /**
     * Merges parameters into existing set of parameters for package.
     * The parameters from p supersede parameters from this.
     */
    def mergeParameters(p: Parameters): WhiskPackage = copy(parameters = parameters ++ p).revision[WhiskPackage](rev)

    /**
     * Gets the full path for the package.
     * This is equivalent to calling this this.fullyQualifiedName(withVersion = false).fullPath.
     */
    def fullPath: EntityPath = namespace.addPath(name)

    /**
     * Gets binding for package iff this is not already a package reference.
     */
    def bind: Option[Binding] = {
        if (binding.isDefined) {
            None
        } else {
            Some(Binding(namespace.root, name))
        }
    }

    /**
     * Adds actions to package. The actions list is filtered so that only actions that
     * match the package are included (must match package namespace/name).
     */
    def withActions(actions: List[WhiskAction] = List()): WhiskPackageWithActions = {
        withPackageActions(actions filter { a =>
            // for a binding, actions live under the bound (provider) package path
            val pkgns = binding map { b => b.namespace.addPath(b.name) } getOrElse { namespace.addPath(name) }
            a.namespace == pkgns
        } map { a =>
            WhiskPackageAction(a.name, a.version, a.annotations)
        })
    }

    /**
     * Adds package actions to package as actions or feeds. An action is considered a feed
     * if it defines the property "feed" in the annotations. The value of the property is
     * ignored for this check.
     */
    def withPackageActions(actions: List[WhiskPackageAction] = List()): WhiskPackageWithActions = {
        val actionGroups = actions map { a =>
            // group into "actions" (false) and "feeds" (true)
            val feed = a.annotations.get(Parameters.Feed).isDefined
            (feed, a)
        } groupBy { _._1 } mapValues { _.map(_._2) }
        WhiskPackageWithActions(this, actionGroups.getOrElse(false, List()), actionGroups.getOrElse(true, List()))
    }

    def toJson = WhiskPackage.serdes.write(this).asJsObject

    /** Extends the entity summary with a boolean flag telling bindings apart from providers. */
    override def summaryAsJson = {
        val JsObject(fields) = super.summaryAsJson
        JsObject(fields + (WhiskPackage.bindingFieldName -> binding.isDefined.toJson))
    }
}
/**
 * A specialized view of a whisk action contained in a package.
 * Eschews fields that are implied by the package in a GET package response.
 *
 * @param name the name of the action
 * @param version the semantic version of the action
 * @param annotations the annotations attributed to the action
 */
case class WhiskPackageAction(name: EntityName, version: SemVer, annotations: Parameters)
/**
 * Extends WhiskPackage to include the list of actions and feeds contained in
 * the package. This is used in a GET package response.
 *
 * @param wp the package record itself
 * @param actions the plain actions contained in the package
 * @param feeds the feed actions (those carrying a "feed" annotation) contained in the package
 */
case class WhiskPackageWithActions(wp: WhiskPackage, actions: List[WhiskPackageAction], feeds: List[WhiskPackageAction])
object WhiskPackage
    extends DocumentFactory[WhiskPackage]
    with WhiskEntityQueries[WhiskPackage]
    with DefaultJsonProtocol {

    // name of the JSON field that distinguishes bindings from provider packages
    val bindingFieldName = "binding"
    override val collectionName = "packages"

    /**
     * Traverses a binding recursively to find the root package and
     * merges parameters along the way if mergeParameters flag is set.
     *
     * NOTE(review): there is no guard against cyclic bindings here; presumably
     * cycles are prevented when bindings are created -- confirm.
     *
     * @param db the entity store containing packages
     * @param pkg the package document id to start resolving
     * @param mergeParameters flag that indicates whether parameters should be merged during package resolution
     * @return the same package if there is no binding, or the actual reference package otherwise
     */
    def resolveBinding(db: EntityStore, pkg: DocId, mergeParameters: Boolean = false)(
        implicit ec: ExecutionContext, transid: TransactionId): Future[WhiskPackage] = {
        WhiskPackage.get(db, pkg) flatMap { wp =>
            // if there is a binding resolve it
            val resolved = wp.binding map { binding =>
                if (mergeParameters) {
                    // the binding's own parameters supersede the provider's (see mergeParameters)
                    resolveBinding(db, binding.docid, true) map {
                        resolvedPackage => resolvedPackage.mergeParameters(wp.parameters)
                    }
                } else resolveBinding(db, binding.docid)
            }
            resolved getOrElse Future.successful(wp)
        }
    }

    override implicit val serdes = {
        /**
         * Custom serdes for a binding - this property must be present in the datastore records for
         * packages so that views can map over packages vs bindings.
         */
        implicit val bindingOverride = new JsonFormat[Option[Binding]] {
            override def write(b: Option[Binding]) = Binding.optionalBindingSerializer.write(b)
            override def read(js: JsValue) = Binding.optionalBindingDeserializer.read(js)
        }
        // field order must match the WhiskPackage constructor (7 fields)
        jsonFormat7(WhiskPackage.apply)
    }

    override val cacheEnabled = true
    override def cacheKeyForUpdate(w: WhiskPackage) = w.docid.asDocInfo
}
/**
 * A package binding holds a reference to the providing package
 * namespace and package name.
 */
case class Binding(namespace: EntityName, name: EntityName) {

    /** The fully qualified name of the referenced (provider) package. */
    def fullyQualifiedName = FullyQualifiedEntityName(namespace.toPath, name)

    /** The datastore document id of the referenced package. */
    def docid = fullyQualifiedName.toDocId

    override def toString = fullyQualifiedName.toString

    /**
     * Replaces the default namespace, if this binding uses it, with the given
     * namespace; otherwise returns this binding unchanged.
     */
    def resolve(ns: EntityName): Binding =
        if (namespace.toPath == EntityPath.DEFAULT) Binding(ns, name) else this
}
object Binding extends ArgNormalizer[Binding] with DefaultJsonProtocol {
    override protected[core] val serdes = jsonFormat2(Binding.apply)

    /** Reads an empty JSON object as None and any other value as a Binding. */
    protected[entity] val optionalBindingDeserializer = new JsonReader[Option[Binding]] {
        override def read(js: JsValue) =
            if (js != JsObject()) Some(serdes.read(js)) else None
    }

    /** Writes None as an empty JSON object so the property is always present. */
    protected[entity] val optionalBindingSerializer = new JsonWriter[Option[Binding]] {
        override def write(b: Option[Binding]) = b.map(serdes.write).getOrElse(JsObject())
    }
}
// JSON (de)serialization for the package PUT payload; presumably the
// create/update request body -- confirm against the WhiskPackagePut case class.
object WhiskPackagePut extends DefaultJsonProtocol {
    implicit val serdes = {
        implicit val bindingSerdes = Binding.serdes
        // an empty JSON object denotes an absent binding, matching the datastore representation
        implicit val optionalBindingSerdes = new OptionFormat[Binding] {
            override def read(js: JsValue) = Binding.optionalBindingDeserializer.read(js)
            override def write(n: Option[Binding]) = Binding.optionalBindingSerializer.write(n)
        }
        jsonFormat5(WhiskPackagePut.apply)
    }
}
// JSON (de)serialization for the package-action view (name, version, annotations).
object WhiskPackageAction extends DefaultJsonProtocol {
    implicit val serdes = jsonFormat3(WhiskPackageAction.apply)
}
object WhiskPackageWithActions {
    implicit val serdes = new RootJsonFormat[WhiskPackageWithActions] {

        /** Serializes as the package record with "actions" and "feeds" arrays appended. */
        def write(w: WhiskPackageWithActions) = {
            val JsObject(pkg) = WhiskPackage.serdes.write(w.wp)
            JsObject(pkg + ("actions" -> w.actions.toJson) + ("feeds" -> w.feeds.toJson))
        }

        /**
         * Deserializes the package record plus its "actions" and "feeds" arrays.
         * A field that is missing or not an array yields an empty list; any other
         * parse failure produces a deserialization error.
         */
        def read(value: JsValue) = Try {
            val pkg = WhiskPackage.serdes.read(value)

            // parse the named field as an array of package actions (shared by "actions" and "feeds")
            def actionList(field: String): List[WhiskPackageAction] =
                value.asJsObject.getFields(field) match {
                    case Seq(JsArray(as)) => as.map(WhiskPackageAction.serdes.read).toList
                    case _                => List()
                }

            WhiskPackageWithActions(pkg, actionList("actions"), actionList("feeds"))
        } getOrElse deserializationError("whisk package with actions malformed")
    }
}
| prccaraujo/openwhisk | common/scala/src/main/scala/whisk/core/entity/WhiskPackage.scala | Scala | apache-2.0 | 10,932 |
package argonaut
// Concrete entry point for the (currently empty) Monocle DecodeJson helpers.
object DecodeJsonMonocle extends DecodeJsonMonocles

// Mix-in placeholder for Monocle-based DecodeJson combinators; currently empty.
trait DecodeJsonMonocles {
}
| jedws/argonaut | argonaut-monocle/src/main/scala/argonaut/DecodeJsonMonocle.scala | Scala | bsd-3-clause | 100 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.persistence.inmemory
package snapshot
import java.util.concurrent.TimeUnit
import akka.actor.{ ActorRef, ActorSystem }
import akka.pattern.ask
import akka.persistence.inmemory.extension.InMemorySnapshotStorage._
import akka.persistence.inmemory.extension.StorageExtensionProvider
import akka.persistence.serialization.Snapshot
import akka.persistence.snapshot.SnapshotStore
import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.typesafe.config.Config
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scalaz.OptionT
import scalaz.std.AllInstances._
/**
 * Snapshot store backed by the in-memory storage extension actor.
 *
 * Every operation is delegated to the storage actor via ask; the ask timeout
 * is read from the "ask-timeout" configuration entry (seconds).
 */
class InMemorySnapshotStore(config: Config) extends SnapshotStore {
  implicit val system: ActorSystem = context.system
  implicit val ec: ExecutionContext = context.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val timeout: Timeout = Timeout(config.getDuration("ask-timeout", TimeUnit.SECONDS).seconds)

  val serialization = SerializationExtension(system)
  val snapshots: ActorRef = StorageExtensionProvider(system).snapshotStorage(config)

  /** Deserializes a stored snapshot entry; a deserialization error fails the future. */
  def deserialize(snapshotEntry: SnapshotEntry): Future[Option[Snapshot]] =
    Future.fromTry(serialization.deserialize(snapshotEntry.snapshot, classOf[Snapshot])).map(Option(_))

  override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
    // Pick the most specific storage query matching the selection criteria.
    val selectedEntry: Future[Option[SnapshotEntry]] = criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        (snapshots ? SnapshotForMaxSequenceNr(persistenceId, Long.MaxValue)).mapTo[Option[SnapshotEntry]]
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        (snapshots ? SnapshotForMaxTimestamp(persistenceId, maxTimestamp)).mapTo[Option[SnapshotEntry]]
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        (snapshots ? SnapshotForMaxSequenceNr(persistenceId, maxSequenceNr)).mapTo[Option[SnapshotEntry]]
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        (snapshots ? SnapshotForMaxSequenceNrAndMaxTimestamp(persistenceId, maxSequenceNr, maxTimestamp)).mapTo[Option[SnapshotEntry]]
      case _ => Future.successful(None)
    }

    // Deserialize the entry (if any) and repackage it for the persistence API.
    val selected = for {
      entry <- OptionT(selectedEntry)
      snapshot <- OptionT(deserialize(entry))
    } yield SelectedSnapshot(SnapshotMetadata(entry.persistenceId, entry.sequenceNumber, entry.created), snapshot.data)
    selected.run
  }

  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] =
    Future.fromTry(serialization.serialize(Snapshot(snapshot))).flatMap { serialized =>
      (snapshots ? Save(metadata.persistenceId, metadata.sequenceNr, metadata.timestamp, serialized)).map(_ => ())
    }

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] =
    (snapshots ? Delete(metadata.persistenceId, metadata.sequenceNr)).map(_ => ())

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = criteria match {
    case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
      (snapshots ? DeleteAllSnapshots(persistenceId)).map(_ => ())
    case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
      (snapshots ? DeleteUpToMaxTimestamp(persistenceId, maxTimestamp)).map(_ => ())
    case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
      (snapshots ? DeleteUpToMaxSequenceNr(persistenceId, maxSequenceNr)).map(_ => ())
    case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
      (snapshots ? DeleteUpToMaxSequenceNrAndMaxTimestamp(persistenceId, maxSequenceNr, maxTimestamp)).map(_ => ())
    case _ => Future.successful(())
  }
}
| dnvriend/akka-persistence-inmemory | src/main/scala/akka/persistence/inmemory/snapshot/InMemorySnapshotStore.scala | Scala | apache-2.0 | 4,528 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.secondaryindex.events
import scala.collection.JavaConverters._
import scala.util.Try
import org.apache.log4j.Logger
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.index.CarbonIndexUtil
import org.apache.spark.sql.secondaryindex.command.IndexModel
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.metadata.index.IndexType
import org.apache.carbondata.events._
import org.apache.carbondata.processing.loading.events.LoadEvents.LoadTablePreStatusUpdateEvent
class SILoadEventListener extends OperationEventListener with Logging {

  val LOGGER: Logger = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  /**
   * Called on a specified event occurrence
   *
   * On LoadTablePreStatusUpdateEvent, triggers a load into every secondary
   * index (SI) table registered on the main table being loaded.
   *
   * NOTE(review): the match handles only LoadTablePreStatusUpdateEvent; any
   * other event type would raise a MatchError. Presumably this listener is
   * only registered for that event -- confirm.
   */
  override def onEvent(event: Event, operationContext: OperationContext): Unit = {
    event match {
      case _: LoadTablePreStatusUpdateEvent =>
        LOGGER.info("Load pre status update event-listener called")
        val loadTablePreStatusUpdateEvent = event.asInstanceOf[LoadTablePreStatusUpdateEvent]
        val carbonLoadModel = loadTablePreStatusUpdateEvent.getCarbonLoadModel
        val sparkSession = SparkSession.getActiveSession.get
        // when Si creation and load to main table are parallel, get the carbonTable from the
        // metastore which will have the latest index Info
        val metaStore = CarbonEnv.getInstance(sparkSession).carbonMetaStore
        val carbonTable = metaStore
          .lookupRelation(Some(carbonLoadModel.getDatabaseName),
            carbonLoadModel.getTableName)(sparkSession).asInstanceOf[CarbonRelation].carbonTable
        val indexMetadata = carbonTable.getIndexMetadata
        val secondaryIndexProvider = IndexType.SI.getIndexProviderName
        if (null != indexMetadata && null != indexMetadata.getIndexesMap &&
            null != indexMetadata.getIndexesMap.get(secondaryIndexProvider)) {
          val indexTables = indexMetadata.getIndexesMap
            .get(secondaryIndexProvider).keySet().asScala
          // if there are no index tables for a given fact table do not perform any action
          if (indexTables.nonEmpty) {
            indexTables.foreach {
              indexTableName =>
                // rebuild the index model from the stored index columns for this SI table
                val secondaryIndex = IndexModel(Some(carbonTable.getDatabaseName),
                  indexMetadata.getParentTableName,
                  indexMetadata
                    .getIndexColumns(secondaryIndexProvider, indexTableName).split(",").toList,
                  indexTableName)
                val metaStore = CarbonEnv.getInstance(sparkSession).carbonMetaStore
                val indexTable = metaStore
                  .lookupRelation(Some(carbonLoadModel.getDatabaseName),
                    indexTableName)(sparkSession).asInstanceOf[CarbonRelation].carbonTable
                // an "isOverwrite" property that is absent or unparsable counts as false
                val isInsertOverwrite = operationContext.getProperties
                  .containsKey("isOverwrite") && Try(operationContext
                  .getProperty("isOverwrite").toString.toBoolean).getOrElse(false)
                CarbonIndexUtil
                  .LoadToSITable(sparkSession,
                    carbonLoadModel,
                    indexTableName,
                    isLoadToFailedSISegments = false,
                    secondaryIndex,
                    carbonTable, indexTable, isInsertOverwrite)
            }
          } else {
            logInfo(s"No index tables found for table: ${carbonTable.getTableName}")
          }
        } else {
          logInfo(s"Index information is null for table: ${carbonTable.getTableName}")
        }
    }
  }
}
| zzcclp/carbondata | integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListener.scala | Scala | apache-2.0 | 4,528 |
import com.alanjz.meerkat.moves.QueenMover
import com.alanjz.meerkat.util.position.mutable.{NodeStringBuilder, FENMaskNodeBuilder}
/**
* Created by alan on 12/15/14.
*/
object QueenMoverTest extends App {

  // Sample FEN positions for exercising queen move generation:
  //   8/8/2NNN3/2NQN3/2NNN3/8/8/8 w - - 0 1
  //   8/8/2nnn3/2nQn3/2nnn3/8/8/8 w - - 0 1
  //   8/8/8/3Q4/8/8/8/8 w - - 0 1
  //   8/8/8/8/8/8/6N1/6NQ w - - 0 1
  //   8/8/2nnn3/2nqn3/2nnn3/8/8/8 b - - 0 1
  //   8/8/2NNN3/2NqN3/2NNN3/8/8/8 b - - 0 1
  //   8/8/8/3q4/8/8/8/8 b - - 0 1
  //   8/8/8/8/8/8/6n1/6nq b - - 0 1

  // Parse a position, print the board, then list the queen's generated moves.
  val position = FENMaskNodeBuilder.parse("8/8/8/8/8/8/6n1/6nq b - - 0 1")
  println(NodeStringBuilder.mkString(position))

  val queenMoves = new QueenMover(position).mkList
  println("moves: " + queenMoves.mkString(" "))
}
| spacenut/meerkat-chess | scripts/QueenMoverTest.scala | Scala | gpl-2.0 | 692 |
package mesosphere.marathon
package api
import java.util.concurrent.{CountDownLatch, Semaphore, TimeUnit}
import javax.servlet.FilterChain
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import mesosphere.UnitTest
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
// Exercises the request-limiting servlet filter; the latch/semaphore choreography
// below is order-sensitive, so statements must not be reordered.
class LimitConcurrentRequestsFilterTest extends UnitTest {
  "LimitConcurrentRequestsFilter" should {
    "Multiple requests below boundary get answered correctly" in {
      Given("A http filter chain")
      // latch counts the two expected filter-chain invocations
      val latch = new CountDownLatch(2)
      val request = mock[HttpServletRequest]
      val response = mock[HttpServletResponse]
      val chain = mock[FilterChain]
      chain.doFilter(request, response) answers { args => latch.countDown() }
      val rf = new LimitConcurrentRequestsFilter(Some(2))

      When("requests where made before the limit")
      Future(rf.doFilter(request, response, chain)) // linter:ignore:IdenticalStatements
      Future(rf.doFilter(request, response, chain))

      Then("The requests got answered")
      latch.await(5, TimeUnit.SECONDS) should be(true)
    }

    "Multiple requests above boundary get a 503" in {
      Given("A http filter chain")
      // the semaphore keeps the first request blocked inside the chain so the
      // second request is guaranteed to exceed the limit of 1
      val semaphore = new Semaphore(1)
      val latch = new CountDownLatch(1)
      semaphore.acquire()
      val request = mock[HttpServletRequest]
      val response = mock[HttpServletResponse]
      val chain = mock[FilterChain]
      chain.doFilter(request, response) answers { args => latch.countDown(); semaphore.acquire() /* blocks*/ }
      val rf = new LimitConcurrentRequestsFilter(Some(1))

      When("requests where made before the limit")
      Future(rf.doFilter(request, response, chain))
      latch.await(5, TimeUnit.SECONDS) should be(true) //make sure, first "request" has passed
      rf.doFilter(request, response, chain) //no future, since that should fail synchronously

      Then("The requests got answered")
      verify(chain, times(1)).doFilter(request, response)
      verify(response, times(1)).sendError(503, "Too many concurrent requests! Allowed: 1.")
      semaphore.release() //release the blocked thread
    }

    "If no limit is given, no semaphore is used" in {
      Given("A http filter chain with no limit")
      val latch = new CountDownLatch(1)
      val request = mock[HttpServletRequest]
      val response = mock[HttpServletResponse]
      val chain = mock[FilterChain]
      chain.doFilter(request, response) answers { args => latch.countDown() }
      val rf = new LimitConcurrentRequestsFilter(None)
      rf.semaphore.availablePermits() should be(0)

      When("A request is made")
      Future(rf.doFilter(request, response, chain))

      Then("Even the semaphore is 0 the request can be made and the pass function is used")
      latch.await(5, TimeUnit.SECONDS) should be(true)
    }
  }
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/api/LimitConcurrentRequestsFilterTest.scala | Scala | apache-2.0 | 2,870 |
package thangiee.riotapi.matchhistory
/** A player's match history, given as a list of per-match summaries. */
case class PlayerHistory(matches: List[MatchSummary])
| Thangiee/Riot-API-Scala | src/main/scala/thangiee/riotapi/matchhistory/PlayerHistory.scala | Scala | mit | 93 |
package net.revenj.patterns
import scala.concurrent.Future
/** Service for searching and counting domain objects.
  * Search can be performed using a {@link Specification specification} and
  * paged using the limit and offset arguments.
  *
  * @tparam T domain object type.
  */
trait SearchableRepository[T <: DataSource] {

  /** Returns an IndexedSeq of domain objects satisfying the {@link Specification specification}
    * with up to <code>limit</code> results.
    * <code>offset</code> can be used to skip initial results.
    *
    * @param specification search predicate (None matches all objects)
    * @param limit maximum number of results (None for no limit)
    * @param offset number of results to be skipped (None to skip nothing)
    * @return future to domain objects which satisfy the search predicate
    */
  def search(
    specification: Option[Specification[T]] = None,
    limit: Option[Int] = None,
    offset: Option[Int] = None): Future[scala.collection.IndexedSeq[T]]

  /** Returns the number of elements satisfying the provided specification.
    *
    * @param specification search predicate (None counts all objects)
    * @return how many domain objects satisfy the specification
    */
  def count(specification: Option[Specification[T]] = None): Future[Long]

  /** Checks whether any element satisfying the provided specification exists.
    *
    * @param specification search predicate (None tests for any object at all)
    * @return true if at least one element satisfies the specification
    */
  def exists(specification: Option[Specification[T]] = None): Future[Boolean]
}
| ngs-doo/revenj | scala/revenj-core/src/main/scala/net/revenj/patterns/SearchableRepository.scala | Scala | bsd-3-clause | 1,507 |
package pl.touk.nussknacker.test
import org.scalactic.source
import org.scalatest.{Assertion, Assertions}
import scala.reflect.ClassTag
trait NussknackerAssertions extends Assertions {

  /**
   * Like assertThrows, but when the thrown exception wraps a cause, the cause
   * is unwrapped (one level) and matched against T instead of the wrapper.
   */
  def assertThrowsWithParent[T <: AnyRef](f: => Any)(implicit classTag: ClassTag[T], pos: source.Position): Assertion =
    assertThrows[T] {
      try f
      catch {
        // deliberately broad: any wrapper with a non-null cause is unwrapped
        case thrown: Throwable if thrown.getCause != null => throw thrown.getCause
      }
    }
}
| TouK/nussknacker | utils/test-utils/src/main/scala/pl/touk/nussknacker/test/NussknackerAssertions.scala | Scala | apache-2.0 | 468 |
package org.opencommercesearch.api.controllers
/*
* Licensed to OpenCommerceSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. OpenCommerceSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.wordnik.swagger.annotations._
import javax.ws.rs.{PathParam, QueryParam}
import play.api.libs.concurrent.Execution.Implicits._
import play.api.Play
import play.api.libs.json._
import scala.concurrent.Future
import scala.collection.mutable
import org.opencommercesearch.api.Global._
import org.opencommercesearch.search.suggester.MultiSuggester
import org.opencommercesearch.search.Element
import org.opencommercesearch.search.collector.{SimpleCollector, MultiSourceCollector}
import org.opencommercesearch.api.models.FacetSuggestion
/**
 * The controller for generic suggestions.
 *
 * @author rmerizalde
 */
@Api(value = "suggestions", basePath = "/api-docs/suggestions", description = "Suggestion API endpoints")
object SuggestionController extends BaseController {

  val suggester = new MultiSuggester

  /**
   * @param source is the collector source
   * @return the configured capacity for the collector source, or the default capacity
   */
  private def collectorCapacity(source: String) : Int = {
    Play.current.configuration.getInt(s"suggester.$source.collector.capacity").getOrElse(SimpleCollector.DefaultCapacity)
  }

  @ApiOperation(
    value = "Suggests user queries, products, categories, brands, etc.",
    notes = "Returns suggestions for given partial user query",
    httpMethod = "GET")
  @ApiResponses(value = Array(new ApiResponse(code = 400, message = "Partial product title is too short")))
  def findSuggestions(
      version: Int,
      @ApiParam(value = "Partial user query", required = true)
      @QueryParam("q")
      q: String,
      @ApiParam(value = "Site to search for suggestions", required = true)
      @QueryParam("site")
      site: String,
      @ApiParam(defaultValue="false", allowableValues="true,false", value = "Display preview results", required = false)
      @QueryParam("preview")
      preview: Boolean,
      @ApiParam(defaultValue="false", allowableValues="true,false", value = "Display facets", required = false)
      @QueryParam("facet")
      facet: Boolean) = ContextAction.async { implicit context => implicit request =>
    if (q == null || q.length < 2) {
      // Reject queries too short to produce meaningful suggestions.
      Future.successful(withCorsHeaders(BadRequest(Json.obj(
        "message" -> s"At least $MinSuggestQuerySize characters are needed to make suggestions"
      ))))
    } else {
      val startTime = System.currentTimeMillis()

      // One sub-collector per suggestion source, each with its configured capacity.
      val collector = new MultiSourceCollector[Element]
      suggester.sources().map(source => collector.add(source, new SimpleCollector[Element](collectorCapacity(source))))

      suggester.search(q, site, collector, solrServer, facet).flatMap(_ => {
        if (!collector.isEmpty) {
          // val (was var): the buffer is only appended to, never reassigned
          val futureList = new mutable.ArrayBuffer[Future[(String, Json.JsValueWrapper)]]
          for (source <- collector.sources) {
            for (sourceCollector <- collector.collector(source)) {
              futureList += Future.successful(
                suggester.responseName(source) -> sourceCollector.elements().map(e => e.toJson))
            }
          }

          for {
            results <- Future.sequence(futureList)
          } yield {
            val json = Json.obj(results: _*)
            // NOTE(review): assumes a "facetSuggestions" entry is always present
            // in the results; (0) on an empty lookup would throw -- confirm.
            val facetsSuggestions = (json \\ "facetSuggestions")(0)
            val suggestions = json - "facetSuggestions"

            // Product ids drive the cache headers of the response.
            val productIds = suggestions \ "products" match {
              case products: JsValue => products \\ "id" map {id => id.as[String]}
              case _ => Seq.empty[String]
            }

            withCacheHeaders(withCorsHeaders(Ok(Json.obj(
              "metadata" -> Json.obj(
                "found" -> collector.size(),
                "time" -> (System.currentTimeMillis() - startTime),
                "facets" -> facetsSuggestions),
              "suggestions" -> suggestions))), productIds)
          }
        } else {
          // Nothing collected for this query.
          Future.successful(withCorsHeaders(Ok(Json.obj(
            "metadata" -> Json.obj(
              "found" -> 0,
              "time" -> (System.currentTimeMillis() - startTime))
          ))))
        }
      })
    }
  }
}
| madickson/opencommercesearch | opencommercesearch-api/app/org/opencommercesearch/api/controllers/SuggestionController.scala | Scala | apache-2.0 | 4,842 |
/*
* Sonar Scoverage Plugin
* Copyright (C) 2013 Rado Buransky
* dev@sonar.codehaus.org
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02
*/
package com.buransky.plugins.scoverage.sensor
import com.buransky.plugins.scoverage.language.Scala
import com.buransky.plugins.scoverage.measure.ScalaMetrics
import com.buransky.plugins.scoverage.pathcleaner.{BruteForceSequenceMatcher, PathSanitizer}
import com.buransky.plugins.scoverage.util.LogUtil
import com.buransky.plugins.scoverage.xml.XmlScoverageReportParser
import com.buransky.plugins.scoverage.{CoveredStatement, DirectoryStatementCoverage, FileStatementCoverage, _}
import org.sonar.api.batch.fs.{FileSystem, InputFile, InputPath}
import org.sonar.api.batch.{CoverageExtension, Sensor, SensorContext}
import org.sonar.api.config.Settings
import org.sonar.api.measures.{CoverageMeasuresBuilder, Measure}
import org.sonar.api.resources.{Project, Resource}
import org.sonar.api.scan.filesystem.PathResolver
import org.sonar.api.utils.log.Loggers
import scala.collection.JavaConversions._
/**
* Main sensor for importing Scoverage report to Sonar.
*
* @author Rado Buransky
*/
class ScoverageSensor(settings: Settings, pathResolver: PathResolver, fileSystem: FileSystem)
extends Sensor with CoverageExtension {
  private val log = Loggers.get(classOf[ScoverageSensor])
  // property key under which the Scoverage XML report location is configured
  protected val SCOVERAGE_REPORT_PATH_PROPERTY = "sonar.scoverage.reportPath"
  // parser for the Scoverage XML report, created on first use
  protected lazy val scoverageReportParser: ScoverageReportParser = XmlScoverageReportParser()

  // the sensor runs only on projects that contain Scala sources
  override def shouldExecuteOnProject(project: Project): Boolean = fileSystem.languages().contains(Scala.key)
  /**
   * Sensor entry point: parses the configured Scoverage report for a
   * single-module project, or falls back to aggregating submodule measures
   * when no report path is set at this level (multi-module build).
   */
  override def analyse(project: Project, context: SensorContext) {
    scoverageReportPath match {
      case Some(reportPath) =>
        // Single-module project
        val srcOption = Option(settings.getString("sonar.sources"))
        val sonarSources = srcOption match {
          case Some(src) => src
          case None => {
            log.warn(s"could not find settings key sonar.sources assuming src/main/scala.")
            "src/main/scala"
          }
        }
        val pathSanitizer = createPathSanitizer(sonarSources)
        processProject(scoverageReportParser.parse(reportPath, pathSanitizer), project, context, sonarSources)
      case None =>
        // Multi-module project has report path set for each module individually
        analyseMultiModuleProject(project, context)
    }
  }
  override val toString = getClass.getSimpleName

  // maps report paths back onto the project's source tree under sonarSources
  protected def createPathSanitizer(sonarSources: String): PathSanitizer
  = new BruteForceSequenceMatcher(fileSystem.baseDir(), sonarSources)
private lazy val scoverageReportPath: Option[String] = {
settings.getString(SCOVERAGE_REPORT_PATH_PROPERTY) match {
case null => None
case path: String =>
pathResolver.relativeFile(fileSystem.baseDir, path) match {
case report: java.io.File if !report.exists || !report.isFile =>
log.error(LogUtil.f("Report not found at {}"), report)
None
case report: java.io.File => Some(report.getAbsolutePath)
}
}
}
private def analyseMultiModuleProject(project: Project, context: SensorContext) {
project.isModule match {
case true => log.warn(LogUtil.f("Report path not set for " + project.name + " module! [" +
project.name + "." + SCOVERAGE_REPORT_PATH_PROPERTY + "]"))
case _ =>
// Compute overall statement coverage from submodules
val totalStatementCount = project.getModules.map(analyseStatementCountForModule(_, context)).sum
val coveredStatementCount = project.getModules.map(analyseCoveredStatementCountForModule(_, context)).sum
if (totalStatementCount > 0) {
// Convert to percentage
val overall = (coveredStatementCount.toDouble / totalStatementCount.toDouble) * 100.0
// Set overall statement coverage
context.saveMeasure(project, createStatementCoverage(overall))
log.info(LogUtil.f("Overall statement coverage is " + ("%1.2f" format overall)))
}
}
}
private def analyseCoveredStatementCountForModule(module: Project, context: SensorContext): Long = {
// Aggregate modules
context.getMeasure(module, ScalaMetrics.coveredStatements) match {
case null =>
log.debug(LogUtil.f("Module has no statement coverage. [" + module.name + "]"))
0
case moduleCoveredStatementCount: Measure[_] =>
log.debug(LogUtil.f("Covered statement count for " + module.name + " module. [" +
moduleCoveredStatementCount.getValue + "]"))
moduleCoveredStatementCount.getValue.toLong
}
}
private def analyseStatementCountForModule(module: Project, context: SensorContext): Long = {
// Aggregate modules
context.getMeasure(module, ScalaMetrics.totalStatements) match {
case null =>
log.debug(LogUtil.f("Module has no number of statements. [" + module.name + "]"))
0
case moduleStatementCount: Measure[_] =>
log.debug(LogUtil.f("Statement count for " + module.name + " module. [" +
moduleStatementCount.getValue + "]"))
moduleStatementCount.getValue.toLong
}
}
  /** Saves the project-level measures, logs the rate, then recurses into children. */
  private def processProject(projectCoverage: ProjectStatementCoverage, project: Project, context: SensorContext, sonarSources: String) {
    // Save measures
    saveMeasures(context, project, projectCoverage)

    log.info(LogUtil.f("Statement coverage for " + project.getKey + " is " + ("%1.2f" format projectCoverage.rate)))

    // Process children
    processChildren(projectCoverage.children, context, sonarSources)
  }
  /**
   * Saves directory-level measures (only when the directory contains any
   * statements) and recurses into its children with the extended path.
   */
  private def processDirectory(directoryCoverage: DirectoryStatementCoverage, context: SensorContext, parentDirectory: String) {
    // save measures if any
    if (directoryCoverage.statementCount > 0) {
      val path = appendFilePath(parentDirectory, directoryCoverage.name)

      getResource(path, context, false) match {
        case Some(srcDir) => {
          // Save directory measures
          saveMeasures(context, srcDir, directoryCoverage)
        }
        case None =>
      }
    }
    // Process children
    processChildren(directoryCoverage.children, context, appendFilePath(parentDirectory, directoryCoverage.name))
  }
  /**
   * Saves file-level measures and the per-line coverage (used for source
   * highlighting) for a single source file; silently skips files the file
   * system does not know about (getResource already logs a warning).
   */
  private def processFile(fileCoverage: FileStatementCoverage, context: SensorContext, directory: String) {
    val path = appendFilePath(directory, fileCoverage.name)

    getResource(path, context, true) match {
      case Some(scalaSourceFile) => {
        // Save measures
        saveMeasures(context, scalaSourceFile, fileCoverage)

        // Save line coverage. This is needed just for source code highlighting.
        saveLineCoverage(fileCoverage.statements, scalaSourceFile, context)
      }
      case None =>
    }
  }
  /**
   * Resolves a relative path to the corresponding Sonar resource: a MAIN
   * Scala input file when isFile is true, otherwise an input directory.
   * Returns None (with a warning) when the path is unknown to the file system.
   *
   * NOTE(review): in the Some branch the matched InputPath shadows the String
   * parameter "path"; context.getResource could in principle return null here,
   * which would yield Some(null) -- confirm against the SensorContext contract.
   */
  private def getResource(path: String, context: SensorContext, isFile: Boolean): Option[Resource] = {

    val inputOption: Option[InputPath] = if (isFile) {
      val p = fileSystem.predicates()
      Option(fileSystem.inputFile(p.and(
        p.hasRelativePath(path),
        p.hasLanguage(Scala.key),
        p.hasType(InputFile.Type.MAIN))))
    } else {
      Option(fileSystem.inputDir(pathResolver.relativeFile(fileSystem.baseDir(), path)))
    }

    inputOption match {
      case Some(path: InputPath) =>
        Some(context.getResource(path))
      case None => {
        log.warn(s"File or directory not found in file system! ${path}")
        None
      }
    }
  }
private def saveMeasures(context: SensorContext, resource: Resource, statementCoverage: StatementCoverage) {
context.saveMeasure(resource, createStatementCoverage(statementCoverage.rate))
context.saveMeasure(resource, createStatementCount(statementCoverage.statementCount))
context.saveMeasure(resource, createCoveredStatementCount(statementCoverage.coveredStatementsCount))
log.debug(LogUtil.f("Save measures [" + statementCoverage.rate + ", " + statementCoverage.statementCount +
", " + statementCoverage.coveredStatementsCount + ", " + statementCoverage.branchRate + ", " + resource.getKey + "]"))
}
private def saveLineCoverage(coveredStatements: Iterable[CoveredStatement], resource: Resource,
context: SensorContext) {
// Convert statements to lines
val coveredLines = StatementCoverage.statementCoverageToLineCoverage(coveredStatements)
// Set line hits
val coverage = CoverageMeasuresBuilder.create()
coveredLines.foreach { coveredLine =>
coverage.setHits(coveredLine.line, coveredLine.hitCount)
coverage.setConditions(coveredLine.line, coveredLine.conditions, coveredLine.coveredConditions)
}
// Save measures
coverage.createMeasures().toList.foreach(context.saveMeasure(resource, _))
}
private def processChildren(children: Iterable[StatementCoverage], context: SensorContext, directory: String) {
children.foreach(processChild(_, context, directory))
}
private def processChild(dirOrFile: StatementCoverage, context: SensorContext, directory: String) {
dirOrFile match {
case dir: DirectoryStatementCoverage => processDirectory(dir, context, directory)
case file: FileStatementCoverage => processFile(file, context, directory)
case _ => throw new IllegalStateException("Not a file or directory coverage! [" +
dirOrFile.getClass.getName + "]")
}
}
  // Measure factories: wrap a raw value into the corresponding Scoverage metric.

  /** Builds the statement-coverage percentage measure. */
  private def createStatementCoverage[T <: Serializable](rate: Double): Measure[T] =
    new Measure[T](ScalaMetrics.statementCoverage, rate)

  /** Builds the total-statement-count measure (third arg 0 is presumably the precision — confirm against Measure API). */
  private def createStatementCount[T <: Serializable](statements: Int): Measure[T] =
    new Measure(ScalaMetrics.totalStatements, statements.toDouble, 0)

  /** Builds the covered-statement-count measure (third arg 0 is presumably the precision — confirm against Measure API). */
  private def createCoveredStatementCount[T <: Serializable](coveredStatements: Int): Measure[T] =
    new Measure(ScalaMetrics.coveredStatements, coveredStatements.toDouble, 0)
private def appendFilePath(src: String, name: String) = {
val result = src match {
case java.io.File.separator => java.io.File.separator
case empty if empty.isEmpty => ""
case other => other + java.io.File.separator
}
result + name
}
}
| RadoBuransky/sonar-scoverage-plugin | plugin/src/main/scala/com/buransky/plugins/scoverage/sensor/ScoverageSensor.scala | Scala | lgpl-3.0 | 10,857 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}

import com.yammer.metrics.core.{Gauge, Meter}
import kafka.admin.{AdminUtils, PreferredReplicaLeaderElectionCommand}
import kafka.api._
import kafka.cluster.Broker
import kafka.common.{TopicAndPartition, _}
import kafka.log.LogConfig
import kafka.metrics.{KafkaMetricsGroup, KafkaTimer}
import kafka.server._
import kafka.utils.ZkUtils._
import kafka.utils._
import org.I0Itec.zkclient.exception.{ZkNoNodeException, ZkNodeExistsException}
import org.I0Itec.zkclient.{IZkChildListener, IZkDataListener, IZkStateListener}
import org.apache.kafka.common.errors.{BrokerNotAvailableException, ControllerMovedException}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, StopReplicaResponse}
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.Watcher.Event.KeeperState

import scala.collection._
import scala.util.Try
import scala.util.control.NonFatal
/**
 * Cached, in-memory view of the cluster that the controller operates on:
 * live brokers, topics, replica assignments, partition leadership, and the
 * bookkeeping for in-flight reassignments / preferred-replica elections.
 */
class ControllerContext(val zkUtils: ZkUtils) {
  var controllerChannelManager: ControllerChannelManager = null

  var shuttingDownBrokerIds: mutable.Set[Int] = mutable.Set.empty
  var epoch: Int = KafkaController.InitialControllerEpoch - 1
  var epochZkVersion: Int = KafkaController.InitialControllerEpochZkVersion - 1
  var allTopics: Set[String] = Set.empty
  var partitionReplicaAssignment: mutable.Map[TopicAndPartition, Seq[Int]] = mutable.Map.empty
  var partitionLeadershipInfo: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = mutable.Map.empty
  val partitionsBeingReassigned: mutable.Map[TopicAndPartition, ReassignedPartitionsContext] = new mutable.HashMap
  val partitionsUndergoingPreferredReplicaElection: mutable.Set[TopicAndPartition] = new mutable.HashSet

  // Backing state for the liveBrokers property; the id set is kept in sync by the setter.
  private var liveBrokersUnderlying: Set[Broker] = Set.empty
  private var liveBrokerIdsUnderlying: Set[Int] = Set.empty

  /** Replaces the full set of live brokers and refreshes the derived id set. */
  def liveBrokers_=(brokers: Set[Broker]) {
    liveBrokersUnderlying = brokers
    liveBrokerIdsUnderlying = brokers.map(_.id)
  }

  /** Live brokers, excluding any that are currently shutting down. */
  def liveBrokers = liveBrokersUnderlying.filterNot(broker => shuttingDownBrokerIds.contains(broker.id))
  /** Ids of live brokers, excluding any that are currently shutting down. */
  def liveBrokerIds = liveBrokerIdsUnderlying -- shuttingDownBrokerIds

  /** All broker ids, including those shutting down. */
  def liveOrShuttingDownBrokerIds = liveBrokerIdsUnderlying
  /** All brokers, including those shutting down. */
  def liveOrShuttingDownBrokers = liveBrokersUnderlying

  /** Partitions that have at least one replica on the given broker. */
  def partitionsOnBroker(brokerId: Int): Set[TopicAndPartition] =
    partitionReplicaAssignment
      .filter { case (_, replicas) => replicas.contains(brokerId) }
      .keys
      .toSet

  /** Every (partition, replica) pair hosted on any of the given brokers. */
  def replicasOnBrokers(brokerIds: Set[Int]): Set[PartitionAndReplica] = {
    val replicas = for {
      brokerId <- brokerIds.toSeq
      (topicAndPartition, assignedReplicas) <- partitionReplicaAssignment
      if assignedReplicas.contains(brokerId)
    } yield new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, brokerId)
    replicas.toSet
  }

  /** Every replica of every partition of the given topic. */
  def replicasForTopic(topic: String): Set[PartitionAndReplica] = {
    val replicas = for {
      (topicAndPartition, assignedReplicas) <- partitionReplicaAssignment.toSeq
      if topicAndPartition.topic == topic
      replica <- assignedReplicas
    } yield new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, replica)
    replicas.toSet
  }

  /** All partitions belonging to the given topic. */
  def partitionsForTopic(topic: String): collection.Set[TopicAndPartition] =
    partitionReplicaAssignment.keySet.filter(_.topic == topic)

  /** Replicas hosted on live (non-shutting-down) brokers. */
  def allLiveReplicas(): Set[PartitionAndReplica] = replicasOnBrokers(liveBrokerIds)

  /** Expands each partition into its full list of replicas. */
  def replicasForPartition(partitions: collection.Set[TopicAndPartition]): collection.Set[PartitionAndReplica] =
    for {
      partition <- partitions
      replica <- partitionReplicaAssignment(partition)
    } yield new PartitionAndReplica(partition.topic, partition.partition, replica)

  /** Drops every trace of the given topic from the cached state. */
  def removeTopic(topic: String) = {
    partitionLeadershipInfo = partitionLeadershipInfo.filterNot { case (topicAndPartition, _) => topicAndPartition.topic == topic }
    partitionReplicaAssignment = partitionReplicaAssignment.filterNot { case (topicAndPartition, _) => topicAndPartition.topic == topic }
    allTopics -= topic
  }
}
object KafkaController extends Logging {
  val stateChangeLogger = new StateChangeLogger("state.change.logger")
  val InitialControllerEpoch = 1
  val InitialControllerEpochZkVersion = 1

  /** Dedicated logger so partition/replica state transitions land in their own log. */
  case class StateChangeLogger(override val loggerName: String) extends Logging

  /**
   * Extracts the controller's broker id from the controller znode payload.
   *
   * Newer brokers store a JSON document with a "brokerid" field; very old brokers
   * stored the bare integer id. The legacy format is attempted only when JSON
   * handling fails.
   *
   * @throws KafkaException if the payload matches neither format
   */
  def parseControllerId(controllerInfoString: String): Int = {
    try {
      Json.parseFull(controllerInfoString) match {
        case Some(m) =>
          val controllerInfo = m.asInstanceOf[Map[String, Any]]
          controllerInfo("brokerid").asInstanceOf[Int]
        case None => throw new KafkaException("Failed to parse the controller info json [%s].".format(controllerInfoString))
      }
    } catch {
      // Recover only from non-fatal errors; VirtualMachineError, InterruptedException
      // and friends must propagate instead of being misread as the legacy format.
      case NonFatal(_) =>
        // It may be due to an incompatible controller register version
        warn("Failed to parse the controller info as json. "
          + "Probably this controller is still using the old format [%s] to store the broker id in zookeeper".format(controllerInfoString))
        try {
          controllerInfoString.toInt
        } catch {
          case NonFatal(t) => throw new KafkaException("Failed to parse the controller info: " + controllerInfoString + ". This is neither the new or the old format.", t)
        }
    }
  }
}
class KafkaController(val config: KafkaConfig, zkUtils: ZkUtils, val brokerState: BrokerState, time: Time, metrics: Metrics, threadNamePrefix: Option[String] = None) extends Logging with KafkaMetricsGroup {
  // Prefix identifying this broker in controller log lines.
  this.logIdent = "[Controller " + config.brokerId + "]: "
  private val stateChangeLogger = KafkaController.stateChangeLogger
  // Cached cluster state (topics, brokers, assignments) shared by the state machines below.
  val controllerContext = new ControllerContext(zkUtils)
  val partitionStateMachine = new PartitionStateMachine(this)
  val replicaStateMachine = new ReplicaStateMachine(this)
  // have a separate scheduler for the controller to be able to start and stop independently of the
  // kafka server
  private val kafkaScheduler = new KafkaScheduler(1)
  // NOTE(review): initialized outside this view before the controller starts — confirm against the full file.
  var topicDeletionManager: TopicDeletionManager = null
  // Leader selectors for the different (re)election scenarios.
  val offlinePartitionSelector = new OfflinePartitionLeaderSelector(controllerContext, config)
  private val reassignedPartitionLeaderSelector = new ReassignedPartitionLeaderSelector(controllerContext)
  private val preferredReplicaPartitionLeaderSelector = new PreferredReplicaPartitionLeaderSelector(controllerContext)
  private val controlledShutdownPartitionLeaderSelector = new ControlledShutdownLeaderSelector(controllerContext)
  private val brokerRequestBatch = new ControllerBrokerRequestBatch(this)
  // Single event queue drained by one thread: all controller work is serialized through it.
  private val controllerEventQueue = new LinkedBlockingQueue[ControllerEvent]
  private val controllerEventThread = new ControllerEventThread("controller-event-thread")
  // ZooKeeper listeners translating ZK changes into controller events.
  private val brokerChangeListener = new BrokerChangeListener(this)
  private val topicChangeListener = new TopicChangeListener(this)
  private val topicDeletionListener = new TopicDeletionListener(this)
  // One per-topic listener, keyed by topic name.
  private val partitionModificationsListeners: mutable.Map[String, PartitionModificationsListener] = mutable.Map.empty
  private val partitionReassignmentListener = new PartitionReassignmentListener(this)
  private val preferredReplicaElectionListener = new PreferredReplicaElectionListener(this)
  private val isrChangeNotificationListener = new IsrChangeNotificationListener(this)
  // -1 means "no active controller known"; compared against config.brokerId in isActive.
  private val activeControllerId = new AtomicInteger(-1)
  // Counters backing the gauges registered below.
  private val offlinePartitionCount = new AtomicInteger(0)
  private val preferredReplicaImbalanceCount = new AtomicInteger(0)

  // Metric: 1 on the broker that is currently the controller, 0 everywhere else.
  newGauge(
    "ActiveControllerCount",
    new Gauge[Int] {
      def value() = if (isActive) 1 else 0
    }
  )

  newGauge(
    "OfflinePartitionsCount",
    new Gauge[Int] {
      def value(): Int = {
        offlinePartitionCount.get()
      }
    }
  )

  newGauge(
    "PreferredReplicaImbalanceCount",
    new Gauge[Int] {
      def value(): Int = {
        preferredReplicaImbalanceCount.get()
      }
    }
  )

  /** Current controller epoch, read from the shared controller context. */
  def epoch: Int = controllerContext.epoch

  /** Client id for controller-to-broker requests, derived from the inter-broker listener. */
  def clientId: String = {
    val controllerListener = config.listeners.find(_.listenerName == config.interBrokerListenerName).getOrElse(
      throw new IllegalArgumentException(s"No listener with name ${config.interBrokerListenerName} is configured."))
    "id_%d-host_%s-port_%d".format(config.brokerId, controllerListener.host, controllerListener.port)
  }
/**
* On clean shutdown, the controller first determines the partitions that the
* shutting down broker leads, and moves leadership of those partitions to another broker
* that is in that partition's ISR.
*
* @param id Id of the broker to shutdown.
* @return The number of partitions that the broker still leads.
*/
def shutdownBroker(id: Int, controlledShutdownCallback: Try[Set[TopicAndPartition]] => Unit): Unit = {
val controlledShutdownEvent = ControlledShutdown(id, controlledShutdownCallback)
addToControllerEventQueue(controlledShutdownEvent)
}
  /**
   * This callback is invoked by the zookeeper leader elector on electing the current broker as the new controller.
   * It does the following things on the become-controller state change -
   * 1. Register controller epoch changed listener
   * 2. Increments the controller epoch
   * 3. Initializes the controller's context object that holds cache objects for current topics, live brokers and
   *    leaders for all existing partitions.
   * 4. Starts the controller's channel manager
   * 5. Starts the replica state machine
   * 6. Starts the partition state machine
   * If it encounters any unexpected exception/error while becoming controller, it resigns as the current controller.
   * This ensures another controller election will be triggered and there will always be an actively serving controller
   *
   * NOTE: the statement order below is deliberate — do not reorder without understanding the comments inline.
   */
  def onControllerFailover() {
    info("Broker %d starting become controller state transition".format(config.brokerId))
    readControllerEpochFromZookeeper()
    incrementControllerEpoch()
    // before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks
    registerPartitionReassignmentListener()
    registerIsrChangeNotificationListener()
    registerPreferredReplicaElectionListener()
    registerTopicChangeListener()
    registerTopicDeletionListener()
    registerBrokerChangeListener()
    initializeControllerContext()
    // We need to send UpdateMetadataRequest after the controller context is initialized and before the state machines
    // are started. The is because brokers need to receive the list of live brokers from UpdateMetadataRequest before
    // they can process the LeaderAndIsrRequests that are generated by replicaStateMachine.startup() and
    // partitionStateMachine.startup().
    sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq)
    replicaStateMachine.startup()
    partitionStateMachine.startup()
    // register the partition change listeners for all existing topics on failover
    controllerContext.allTopics.foreach(topic => registerPartitionModificationsListener(topic))
    info("Broker %d is ready to serve as the new controller with epoch %d".format(config.brokerId, epoch))
    maybeTriggerPartitionReassignment()
    maybeTriggerPreferredReplicaElection()
    info("starting the controller scheduler")
    kafkaScheduler.startup()
    // Optionally kick off the periodic preferred-leader rebalance check.
    if (config.autoLeaderRebalanceEnable) {
      scheduleAutoLeaderRebalanceTask(delay = 5, unit = TimeUnit.SECONDS)
    }
    topicDeletionManager.start()
  }
private def scheduleAutoLeaderRebalanceTask(delay: Long, unit: TimeUnit): Unit = {
kafkaScheduler.schedule("auto-leader-rebalance-task", () => addToControllerEventQueue(AutoPreferredReplicaLeaderElection),
delay = delay, unit = unit)
}
  /**
   * This callback is invoked by the zookeeper leader elector when the current broker resigns as the controller. This is
   * required to clean up internal controller data structures
   * Note:We need to resign as a controller out of the controller lock to avoid potential deadlock issue
   *
   * Teardown mirrors onControllerFailover in reverse order: listeners first, then
   * managers/schedulers, then the state machines, and finally the channel manager.
   */
  def onControllerResignation() {
    debug("Controller resigning, broker id %d".format(config.brokerId))
    // de-register listeners
    deregisterIsrChangeNotificationListener()
    deregisterPartitionReassignmentListener()
    deregisterPreferredReplicaElectionListener()
    // shutdown delete topic manager
    if (topicDeletionManager != null)
      topicDeletionManager.shutdown()
    // shutdown leader rebalance scheduler
    kafkaScheduler.shutdown()
    // reset metric counters; this broker no longer tracks cluster-wide state
    offlinePartitionCount.set(0)
    preferredReplicaImbalanceCount.set(0)
    // de-register partition ISR listener for on-going partition reassignment task
    deregisterPartitionReassignmentIsrChangeListeners()
    // shutdown partition state machine
    partitionStateMachine.shutdown()
    deregisterTopicChangeListener()
    partitionModificationsListeners.keys.foreach(deregisterPartitionModificationsListener)
    deregisterTopicDeletionListener()
    // shutdown replica state machine
    replicaStateMachine.shutdown()
    deregisterBrokerChangeListener()
    // shutdown controller channel manager
    if(controllerContext.controllerChannelManager != null) {
      controllerContext.controllerChannelManager.shutdown()
      controllerContext.controllerChannelManager = null
    }
    // reset controller context
    controllerContext.epoch=0
    controllerContext.epochZkVersion=0
    // revert this broker's state from controller back to a plain broker
    brokerState.newState(RunningAsBroker)
    info("Broker %d resigned as the controller".format(config.brokerId))
  }
  /**
   * Returns true if this broker is the current controller.
   * Compares the cached active controller id with this broker's configured id.
   */
  def isActive: Boolean = activeControllerId.get() == config.brokerId
  /**
   * This callback is invoked by the replica state machine's broker change listener, with the list of newly started
   * brokers as input. It does the following -
   * 1. Sends update metadata request to all live and shutting down brokers
   * 2. Triggers the OnlinePartition state change for all new/offline partitions
   * 3. It checks whether there are reassigned replicas assigned to any newly started brokers. If
   *    so, it performs the reassignment logic for each topic/partition.
   *
   * Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point for two reasons:
   * 1. The partition state machine, when triggering online state change, will refresh leader and ISR for only those
   *    partitions currently new or offline (rather than every partition this controller is aware of)
   * 2. Even if we do refresh the cache, there is no guarantee that by the time the leader and ISR request reaches
   *    every broker that it is still valid. Brokers check the leader epoch to determine validity of the request.
   */
  def onBrokerStartup(newBrokers: Seq[Int]) {
    info("New broker startup callback for %s".format(newBrokers.mkString(",")))
    val newBrokersSet = newBrokers.toSet
    // send update metadata request to all live and shutting down brokers. Old brokers will get to know of the new
    // broker via this update.
    // In cases of controlled shutdown leaders will not be elected when a new broker comes up. So at least in the
    // common controlled shutdown case, the metadata will reach the new brokers faster
    sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq)
    // the very first thing to do when a new broker comes up is send it the entire list of partitions that it is
    // supposed to host. Based on that the broker starts the high watermark threads for the input list of partitions
    val allReplicasOnNewBrokers = controllerContext.replicasOnBrokers(newBrokersSet)
    replicaStateMachine.handleStateChanges(allReplicasOnNewBrokers, OnlineReplica)
    // when a new broker comes up, the controller needs to trigger leader election for all new and offline partitions
    // to see if these brokers can become leaders for some/all of those
    partitionStateMachine.triggerOnlinePartitionStateChange()
    // check if reassignment of some partitions need to be restarted
    val partitionsWithReplicasOnNewBrokers = controllerContext.partitionsBeingReassigned.filter {
      case (_, reassignmentContext) => reassignmentContext.newReplicas.exists(newBrokersSet.contains(_))
    }
    partitionsWithReplicasOnNewBrokers.foreach(p => onPartitionReassignment(p._1, p._2))
    // check if topic deletion needs to be resumed. If at least one replica that belongs to the topic being deleted exists
    // on the newly restarted brokers, there is a chance that topic deletion can resume
    val replicasForTopicsToBeDeleted = allReplicasOnNewBrokers.filter(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic))
    if(replicasForTopicsToBeDeleted.nonEmpty) {
      info(("Some replicas %s for topics scheduled for deletion %s are on the newly restarted brokers %s. " +
        "Signaling restart of topic deletion for these topics").format(replicasForTopicsToBeDeleted.mkString(","),
        topicDeletionManager.topicsToBeDeleted.mkString(","), newBrokers.mkString(",")))
      topicDeletionManager.resumeDeletionForTopics(replicasForTopicsToBeDeleted.map(_.topic))
    }
  }
/**
* This callback is invoked by the replica state machine's broker change listener with the list of failed brokers
* as input. It does the following -
* 1. Mark partitions with dead leaders as offline
* 2. Triggers the OnlinePartition state change for all new/offline partitions
* 3. Invokes the OfflineReplica state change on the input list of newly started brokers
* 4. If no partitions are effected then send UpdateMetadataRequest to live or shutting down brokers
*
* Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point. This is because
* the partition state machine will refresh our cache for us when performing leader election for all new/offline
* partitions coming online.
*/
def onBrokerFailure(deadBrokers: Seq[Int]) {
info("Broker failure callback for %s".format(deadBrokers.mkString(",")))
val deadBrokersThatWereShuttingDown =
deadBrokers.filter(id => controllerContext.shuttingDownBrokerIds.remove(id))
info("Removed %s from list of shutting down brokers.".format(deadBrokersThatWereShuttingDown))
val deadBrokersSet = deadBrokers.toSet
// trigger OfflinePartition state for all partitions whose current leader is one amongst the dead brokers
val partitionsWithoutLeader = controllerContext.partitionLeadershipInfo.filter(partitionAndLeader =>
deadBrokersSet.contains(partitionAndLeader._2.leaderAndIsr.leader) &&
!topicDeletionManager.isTopicQueuedUpForDeletion(partitionAndLeader._1.topic)).keySet
partitionStateMachine.handleStateChanges(partitionsWithoutLeader, OfflinePartition)
// trigger OnlinePartition state changes for offline or new partitions
partitionStateMachine.triggerOnlinePartitionStateChange()
// filter out the replicas that belong to topics that are being deleted
var allReplicasOnDeadBrokers = controllerContext.replicasOnBrokers(deadBrokersSet)
val activeReplicasOnDeadBrokers = allReplicasOnDeadBrokers.filterNot(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic))
// handle dead replicas
replicaStateMachine.handleStateChanges(activeReplicasOnDeadBrokers, OfflineReplica)
// check if topic deletion state for the dead replicas needs to be updated
val replicasForTopicsToBeDeleted = allReplicasOnDeadBrokers.filter(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic))
if(replicasForTopicsToBeDeleted.nonEmpty) {
// it is required to mark the respective replicas in TopicDeletionFailed state since the replica cannot be
// deleted when the broker is down. This will prevent the replica from being in TopicDeletionStarted state indefinitely
// since topic deletion cannot be retried until at least one replica is in TopicDeletionStarted state
topicDeletionManager.failReplicaDeletion(replicasForTopicsToBeDeleted)
}
// If broker failure did not require leader re-election, inform brokers of failed broker
// Note that during leader re-election, brokers update their metadata
if (partitionsWithoutLeader.isEmpty) {
sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq)
}
}
/**
* This callback is invoked by the partition state machine's topic change listener with the list of new topics
* and partitions as input. It does the following -
* 1. Registers partition change listener. This is not required until KAFKA-347
* 2. Invokes the new partition callback
* 3. Send metadata request with the new topic to all brokers so they allow requests for that topic to be served
*/
def onNewTopicCreation(topics: Set[String], newPartitions: Set[TopicAndPartition]) {
info("New topic creation callback for %s".format(newPartitions.mkString(",")))
// subscribe to partition changes
topics.foreach(topic => registerPartitionModificationsListener(topic))
onNewPartitionCreation(newPartitions)
}
  /**
   * This callback is invoked by the topic change callback with the set of newly created partitions as input.
   * It does the following -
   * 1. Move the newly created partitions to the NewPartition state
   * 2. Move the newly created partitions from NewPartition->OnlinePartition state
   */
  def onNewPartitionCreation(newPartitions: Set[TopicAndPartition]) {
    info("New partition creation callback for %s".format(newPartitions.mkString(",")))
    // NewPartition/NewReplica: the partition and its replicas exist but have no leader yet
    partitionStateMachine.handleStateChanges(newPartitions, NewPartition)
    replicaStateMachine.handleStateChanges(controllerContext.replicasForPartition(newPartitions), NewReplica)
    // OnlinePartition: elect the initial leader via the offline-partition selector, then bring replicas online
    partitionStateMachine.handleStateChanges(newPartitions, OnlinePartition, offlinePartitionSelector)
    replicaStateMachine.handleStateChanges(controllerContext.replicasForPartition(newPartitions), OnlineReplica)
  }
/**
* This callback is invoked by the reassigned partitions listener. When an admin command initiates a partition
* reassignment, it creates the /admin/reassign_partitions path that triggers the zookeeper listener.
* Reassigning replicas for a partition goes through a few steps listed in the code.
* RAR = Reassigned replicas
* OAR = Original list of replicas for partition
* AR = current assigned replicas
*
* 1. Update AR in ZK with OAR + RAR.
* 2. Send LeaderAndIsr request to every replica in OAR + RAR (with AR as OAR + RAR). We do this by forcing an update
* of the leader epoch in zookeeper.
* 3. Start new replicas RAR - OAR by moving replicas in RAR - OAR to NewReplica state.
* 4. Wait until all replicas in RAR are in sync with the leader.
   * 5. Move all replicas in RAR to OnlineReplica state.
* 6. Set AR to RAR in memory.
* 7. If the leader is not in RAR, elect a new leader from RAR. If new leader needs to be elected from RAR, a LeaderAndIsr
* will be sent. If not, then leader epoch will be incremented in zookeeper and a LeaderAndIsr request will be sent.
* In any case, the LeaderAndIsr request will have AR = RAR. This will prevent the leader from adding any replica in
* RAR - OAR back in the isr.
* 8. Move all replicas in OAR - RAR to OfflineReplica state. As part of OfflineReplica state change, we shrink the
* isr to remove OAR - RAR in zookeeper and send a LeaderAndIsr ONLY to the Leader to notify it of the shrunk isr.
* After that, we send a StopReplica (delete = false) to the replicas in OAR - RAR.
* 9. Move all replicas in OAR - RAR to NonExistentReplica state. This will send a StopReplica (delete = true) to
* the replicas in OAR - RAR to physically delete the replicas on disk.
* 10. Update AR in ZK with RAR.
* 11. Update the /admin/reassign_partitions path in ZK to remove this partition.
* 12. After electing leader, the replicas and isr information changes. So resend the update metadata request to every broker.
*
* For example, if OAR = {1, 2, 3} and RAR = {4,5,6}, the values in the assigned replica (AR) and leader/isr path in ZK
* may go through the following transition.
* AR leader/isr
* {1,2,3} 1/{1,2,3} (initial state)
* {1,2,3,4,5,6} 1/{1,2,3} (step 2)
* {1,2,3,4,5,6} 1/{1,2,3,4,5,6} (step 4)
* {1,2,3,4,5,6} 4/{1,2,3,4,5,6} (step 7)
* {1,2,3,4,5,6} 4/{4,5,6} (step 8)
* {4,5,6} 4/{4,5,6} (step 10)
*
* Note that we have to update AR in ZK with RAR last since it's the only place where we store OAR persistently.
* This way, if the controller crashes before that step, we can still recover.
*/
  def onPartitionReassignment(topicAndPartition: TopicAndPartition, reassignedPartitionContext: ReassignedPartitionsContext) {
    val reassignedReplicas = reassignedPartitionContext.newReplicas
    // First phase: the reassigned replicas are not yet all in the ISR — expand AR and wait.
    if (!areReplicasInIsr(topicAndPartition.topic, topicAndPartition.partition, reassignedReplicas)) {
      info("New replicas %s for partition %s being ".format(reassignedReplicas.mkString(","), topicAndPartition) +
        "reassigned not yet caught up with the leader")
      val newReplicasNotInOldReplicaList = reassignedReplicas.toSet -- controllerContext.partitionReplicaAssignment(topicAndPartition).toSet
      val newAndOldReplicas = (reassignedPartitionContext.newReplicas ++ controllerContext.partitionReplicaAssignment(topicAndPartition)).toSet
      //1. Update AR in ZK with OAR + RAR.
      updateAssignedReplicasForPartition(topicAndPartition, newAndOldReplicas.toSeq)
      //2. Send LeaderAndIsr request to every replica in OAR + RAR (with AR as OAR + RAR).
      updateLeaderEpochAndSendRequest(topicAndPartition, controllerContext.partitionReplicaAssignment(topicAndPartition),
        newAndOldReplicas.toSeq)
      //3. replicas in RAR - OAR -> NewReplica
      startNewReplicasForReassignedPartition(topicAndPartition, reassignedPartitionContext, newReplicasNotInOldReplicaList)
      info("Waiting for new replicas %s for partition %s being ".format(reassignedReplicas.mkString(","), topicAndPartition) +
        "reassigned to catch up with the leader")
    } else {
      // Second phase: all reassigned replicas are in the ISR — finalize the reassignment.
      //4. Wait until all replicas in RAR are in sync with the leader.
      val oldReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition).toSet -- reassignedReplicas.toSet
      //5. replicas in RAR -> OnlineReplica
      reassignedReplicas.foreach { replica =>
        replicaStateMachine.handleStateChanges(Set(new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition,
          replica)), OnlineReplica)
      }
      //6. Set AR to RAR in memory.
      //7. Send LeaderAndIsr request with a potential new leader (if current leader not in RAR) and
      //   a new AR (using RAR) and same isr to every broker in RAR
      moveReassignedPartitionLeaderIfRequired(topicAndPartition, reassignedPartitionContext)
      //8. replicas in OAR - RAR -> Offline (force those replicas out of isr)
      //9. replicas in OAR - RAR -> NonExistentReplica (force those replicas to be deleted)
      stopOldReplicasOfReassignedPartition(topicAndPartition, reassignedPartitionContext, oldReplicas)
      //10. Update AR in ZK with RAR.
      updateAssignedReplicasForPartition(topicAndPartition, reassignedReplicas)
      //11. Update the /admin/reassign_partitions path in ZK to remove this partition.
      removePartitionFromReassignedPartitions(topicAndPartition)
      info("Removed partition %s from the list of reassigned partitions in zookeeper".format(topicAndPartition))
      controllerContext.partitionsBeingReassigned.remove(topicAndPartition)
      //12. After electing leader, the replicas and isr information changes, so resend the update metadata request to every broker
      sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set(topicAndPartition))
      // signal delete topic thread if reassignment for some partitions belonging to topics being deleted just completed
      topicDeletionManager.resumeDeletionForTopics(Set(topicAndPartition.topic))
    }
  }
private def watchIsrChangesForReassignedPartition(topic: String,
partition: Int,
reassignedPartitionContext: ReassignedPartitionsContext) {
val reassignedReplicas = reassignedPartitionContext.newReplicas
val isrChangeListener = new PartitionReassignmentIsrChangeListener(this, topic, partition, reassignedReplicas.toSet)
reassignedPartitionContext.isrChangeListener = isrChangeListener
// register listener on the leader and isr path to wait until they catch up with the current leader
zkUtils.zkClient.subscribeDataChanges(getTopicPartitionLeaderAndIsrPath(topic, partition), isrChangeListener)
}
/**
 * Kicks off (or rejects) reassignment of a single partition to the replica list carried in
 * `reassignedPartitionContext.newReplicas`.
 *
 * Validates that the partition exists and that the requested replica list actually differs from
 * the current assignment, then — in this order — registers the ISR-change listener, records the
 * in-flight reassignment in the controller context, marks the topic ineligible for deletion, and
 * triggers [[onPartitionReassignment]]. On any failure the partition is removed from the admin
 * reassignment path so the admin client is unblocked.
 *
 * @param topicAndPartition          the partition to reassign
 * @param reassignedPartitionContext context holding the target replica list and ISR listener slot
 */
def initiateReassignReplicasForTopicPartition(topicAndPartition: TopicAndPartition,
                                              reassignedPartitionContext: ReassignedPartitionsContext) {
  val newReplicas = reassignedPartitionContext.newReplicas
  val topic = topicAndPartition.topic
  val partition = topicAndPartition.partition
  try {
    val assignedReplicasOpt = controllerContext.partitionReplicaAssignment.get(topicAndPartition)
    assignedReplicasOpt match {
      case Some(assignedReplicas) =>
        if (assignedReplicas == newReplicas) {
          // no-op reassignment: surface it as an error so it is cleaned up in the catch below
          throw new KafkaException("Partition %s to be reassigned is already assigned to replicas".format(topicAndPartition) +
            " %s. Ignoring request for partition reassignment".format(newReplicas.mkString(",")))
        } else {
          info("Handling reassignment of partition %s to new replicas %s".format(topicAndPartition, newReplicas.mkString(",")))
          // first register ISR change listener
          watchIsrChangesForReassignedPartition(topic, partition, reassignedPartitionContext)
          controllerContext.partitionsBeingReassigned.put(topicAndPartition, reassignedPartitionContext)
          // mark topic ineligible for deletion for the partitions being reassigned
          topicDeletionManager.markTopicIneligibleForDeletion(Set(topic))
          onPartitionReassignment(topicAndPartition, reassignedPartitionContext)
        }
      case None => throw new KafkaException("Attempt to reassign partition %s that doesn't exist"
        .format(topicAndPartition))
    }
  } catch {
    case e: Throwable => error("Error completing reassignment of partition %s".format(topicAndPartition), e)
      // remove the partition from the admin path to unblock the admin client
      removePartitionFromReassignedPartitions(topicAndPartition)
  }
}
/**
 * Attempts to move leadership of each given partition to its preferred replica (the first replica
 * in the assigned list) via the partition state machine.
 *
 * The partitions are recorded as undergoing election and their topics are made ineligible for
 * deletion for the duration. The `finally` block always clears the election bookkeeping, whether
 * or not the state change succeeded.
 *
 * @param partitions                 partitions to elect the preferred replica for
 * @param isTriggeredByAutoRebalance true when invoked from the auto-rebalance check; controls
 *                                   whether the admin ZK path is deleted during cleanup
 */
def onPreferredReplicaElection(partitions: Set[TopicAndPartition], isTriggeredByAutoRebalance: Boolean = false) {
  info("Starting preferred replica leader election for partitions %s".format(partitions.mkString(",")))
  try {
    controllerContext.partitionsUndergoingPreferredReplicaElection ++= partitions
    topicDeletionManager.markTopicIneligibleForDeletion(partitions.map(_.topic))
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, preferredReplicaPartitionLeaderSelector)
  } catch {
    case e: Throwable => error("Error completing preferred replica leader election for partitions %s".format(partitions.mkString(",")), e)
  } finally {
    removePartitionsFromPreferredReplicaElection(partitions, isTriggeredByAutoRebalance)
  }
}
/**
* Invoked when the controller module of a Kafka server is started up. This does not assume that the current broker
* is the controller. It merely registers the session expiration listener and starts the controller leader
* elector
*/
def startup() = {
  // Enqueue the Startup event before starting the event thread so it is the first event processed.
  addToControllerEventQueue(Startup)
  controllerEventThread.start()
}
/**
* Invoked when the controller module of a Kafka server is shutting down. If the broker was the current controller,
* it shuts down the partition and replica state machines. If not, those are a no-op. In addition to that, it also
* shuts down the controller channel manager, if one exists (i.e. if it was the current controller)
*/
def shutdown() = {
  // Stop processing further controller events, then release controller-only resources
  // (resignation is a no-op if this broker is not the active controller).
  controllerEventThread.shutdown()
  onControllerResignation()
}
/**
 * Forwards a controller-to-broker request to the channel manager's per-broker send queue.
 *
 * @param brokerId target broker
 * @param apiKey   request type
 * @param request  request builder
 * @param callback invoked with the response, or null for fire-and-forget
 */
def sendRequest(brokerId: Int, apiKey: ApiKeys, request: AbstractRequest.Builder[_ <: AbstractRequest],
                callback: AbstractResponse => Unit = null) = {
  controllerContext.controllerChannelManager.sendRequest(brokerId, apiKey, request, callback)
}
/**
 * Bumps the controller epoch in ZooKeeper using a version-conditional write, updating the
 * cached epoch and zkVersion on success.
 *
 * A failed conditional update means another broker won the controller election concurrently and
 * is surfaced as a [[ControllerMovedException]]. If the epoch path does not exist yet, this is
 * the very first controller and the path is created with the initial epoch; losing that creation
 * race likewise raises [[ControllerMovedException]].
 */
def incrementControllerEpoch() = {
  try {
    val nextEpoch = controllerContext.epoch + 1
    val (updateSucceeded, newVersion) = zkUtils.conditionalUpdatePersistentPathIfExists(
      ZkUtils.ControllerEpochPath, nextEpoch.toString, controllerContext.epochZkVersion)
    if (updateSucceeded) {
      controllerContext.epochZkVersion = newVersion
      controllerContext.epoch = nextEpoch
    } else {
      throw new ControllerMovedException("Controller moved to another broker. Aborting controller startup procedure")
    }
  } catch {
    case _: ZkNoNodeException =>
      // if path doesn't exist, this is the first controller whose epoch should be 1
      // the following call can still fail if another controller gets elected between checking if the path exists and
      // trying to create the controller epoch path
      try {
        zkUtils.createPersistentPath(ZkUtils.ControllerEpochPath, KafkaController.InitialControllerEpoch.toString)
        controllerContext.epoch = KafkaController.InitialControllerEpoch
        controllerContext.epochZkVersion = KafkaController.InitialControllerEpochZkVersion
      } catch {
        case _: ZkNodeExistsException => throw new ControllerMovedException("Controller moved to another broker. " +
          "Aborting controller startup procedure")
        case oe: Throwable => error("Error while incrementing controller epoch", oe)
      }
    case oe: Throwable => error("Error while incrementing controller epoch", oe)
  }
  info("Controller %d incremented epoch to %d".format(config.brokerId, controllerContext.epoch))
}
// Subscribes a fresh SessionExpirationListener so this broker re-elects on ZK session loss.
private def registerSessionExpirationListener() = {
  zkUtils.zkClient.subscribeStateChanges(new SessionExpirationListener(this))
}
// Watches the /controller znode so this broker notices controller moves and failovers.
private def registerControllerChangeListener() = {
  zkUtils.zkClient.subscribeDataChanges(ZkUtils.ControllerPath, new ControllerChangeListener(this))
}
/**
 * Populates the controller's in-memory view of the cluster from ZooKeeper after winning election.
 *
 * Ordering matters: brokers, topics, and replica assignments are read first, then the
 * leader/ISR cache is refreshed, the broker channel manager is started, and finally the
 * in-flight preferred-replica elections, partition reassignments, and topic deletions are
 * recovered so they can be resumed.
 */
private def initializeControllerContext() {
  // update controller cache with delete topic information
  controllerContext.liveBrokers = zkUtils.getAllBrokersInCluster().toSet
  controllerContext.allTopics = zkUtils.getAllTopics().toSet
  controllerContext.partitionReplicaAssignment = zkUtils.getReplicaAssignmentForTopics(controllerContext.allTopics.toSeq)
  controllerContext.partitionLeadershipInfo = new mutable.HashMap[TopicAndPartition, LeaderIsrAndControllerEpoch]
  controllerContext.shuttingDownBrokerIds = mutable.Set.empty[Int]
  // update the leader and isr cache for all existing partitions from Zookeeper
  updateLeaderAndIsrCache()
  // start the channel manager
  startChannelManager()
  initializePreferredReplicaElection()
  initializePartitionReassignment()
  initializeTopicDeletion()
  info("Currently active brokers in the cluster: %s".format(controllerContext.liveBrokerIds))
  info("Currently shutting brokers in the cluster: %s".format(controllerContext.shuttingDownBrokerIds))
  info("Current list of topics in the cluster: %s".format(controllerContext.allTopics))
}
/**
 * Recovers in-flight preferred replica elections from ZooKeeper during controller failover.
 *
 * An election read from ZK is considered already finished when either its topic has been deleted
 * (no replica assignment remains) or the current leader is already the preferred (first-listed)
 * replica; only the remaining elections are kept in the controller context to be resumed.
 */
private def initializePreferredReplicaElection() {
  // initialize preferred replica election state
  val partitionsUndergoingPreferredReplicaElection = zkUtils.getPartitionsUndergoingPreferredReplicaElection()
  // check if they are already completed or topic was deleted
  val partitionsThatCompletedPreferredReplicaElection = partitionsUndergoingPreferredReplicaElection.filter { partition =>
    controllerContext.partitionReplicaAssignment.get(partition) match {
      case Some(replicas) => controllerContext.partitionLeadershipInfo(partition).leaderAndIsr.leader == replicas.head
      case None => true // topic was deleted; nothing left to elect
    }
  }
  controllerContext.partitionsUndergoingPreferredReplicaElection ++= partitionsUndergoingPreferredReplicaElection
  controllerContext.partitionsUndergoingPreferredReplicaElection --= partitionsThatCompletedPreferredReplicaElection
  info("Partitions undergoing preferred replica election: %s".format(partitionsUndergoingPreferredReplicaElection.mkString(",")))
  info("Partitions that completed preferred replica election: %s".format(partitionsThatCompletedPreferredReplicaElection.mkString(",")))
  info("Resuming preferred replica election for partitions: %s".format(controllerContext.partitionsUndergoingPreferredReplicaElection.mkString(",")))
}
/**
 * Recovers in-flight partition reassignments from ZooKeeper during controller failover.
 *
 * A reassignment read from ZK is considered already finished when either its topic has been
 * deleted or the current assignment already equals the requested replica list; those entries are
 * removed from the admin path, and only the remainder is kept to be resumed.
 */
private def initializePartitionReassignment() {
  // read the partitions being reassigned from zookeeper path /admin/reassign_partitions
  val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned()
  // check if they are already completed or topic was deleted
  val reassignedPartitions = partitionsBeingReassigned.filter { case (topicAndPartition, context) =>
    controllerContext.partitionReplicaAssignment.get(topicAndPartition) match {
      case Some(replicas) => replicas == context.newReplicas // already reassigned
      case None => true                                      // topic was deleted
    }
  }.keys
  reassignedPartitions.foreach(removePartitionFromReassignedPartitions)
  val partitionsToReassign = new mutable.HashMap[TopicAndPartition, ReassignedPartitionsContext]
  partitionsToReassign ++= partitionsBeingReassigned
  partitionsToReassign --= reassignedPartitions
  controllerContext.partitionsBeingReassigned ++= partitionsToReassign
  info("Partitions being reassigned: %s".format(partitionsBeingReassigned.toString()))
  info("Partitions already reassigned: %s".format(reassignedPartitions.toString()))
  info("Resuming reassignment of partitions: %s".format(partitionsToReassign.toString()))
}
/**
 * Builds the TopicDeletionManager from the deletion queue persisted in ZooKeeper.
 *
 * A topic is initially ineligible for deletion when any of its replicas is on a dead broker, or
 * when it has a partition reassignment or preferred replica election in progress.
 */
private def initializeTopicDeletion() {
  val topicsQueuedForDeletion = zkUtils.getChildrenParentMayNotExist(ZkUtils.DeleteTopicsPath).toSet
  val topicsWithReplicasOnDeadBrokers = controllerContext.partitionReplicaAssignment.filter { case (_, replicas) =>
    replicas.exists(r => !controllerContext.liveBrokerIds.contains(r)) }.keySet.map(_.topic)
  val topicsForWhichPreferredReplicaElectionIsInProgress = controllerContext.partitionsUndergoingPreferredReplicaElection.map(_.topic)
  val topicsForWhichPartitionReassignmentIsInProgress = controllerContext.partitionsBeingReassigned.keySet.map(_.topic)
  // union of all three ineligibility conditions
  val topicsIneligibleForDeletion = topicsWithReplicasOnDeadBrokers | topicsForWhichPartitionReassignmentIsInProgress |
    topicsForWhichPreferredReplicaElectionIsInProgress
  info("List of topics to be deleted: %s".format(topicsQueuedForDeletion.mkString(",")))
  info("List of topics ineligible for deletion: %s".format(topicsIneligibleForDeletion.mkString(",")))
  // initialize the topic deletion manager
  topicDeletionManager = new TopicDeletionManager(this, topicsQueuedForDeletion, topicsIneligibleForDeletion)
}
// Resume every partition reassignment that was found in flight when this controller took over.
private def maybeTriggerPartitionReassignment() {
  controllerContext.partitionsBeingReassigned.foreach { case (topicAndPartition, reassignedPartitionsContext) =>
    initiateReassignReplicasForTopicPartition(topicAndPartition, reassignedPartitionsContext)
  }
}
// Resume the preferred replica elections that were found in flight on controller startup.
private def maybeTriggerPreferredReplicaElection() {
  onPreferredReplicaElection(controllerContext.partitionsUndergoingPreferredReplicaElection.toSet)
}
// Creates and starts the channel manager that owns the request queues to every live broker.
private def startChannelManager() {
  controllerContext.controllerChannelManager = new ControllerChannelManager(controllerContext, config, time, metrics, threadNamePrefix)
  controllerContext.controllerChannelManager.startup()
}
/**
 * Refreshes the controller's cached leader/ISR/epoch info for the given partitions from
 * ZooKeeper (defaults to every partition the controller knows about).
 */
def updateLeaderAndIsrCache(topicAndPartitions: Set[TopicAndPartition] = controllerContext.partitionReplicaAssignment.keySet) {
  val leaderAndIsrInfo = zkUtils.getPartitionLeaderAndIsrForTopics(zkUtils.zkClient, topicAndPartitions)
  leaderAndIsrInfo.foreach { case (topicPartition, leaderIsrAndControllerEpoch) =>
    controllerContext.partitionLeadershipInfo.put(topicPartition, leaderIsrAndControllerEpoch)
  }
}
// True iff the partition has leader/ISR info in ZooKeeper and every given replica is in the ISR.
// Returns false when the leader-and-ISR path does not exist.
private def areReplicasInIsr(topic: String, partition: Int, replicas: Seq[Int]): Boolean = {
  zkUtils.getLeaderAndIsrForPartition(topic, partition).exists { leaderAndIsr =>
    replicas.forall(leaderAndIsr.isr.contains)
  }
}
/**
 * After the reassigned replicas have caught up, ensures the partition leader is one of them.
 *
 * Three cases:
 *  - current leader is not in the new replica list: re-elect using the reassignment selector;
 *  - current leader is in the list and alive: keep it, but bump the leader epoch and send a
 *    LeaderAndIsr request so the shrunken replica list takes effect;
 *  - current leader is in the list but dead: re-elect.
 */
private def moveReassignedPartitionLeaderIfRequired(topicAndPartition: TopicAndPartition,
                                                    reassignedPartitionContext: ReassignedPartitionsContext) {
  val reassignedReplicas = reassignedPartitionContext.newReplicas
  val currentLeader = controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.leader
  // change the assigned replica list to just the reassigned replicas in the cache so it gets sent out on the LeaderAndIsr
  // request to the current or new leader. This will prevent it from adding the old replicas to the ISR
  val oldAndNewReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
  controllerContext.partitionReplicaAssignment.put(topicAndPartition, reassignedReplicas)
  if(!reassignedPartitionContext.newReplicas.contains(currentLeader)) {
    info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
      "is not in the new list of replicas %s. Re-electing leader".format(reassignedReplicas.mkString(",")))
    // move the leader to one of the alive and caught up new replicas
    partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition, reassignedPartitionLeaderSelector)
  } else {
    // check if the leader is alive or not
    if (controllerContext.liveBrokerIds.contains(currentLeader)) {
      info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
        "is already in the new list of replicas %s and is alive".format(reassignedReplicas.mkString(",")))
      // shrink replication factor and update the leader epoch in zookeeper to use on the next LeaderAndIsrRequest
      updateLeaderEpochAndSendRequest(topicAndPartition, oldAndNewReplicas, reassignedReplicas)
    } else {
      info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
        "is already in the new list of replicas %s but is dead".format(reassignedReplicas.mkString(",")))
      partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition, reassignedPartitionLeaderSelector)
    }
  }
}
/**
 * Retires the replicas that are no longer part of a reassigned partition by walking them through
 * the replica state machine: Offline (removed from ISR) -> deletion started (StopReplica sent)
 * -> deletion successful -> NonExistent.
 *
 * @param oldReplicas broker ids being dropped from the assignment
 */
private def stopOldReplicasOfReassignedPartition(topicAndPartition: TopicAndPartition,
                                                 reassignedPartitionContext: ReassignedPartitionsContext,
                                                 oldReplicas: Set[Int]) {
  val topic = topicAndPartition.topic
  val partition = topicAndPartition.partition
  // first move the replica to offline state (the controller removes it from the ISR)
  val replicasToBeDeleted = oldReplicas.map(r => PartitionAndReplica(topic, partition, r))
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, OfflineReplica)
  // send stop replica command to the old replicas
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionStarted)
  // TODO: Eventually partition reassignment could use a callback that does retries if deletion failed
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionSuccessful)
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, NonExistentReplica)
}
/**
 * Persists a new replica assignment for one partition: rebuilds the full assignment map for the
 * partition's topic, writes it to ZooKeeper via the two-argument overload, and only then updates
 * the in-memory cache (so the cache reflects a successful ZK write).
 */
private def updateAssignedReplicasForPartition(topicAndPartition: TopicAndPartition,
                                               replicas: Seq[Int]) {
  // all sibling partitions of the same topic must be written together under the topic znode
  val partitionsAndReplicasForThisTopic = controllerContext.partitionReplicaAssignment.filter(_._1.topic.equals(topicAndPartition.topic))
  partitionsAndReplicasForThisTopic.put(topicAndPartition, replicas)
  updateAssignedReplicasForPartition(topicAndPartition, partitionsAndReplicasForThisTopic)
  info("Updated assigned replicas for partition %s being reassigned to %s ".format(topicAndPartition, replicas.mkString(",")))
  // update the assigned replica list after a successful zookeeper write
  controllerContext.partitionReplicaAssignment.put(topicAndPartition, replicas)
}
/**
 * Starts the freshly added replicas of a reassigned partition by moving each one — one state
 * change call per replica — into the NewReplica state, which sends the request that makes the
 * target broker begin replicating the partition.
 */
private def startNewReplicasForReassignedPartition(topicAndPartition: TopicAndPartition,
                                                   reassignedPartitionContext: ReassignedPartitionsContext,
                                                   newReplicas: Set[Int]) {
  // send the start replica request to the brokers in the reassigned replicas list that are not in the assigned
  // replicas list
  for (replica <- newReplicas) {
    val partitionAndReplica = new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, replica)
    replicaStateMachine.handleStateChanges(Set(partitionAndReplica), NewReplica)
  }
}
/**
 * Bumps the partition's leader epoch in ZooKeeper and, on success, sends a LeaderAndIsr request
 * carrying the new assigned replica list to the given brokers. If the epoch update fails the
 * reassignment is failed (logged) rather than retried here.
 *
 * An IllegalStateException from the request batch indicates controller-internal inconsistency:
 * the batch is cleared and the controller resigns.
 */
private def updateLeaderEpochAndSendRequest(topicAndPartition: TopicAndPartition, replicasToReceiveRequest: Seq[Int], newAssignedReplicas: Seq[Int]) {
  brokerRequestBatch.newBatch()
  updateLeaderEpoch(topicAndPartition.topic, topicAndPartition.partition) match {
    case Some(updatedLeaderIsrAndControllerEpoch) =>
      try {
        brokerRequestBatch.addLeaderAndIsrRequestForBrokers(replicasToReceiveRequest, topicAndPartition.topic,
          topicAndPartition.partition, updatedLeaderIsrAndControllerEpoch, newAssignedReplicas)
        brokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch)
      } catch {
        case e : IllegalStateException => {
          // Resign if the controller is in an illegal state
          error("Forcing the controller to resign")
          brokerRequestBatch.clear()
          triggerControllerMove()
          throw e
        }
      }
      stateChangeLogger.trace(("Controller %d epoch %d sent LeaderAndIsr request %s with new assigned replica list %s " +
        "to leader %d for partition being reassigned %s").format(config.brokerId, controllerContext.epoch, updatedLeaderIsrAndControllerEpoch,
        newAssignedReplicas.mkString(","), updatedLeaderIsrAndControllerEpoch.leaderAndIsr.leader, topicAndPartition))
    case None => // fail the reassignment
      stateChangeLogger.error(("Controller %d epoch %d failed to send LeaderAndIsr request with new assigned replica list %s " +
        "to leader for partition being reassigned %s").format(config.brokerId, controllerContext.epoch,
        newAssignedReplicas.mkString(","), topicAndPartition))
  }
}
// Watches /brokers/ids for broker registrations and deregistrations.
private def registerBrokerChangeListener() = {
  zkUtils.zkClient.subscribeChildChanges(ZkUtils.BrokerIdsPath, brokerChangeListener)
}
// Stops watching /brokers/ids (called on controller resignation).
private def deregisterBrokerChangeListener() = {
  zkUtils.zkClient.unsubscribeChildChanges(ZkUtils.BrokerIdsPath, brokerChangeListener)
}
// Watches the broker topics path for topic creations and deletions.
private def registerTopicChangeListener() = {
  zkUtils.zkClient.subscribeChildChanges(BrokerTopicsPath, topicChangeListener)
}
// Stops watching the broker topics path.
private def deregisterTopicChangeListener() = {
  zkUtils.zkClient.unsubscribeChildChanges(BrokerTopicsPath, topicChangeListener)
}
// Creates and registers a per-topic listener on the topic znode so partition-count changes
// are observed; the listener is also cached so it can be deregistered later.
def registerPartitionModificationsListener(topic: String) = {
  partitionModificationsListeners.put(topic, new PartitionModificationsListener(this, topic))
  zkUtils.zkClient.subscribeDataChanges(getTopicPath(topic), partitionModificationsListeners(topic))
}
// Unsubscribes and forgets the per-topic partition-modifications listener.
def deregisterPartitionModificationsListener(topic: String) = {
  zkUtils.zkClient.unsubscribeDataChanges(getTopicPath(topic), partitionModificationsListeners(topic))
  partitionModificationsListeners.remove(topic)
}
// Watches /admin/delete_topics for topic deletion requests.
private def registerTopicDeletionListener() = {
  zkUtils.zkClient.subscribeChildChanges(DeleteTopicsPath, topicDeletionListener)
}
// Stops watching /admin/delete_topics.
private def deregisterTopicDeletionListener() = {
  zkUtils.zkClient.unsubscribeChildChanges(DeleteTopicsPath, topicDeletionListener)
}
// Watches /admin/reassign_partitions for reassignment requests.
private def registerPartitionReassignmentListener() = {
  zkUtils.zkClient.subscribeDataChanges(ZkUtils.ReassignPartitionsPath, partitionReassignmentListener)
}
// Stops watching /admin/reassign_partitions.
private def deregisterPartitionReassignmentListener() = {
  zkUtils.zkClient.unsubscribeDataChanges(ZkUtils.ReassignPartitionsPath, partitionReassignmentListener)
}
// Watches the ISR change notification path so broker-reported ISR changes are propagated.
private def registerIsrChangeNotificationListener() = {
  debug("Registering IsrChangeNotificationListener")
  zkUtils.zkClient.subscribeChildChanges(ZkUtils.IsrChangeNotificationPath, isrChangeNotificationListener)
}
// Stops watching the ISR change notification path.
private def deregisterIsrChangeNotificationListener() = {
  debug("De-registering IsrChangeNotificationListener")
  zkUtils.zkClient.unsubscribeChildChanges(ZkUtils.IsrChangeNotificationPath, isrChangeNotificationListener)
}
// Watches /admin/preferred_replica_election for election requests.
private def registerPreferredReplicaElectionListener() {
  zkUtils.zkClient.subscribeDataChanges(ZkUtils.PreferredReplicaLeaderElectionPath, preferredReplicaElectionListener)
}
// Stops watching /admin/preferred_replica_election.
private def deregisterPreferredReplicaElectionListener() {
  zkUtils.zkClient.unsubscribeDataChanges(ZkUtils.PreferredReplicaLeaderElectionPath, preferredReplicaElectionListener)
}
// Unsubscribes the per-partition ISR change listeners of every in-flight reassignment
// (called on controller resignation).
private def deregisterPartitionReassignmentIsrChangeListeners() {
  controllerContext.partitionsBeingReassigned.foreach {
    case (topicAndPartition, reassignedPartitionsContext) =>
      val zkPartitionPath = getTopicPartitionLeaderAndIsrPath(topicAndPartition.topic, topicAndPartition.partition)
      zkUtils.zkClient.unsubscribeDataChanges(zkPartitionPath, reassignedPartitionsContext.isrChangeListener)
  }
}
// Loads the persisted controller epoch and its znode version into the controller context.
// If the epoch path does not exist yet (first controller ever), the cached values are left as-is.
private def readControllerEpochFromZookeeper() {
  // initialize the controller epoch and zk version by reading from zookeeper
  if(controllerContext.zkUtils.pathExists(ZkUtils.ControllerEpochPath)) {
    val epochData = controllerContext.zkUtils.readData(ZkUtils.ControllerEpochPath)
    controllerContext.epoch = epochData._1.toInt
    controllerContext.epochZkVersion = epochData._2.getVersion
    info("Initialized controller epoch to %d and zk version %d".format(controllerContext.epoch, controllerContext.epochZkVersion))
  }
}
/**
 * Removes one partition from the set of in-flight reassignments: unsubscribes its ISR change
 * listener (if a reassignment context was registered), rewrites the admin reassignment znode
 * without this partition, and drops it from the in-memory cache.
 */
def removePartitionFromReassignedPartitions(topicAndPartition: TopicAndPartition) {
  // stop watching the ISR changes for this partition, if we ever started
  controllerContext.partitionsBeingReassigned.get(topicAndPartition).foreach { reassignContext =>
    zkUtils.zkClient.unsubscribeDataChanges(
      getTopicPartitionLeaderAndIsrPath(topicAndPartition.topic, topicAndPartition.partition),
      reassignContext.isrChangeListener)
  }
  // read the current list of reassigned partitions from zookeeper
  val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned()
  // write back the list without this partition
  val updatedPartitionsBeingReassigned = partitionsBeingReassigned - topicAndPartition
  zkUtils.updatePartitionReassignmentData(updatedPartitionsBeingReassigned.mapValues(_.newReplicas))
  // update the cache. NO-OP if the partition's reassignment was never started
  controllerContext.partitionsBeingReassigned.remove(topicAndPartition)
}
/**
 * Writes the full replica assignment for a topic to its znode.
 *
 * @param newReplicaAssignmentForTopic complete partition -> replicas map for the topic
 * @throws IllegalStateException if the topic znode does not exist
 * @throws KafkaException        for any other ZK failure
 */
def updateAssignedReplicasForPartition(topicAndPartition: TopicAndPartition,
                                       newReplicaAssignmentForTopic: Map[TopicAndPartition, Seq[Int]]) {
  try {
    val zkPath = getTopicPath(topicAndPartition.topic)
    val jsonPartitionMap = zkUtils.replicaAssignmentZkData(newReplicaAssignmentForTopic.map(e => e._1.partition.toString -> e._2))
    zkUtils.updatePersistentPath(zkPath, jsonPartitionMap)
    debug("Updated path %s with %s for replica assignment".format(zkPath, jsonPartitionMap))
  } catch {
    case _: ZkNoNodeException => throw new IllegalStateException("Topic %s doesn't exist".format(topicAndPartition.topic))
    // NOTE(review): rethrowing with e2.toString drops the original stack trace/cause — consider
    // passing e2 as the cause if KafkaException supports it; confirm against the constructor.
    case e2: Throwable => throw new KafkaException(e2.toString)
  }
}
/**
 * Cleans up after a preferred replica election attempt: logs success/failure per partition
 * (success == current leader is the first assigned replica), deletes the admin election znode
 * unless the election was auto-rebalance triggered, and clears the in-memory election set.
 */
def removePartitionsFromPreferredReplicaElection(partitionsToBeRemoved: Set[TopicAndPartition],
                                                 isTriggeredByAutoRebalance : Boolean) {
  for(partition <- partitionsToBeRemoved) {
    // check the status
    val currentLeader = controllerContext.partitionLeadershipInfo(partition).leaderAndIsr.leader
    val preferredReplica = controllerContext.partitionReplicaAssignment(partition).head
    if(currentLeader == preferredReplica) {
      info("Partition %s completed preferred replica leader election. New leader is %d".format(partition, preferredReplica))
    } else {
      warn("Partition %s failed to complete preferred replica leader election. Leader is %d".format(partition, currentLeader))
    }
  }
  // only admin-triggered elections have a znode to remove
  if (!isTriggeredByAutoRebalance)
    zkUtils.deletePath(ZkUtils.PreferredReplicaLeaderElectionPath)
  controllerContext.partitionsUndergoingPreferredReplicaElection --= partitionsToBeRemoved
}
/**
* Send the leader information for selected partitions to selected brokers so that they can correctly respond to
* metadata requests
*
* @param brokers The brokers that the update metadata request should be sent to
*/
def sendUpdateMetadataRequest(brokers: Seq[Int], partitions: Set[TopicAndPartition] = Set.empty[TopicAndPartition]) {
  // Batch and send an UpdateMetadata request for the given partitions (empty set means the
  // batch decides the scope) to the given brokers.
  try {
    brokerRequestBatch.newBatch()
    brokerRequestBatch.addUpdateMetadataRequestForBrokers(brokers, partitions)
    brokerRequestBatch.sendRequestsToBrokers(epoch)
  } catch {
    case e : IllegalStateException => {
      // A non-empty leftover batch means controller-internal inconsistency: resign.
      error("Forcing the controller to resign")
      brokerRequestBatch.clear()
      triggerControllerMove()
      throw e
    }
  }
}
/**
* Removes a given partition replica from the ISR; if it is not the current
* leader and there are sufficient remaining replicas in ISR.
*
* @param topic topic
* @param partition partition
* @param replicaId replica Id
* @return the new leaderAndIsr (with the replica removed if it was present),
* or None if leaderAndIsr is empty.
*/
def removeReplicaFromIsr(topic: String, partition: Int, replicaId: Int): Option[LeaderIsrAndControllerEpoch] = {
  val topicAndPartition = TopicAndPartition(topic, partition)
  debug("Removing replica %d from ISR %s for partition %s.".format(replicaId,
    controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.isr.mkString(","), topicAndPartition))
  var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None
  var zkWriteCompleteOrUnnecessary = false
  // Optimistic-concurrency loop: re-read leader/ISR from ZK and retry the conditional write
  // until it succeeds or turns out to be unnecessary.
  while (!zkWriteCompleteOrUnnecessary) {
    // refresh leader and isr from zookeeper again
    val leaderIsrAndEpochOpt = ReplicationUtils.getLeaderIsrAndEpochForPartition(zkUtils, topic, partition)
    zkWriteCompleteOrUnnecessary = leaderIsrAndEpochOpt match {
      case Some(leaderIsrAndEpoch) => // increment the leader epoch even if the ISR changes
        val leaderAndIsr = leaderIsrAndEpoch.leaderAndIsr
        val controllerEpoch = leaderIsrAndEpoch.controllerEpoch
        // a newer controller epoch in ZK means we were deposed: abort instead of clobbering
        if(controllerEpoch > epoch)
          throw new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
            "means the current controller with epoch %d went through a soft failure and another ".format(epoch) +
            "controller was elected with epoch %d. Aborting state change by this controller".format(controllerEpoch))
        if (leaderAndIsr.isr.contains(replicaId)) {
          // if the replica to be removed from the ISR is also the leader, set the new leader value to -1
          val newLeader = if (replicaId == leaderAndIsr.leader) LeaderAndIsr.NoLeader else leaderAndIsr.leader
          var newIsr = leaderAndIsr.isr.filter(b => b != replicaId)
          // if the replica to be removed from the ISR is the last surviving member of the ISR and unclean leader election
          // is disallowed for the corresponding topic, then we must preserve the ISR membership so that the replica can
          // eventually be restored as the leader.
          if (newIsr.isEmpty && !LogConfig.fromProps(config.originals, AdminUtils.fetchEntityConfig(zkUtils,
            ConfigType.Topic, topicAndPartition.topic)).uncleanLeaderElectionEnable) {
            info("Retaining last ISR %d of partition %s since unclean leader election is disabled".format(replicaId, topicAndPartition))
            newIsr = leaderAndIsr.isr
          }
          val newLeaderAndIsr = leaderAndIsr.newLeaderAndIsr(newLeader, newIsr)
          // update the new leadership decision in zookeeper or retry
          val (updateSucceeded, newVersion) = ReplicationUtils.updateLeaderAndIsr(zkUtils, topic, partition,
            newLeaderAndIsr, epoch, leaderAndIsr.zkVersion)
          val leaderWithNewVersion = newLeaderAndIsr.withZkVersion(newVersion)
          finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(leaderWithNewVersion, epoch))
          // NOTE(review): the cache is updated even when the conditional ZK write failed and the
          // loop will retry — the retry overwrites it, but confirm this interim state is benign.
          controllerContext.partitionLeadershipInfo.put(topicAndPartition, finalLeaderIsrAndControllerEpoch.get)
          if (updateSucceeded) {
            info(s"New leader and ISR for partition $topicAndPartition is $leaderWithNewVersion")
          }
          updateSucceeded
        } else {
          // replica already absent from ISR: nothing to write, just refresh the cache
          warn(s"Cannot remove replica $replicaId from ISR of partition $topicAndPartition since it is not in the ISR." +
            s" Leader = ${leaderAndIsr.leader} ; ISR = ${leaderAndIsr.isr}")
          finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(leaderAndIsr, epoch))
          controllerContext.partitionLeadershipInfo.put(topicAndPartition, finalLeaderIsrAndControllerEpoch.get)
          true
        }
      case None =>
        warn("Cannot remove replica %d from ISR of %s - leaderAndIsr is empty.".format(replicaId, topicAndPartition))
        true
    }
  }
  finalLeaderIsrAndControllerEpoch
}
/**
* Does not change leader or isr, but just increments the leader epoch
*
* @param topic topic
* @param partition partition
* @return the new leaderAndIsr with an incremented leader epoch, or None if leaderAndIsr is empty.
*/
private def updateLeaderEpoch(topic: String, partition: Int): Option[LeaderIsrAndControllerEpoch] = {
  val topicAndPartition = TopicAndPartition(topic, partition)
  debug("Updating leader epoch for partition %s.".format(topicAndPartition))
  var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None
  var zkWriteCompleteOrUnnecessary = false
  // Optimistic-concurrency loop: re-read leader/ISR from ZK and retry the conditional write
  // until the epoch bump lands.
  while (!zkWriteCompleteOrUnnecessary) {
    // refresh leader and isr from zookeeper again
    val leaderIsrAndEpochOpt = ReplicationUtils.getLeaderIsrAndEpochForPartition(zkUtils, topic, partition)
    zkWriteCompleteOrUnnecessary = leaderIsrAndEpochOpt match {
      case Some(leaderIsrAndEpoch) =>
        val leaderAndIsr = leaderIsrAndEpoch.leaderAndIsr
        val controllerEpoch = leaderIsrAndEpoch.controllerEpoch
        // a newer controller epoch in ZK means we were deposed: abort instead of clobbering
        if(controllerEpoch > epoch)
          throw new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
            "means the current controller with epoch %d went through a soft failure and another ".format(epoch) +
            "controller was elected with epoch %d. Aborting state change by this controller".format(controllerEpoch))
        // increment the leader epoch even if there are no leader or isr changes to allow the leader to cache the expanded
        // assigned replica list
        val newLeaderAndIsr = leaderAndIsr.newEpochAndZkVersion
        // update the new leadership decision in zookeeper or retry
        val (updateSucceeded, newVersion) = ReplicationUtils.updateLeaderAndIsr(zkUtils, topic,
          partition, newLeaderAndIsr, epoch, leaderAndIsr.zkVersion)
        val leaderWithNewVersion = newLeaderAndIsr.withZkVersion(newVersion)
        finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(leaderWithNewVersion, epoch))
        if (updateSucceeded) {
          info(s"Updated leader epoch for partition $topicAndPartition to ${leaderWithNewVersion.leaderEpoch}")
        }
        updateSucceeded
      case None =>
        throw new IllegalStateException(s"Cannot update leader epoch for partition $topicAndPartition as " +
          "leaderAndIsr path is empty. This could mean we somehow tried to reassign a partition that doesn't exist")
        true
    }
  }
  finalLeaderIsrAndControllerEpoch
}
/**
 * Auto-rebalance check: for every broker that is the preferred (first-listed) replica of some
 * partitions, computes the fraction of those partitions it does not currently lead and, if the
 * fraction exceeds `leader.imbalance.per.broker.percentage`, triggers a preferred replica
 * election for each such partition — but only while the broker is alive and no reassignment,
 * election, or deletion is in progress for it.
 */
private def checkAndTriggerPartitionRebalance(): Unit = {
  trace("checking need to trigger partition rebalance")
  // assignments grouped by preferred replica, excluding topics queued for deletion
  val preferredReplicasForTopicsByBrokers: Map[Int, Map[TopicAndPartition, Seq[Int]]] =
    controllerContext.partitionReplicaAssignment
      .filterNot(p => topicDeletionManager.isTopicQueuedUpForDeletion(p._1.topic))
      .groupBy { case (_, assignedReplicas) => assignedReplicas.head }
  debug("preferred replicas by broker " + preferredReplicasForTopicsByBrokers)
  // for each broker, check if a preferred replica election needs to be triggered
  preferredReplicasForTopicsByBrokers.foreach { case (leaderBroker, topicAndPartitionsForBroker) =>
    // partitions whose preferred replica is this broker but whose current leader is another one
    val topicsNotInPreferredReplica = topicAndPartitionsForBroker.filter { case (topicPartition, _) =>
      controllerContext.partitionLeadershipInfo.contains(topicPartition) &&
        controllerContext.partitionLeadershipInfo(topicPartition).leaderAndIsr.leader != leaderBroker
    }
    debug("topics not in preferred replica " + topicsNotInPreferredReplica)
    val imbalanceRatio = topicsNotInPreferredReplica.size.toDouble / topicAndPartitionsForBroker.size
    trace("leader imbalance ratio for broker %d is %f".format(leaderBroker, imbalanceRatio))
    // check ratio and if greater than desired ratio, trigger a rebalance for the topic partitions
    // that need to be on this broker
    if (imbalanceRatio > (config.leaderImbalancePerBrokerPercentage.toDouble / 100)) {
      topicsNotInPreferredReplica.keys.foreach { topicPartition =>
        // do this check only if the broker is live and there are no partitions being reassigned currently
        // and preferred replica election is not in progress
        if (controllerContext.liveBrokerIds.contains(leaderBroker) &&
            controllerContext.partitionsBeingReassigned.isEmpty &&
            controllerContext.partitionsUndergoingPreferredReplicaElection.isEmpty &&
            !topicDeletionManager.isTopicQueuedUpForDeletion(topicPartition.topic) &&
            controllerContext.allTopics.contains(topicPartition.topic)) {
          onPreferredReplicaElection(Set(topicPartition), true)
        }
      }
    }
  }
}
// Id of the broker currently registered as controller in ZooKeeper, or -1 if none is elected.
def getControllerID(): Int = {
  controllerContext.zkUtils.readDataMaybeNull(ZkUtils.ControllerPath)._1
    .map(KafkaController.parseControllerId)
    .getOrElse(-1)
}
// Enqueues a controller event for single-threaded processing by the ControllerEventThread.
def addToControllerEventQueue(controllerEvent: ControllerEvent): Unit = {
  controllerEventQueue.put(controllerEvent)
}
/**
 * Background thread that drains the controller event queue one event at a time, giving the
 * controller a single-threaded execution model for all state changes.
 */
class ControllerEventThread(name: String) extends ShutdownableThread(name = name) {
  override def doWork(): Unit = {
    // Blocks until an event is available.
    val controllerEvent = controllerEventQueue.take()
    try {
      controllerEvent.process()
    } catch {
      // Catch-all so a single failing event can never kill the event thread.
      case e: Throwable => error("Error processing event " + controllerEvent, e)
    }
    // Refresh metric gauges after every event, whether it succeeded or not.
    updateMetrics()
  }
}
/**
 * Handles a change of the broker-ids znode children: diffs the current broker list against the
 * cached live/shutting-down set, updates the controller channel manager, and invokes the
 * broker startup/failure callbacks.
 */
case class BrokerChange(currentBrokerList: Seq[String]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    ControllerStats.leaderElectionTimer.time {
      try {
        // Resolve znode names to broker metadata; brokers whose registration vanished are dropped.
        val curBrokers = currentBrokerList.map(_.toInt).toSet.flatMap(zkUtils.getBrokerInfo)
        val curBrokerIds = curBrokers.map(_.id)
        val liveOrShuttingDownBrokerIds = controllerContext.liveOrShuttingDownBrokerIds
        val newBrokerIds = curBrokerIds -- liveOrShuttingDownBrokerIds
        val deadBrokerIds = liveOrShuttingDownBrokerIds -- curBrokerIds
        val newBrokers = curBrokers.filter(broker => newBrokerIds(broker.id))
        controllerContext.liveBrokers = curBrokers
        val newBrokerIdsSorted = newBrokerIds.toSeq.sorted
        val deadBrokerIdsSorted = deadBrokerIds.toSeq.sorted
        val liveBrokerIdsSorted = curBrokerIds.toSeq.sorted
        info("Newly added brokers: %s, deleted brokers: %s, all live brokers: %s"
          .format(newBrokerIdsSorted.mkString(","), deadBrokerIdsSorted.mkString(","), liveBrokerIdsSorted.mkString(",")))
        // Open channels to new brokers (and close dead ones) before firing the callbacks,
        // which may send requests over those channels.
        newBrokers.foreach(controllerContext.controllerChannelManager.addBroker)
        deadBrokerIds.foreach(controllerContext.controllerChannelManager.removeBroker)
        if(newBrokerIds.nonEmpty)
          onBrokerStartup(newBrokerIdsSorted)
        if(deadBrokerIds.nonEmpty)
          onBrokerFailure(deadBrokerIdsSorted)
      } catch {
        case e: Throwable => error("Error while handling broker changes", e)
      }
    }
  }
}
/**
 * Handles creation/deletion of topic znodes: refreshes the cached topic set and replica
 * assignments, and triggers the new-topic callback for any added topics.
 */
case class TopicChange(topics: Set[String]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    try {
      val newTopics = topics -- controllerContext.allTopics
      val deletedTopics = controllerContext.allTopics -- topics
      controllerContext.allTopics = topics
      val addedPartitionReplicaAssignment = zkUtils.getReplicaAssignmentForTopics(newTopics.toSeq)
      // Drop assignments of deleted topics, then merge in the assignments of new ones.
      controllerContext.partitionReplicaAssignment = controllerContext.partitionReplicaAssignment.filter(p =>
        !deletedTopics.contains(p._1.topic))
      controllerContext.partitionReplicaAssignment.++=(addedPartitionReplicaAssignment)
      info("New topics: [%s], deleted topics: [%s], new partition replica assignment [%s]".format(newTopics,
        deletedTopics, addedPartitionReplicaAssignment))
      if (newTopics.nonEmpty)
        onNewTopicCreation(newTopics, addedPartitionReplicaAssignment.keySet)
    } catch {
      case e: Throwable => error("Error while handling new topic", e)
    }
  }
}
/**
 * Handles a partition-count increase for an existing topic: detects partitions present in ZK
 * but absent from the cache and triggers their creation (unless the topic is being deleted).
 */
case class PartitionModifications(topic: String) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    try {
      val partitionReplicaAssignment = zkUtils.getReplicaAssignmentForTopics(List(topic))
      val partitionsToBeAdded = partitionReplicaAssignment.filter(p =>
        !controllerContext.partitionReplicaAssignment.contains(p._1))
      if(topicDeletionManager.isTopicQueuedUpForDeletion(topic))
        error("Skipping adding partitions %s for topic %s since it is currently being deleted"
          .format(partitionsToBeAdded.map(_._1.partition).mkString(","), topic))
      else {
        if (partitionsToBeAdded.nonEmpty) {
          info("New partitions to be added %s".format(partitionsToBeAdded))
          // Update the cache before firing the callback so downstream code sees the new partitions.
          controllerContext.partitionReplicaAssignment.++=(partitionsToBeAdded)
          onNewPartitionCreation(partitionsToBeAdded.keySet)
        }
      }
    } catch {
      case e: Throwable => error("Error while handling add partitions for topic " + topic, e)
    }
  }
}
/**
 * Handles topic deletion requests from the /admin/delete_topics path.
 * Non-existent topics are acknowledged and removed; existing topics are handed to the
 * topic deletion manager (or the request is cleared if deletion is disabled).
 */
case class TopicDeletion(var topicsToBeDeleted: Set[String]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    debug("Delete topics listener fired for topics %s to be deleted".format(topicsToBeDeleted.mkString(",")))
    val nonExistentTopics = topicsToBeDeleted -- controllerContext.allTopics
    if (nonExistentTopics.nonEmpty) {
      warn("Ignoring request to delete non-existing topics " + nonExistentTopics.mkString(","))
      // Clean up the stale deletion markers so the request does not linger in ZK.
      nonExistentTopics.foreach(topic => zkUtils.deletePathRecursive(getDeleteTopicPath(topic)))
    }
    topicsToBeDeleted --= nonExistentTopics
    if (config.deleteTopicEnable) {
      if (topicsToBeDeleted.nonEmpty) {
        info("Starting topic deletion for topics " + topicsToBeDeleted.mkString(","))
        // mark topic ineligible for deletion if other state changes are in progress
        topicsToBeDeleted.foreach { topic =>
          val preferredReplicaElectionInProgress =
            controllerContext.partitionsUndergoingPreferredReplicaElection.map(_.topic).contains(topic)
          val partitionReassignmentInProgress =
            controllerContext.partitionsBeingReassigned.keySet.map(_.topic).contains(topic)
          if (preferredReplicaElectionInProgress || partitionReassignmentInProgress)
            topicDeletionManager.markTopicIneligibleForDeletion(Set(topic))
        }
        // add topic to deletion list
        topicDeletionManager.enqueueTopicsForDeletion(topicsToBeDeleted)
      }
    } else {
      // If delete topic is disabled remove entries under zookeeper path : /admin/delete_topics
      for (topic <- topicsToBeDeleted) {
        info("Removing " + getDeleteTopicPath(topic) + " since delete topic is disabled")
        zkUtils.zkClient.delete(getDeleteTopicPath(topic))
      }
    }
  }
}
/**
 * Handles a new partition-reassignment request: starts reassignment for every partition that
 * is not already being reassigned, skipping (and cleaning up) partitions of topics queued
 * for deletion.
 */
case class PartitionReassignment(partitionReassignment: Map[TopicAndPartition, Seq[Int]]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    // Ignore partitions for which a reassignment is already in flight.
    val partitionsToBeReassigned = partitionReassignment.filterNot(p => controllerContext.partitionsBeingReassigned.contains(p._1))
    partitionsToBeReassigned.foreach { partitionToBeReassigned =>
      if(topicDeletionManager.isTopicQueuedUpForDeletion(partitionToBeReassigned._1.topic)) {
        error("Skipping reassignment of partition %s for topic %s since it is currently being deleted"
          .format(partitionToBeReassigned._1, partitionToBeReassigned._1.topic))
        removePartitionFromReassignedPartitions(partitionToBeReassigned._1)
      } else {
        val context = ReassignedPartitionsContext(partitionToBeReassigned._2)
        initiateReassignReplicasForTopicPartition(partitionToBeReassigned._1, context)
      }
    }
  }
}
/**
 * Fired when the ISR of a partition under reassignment changes: once all reassigned replicas
 * have joined the ISR, the reassignment process is resumed.
 */
case class PartitionReassignmentIsrChange(topicAndPartition: TopicAndPartition, reassignedReplicas: Set[Int]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    try {
      // check if this partition is still being reassigned or not
      controllerContext.partitionsBeingReassigned.get(topicAndPartition) match {
        case Some(reassignedPartitionContext) =>
          // need to re-read leader and isr from zookeeper since the zkclient callback doesn't return the Stat object
          val newLeaderAndIsrOpt = zkUtils.getLeaderAndIsrForPartition(topicAndPartition.topic, topicAndPartition.partition)
          newLeaderAndIsrOpt match {
            case Some(leaderAndIsr) => // check if new replicas have joined ISR
              val caughtUpReplicas = reassignedReplicas & leaderAndIsr.isr.toSet
              if(caughtUpReplicas == reassignedReplicas) {
                // resume the partition reassignment process
                info("%d/%d replicas have caught up with the leader for partition %s being reassigned."
                  .format(caughtUpReplicas.size, reassignedReplicas.size, topicAndPartition) +
                  "Resuming partition reassignment")
                onPartitionReassignment(topicAndPartition, reassignedPartitionContext)
              }
              else {
                // Not all replicas caught up yet; wait for the next ISR-change notification.
                info("%d/%d replicas have caught up with the leader for partition %s being reassigned."
                  .format(caughtUpReplicas.size, reassignedReplicas.size, topicAndPartition) +
                  "Replica(s) %s still need to catch up".format((reassignedReplicas -- leaderAndIsr.isr.toSet).mkString(",")))
              }
            case None => error("Error handling reassignment of partition %s to replicas %s as it was never created"
              .format(topicAndPartition, reassignedReplicas.mkString(",")))
          }
        case None =>
          // Reassignment already finished or cancelled; nothing to do.
      }
    } catch {
      case e: Throwable => error("Error while handling partition reassignment", e)
    }
  }
}
/**
 * Handles ISR-change notifications written by partition leaders: refreshes the cached
 * leader/ISR state for the affected partitions and propagates updated metadata to all
 * live brokers. The notification znodes are always deleted afterwards, even on failure.
 */
case class IsrChangeNotification(sequenceNumbers: Seq[String]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    try {
      val topicAndPartitions = sequenceNumbers.flatMap(getTopicAndPartition).toSet
      if (topicAndPartitions.nonEmpty) {
        updateLeaderAndIsrCache(topicAndPartitions)
        processUpdateNotifications(topicAndPartitions)
      }
    } finally {
      // Delete the notifications. This is pure side effect, so use foreach rather than map
      // (the original map built and discarded a result collection).
      sequenceNumbers.foreach(x => controllerContext.zkUtils.deletePath(ZkUtils.IsrChangeNotificationPath + "/" + x))
    }
  }

  /** Broadcasts an UpdateMetadata request for the changed partitions to all live brokers. */
  private def processUpdateNotifications(topicAndPartitions: immutable.Set[TopicAndPartition]) {
    val liveBrokers: Seq[Int] = controllerContext.liveOrShuttingDownBrokerIds.toSeq
    debug("Sending MetadataRequest to Brokers:" + liveBrokers + " for TopicAndPartitions:" + topicAndPartitions)
    sendUpdateMetadataRequest(liveBrokers, topicAndPartitions)
  }

  /**
   * Reads one notification znode and extracts the topic-partitions it lists.
   * Returns an empty set if the znode is gone or its JSON is malformed.
   */
  private def getTopicAndPartition(child: String): Set[TopicAndPartition] = {
    val changeZnode: String = ZkUtils.IsrChangeNotificationPath + "/" + child
    val (jsonOpt, _) = controllerContext.zkUtils.readDataMaybeNull(changeZnode)
    if (jsonOpt.isDefined) {
      val json = Json.parseFull(jsonOpt.get)
      json match {
        case Some(m) =>
          val topicAndPartitions: mutable.Set[TopicAndPartition] = new mutable.HashSet[TopicAndPartition]()
          val isrChanges = m.asInstanceOf[Map[String, Any]]
          val topicAndPartitionList = isrChanges("partitions").asInstanceOf[List[Any]]
          topicAndPartitionList.foreach {
            case tp =>
              val topicAndPartition = tp.asInstanceOf[Map[String, Any]]
              val topic = topicAndPartition("topic").asInstanceOf[String]
              val partition = topicAndPartition("partition").asInstanceOf[Int]
              topicAndPartitions += TopicAndPartition(topic, partition)
          }
          topicAndPartitions
        case None =>
          error("Invalid topic and partition JSON: " + jsonOpt.get + " in ZK: " + changeZnode)
          Set.empty
      }
    } else {
      // Znode already deleted (e.g. by a previous controller); nothing to report.
      Set.empty
    }
  }
}
/**
 * Handles an admin-initiated preferred replica leader election, filtering out partitions that
 * are already undergoing election and partitions of topics queued for deletion.
 */
case class PreferredReplicaLeaderElection(partitionsForPreferredReplicaElection: Set[TopicAndPartition]) extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    if (controllerContext.partitionsUndergoingPreferredReplicaElection.nonEmpty)
      info("These partitions are already undergoing preferred replica election: %s"
        .format(controllerContext.partitionsUndergoingPreferredReplicaElection.mkString(",")))
    val partitions = partitionsForPreferredReplicaElection -- controllerContext.partitionsUndergoingPreferredReplicaElection
    val partitionsForTopicsToBeDeleted = partitions.filter(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic))
    if (partitionsForTopicsToBeDeleted.nonEmpty) {
      error("Skipping preferred replica election for partitions %s since the respective topics are being deleted"
        .format(partitionsForTopicsToBeDeleted))
    }
    onPreferredReplicaElection(partitions -- partitionsForTopicsToBeDeleted)
  }
}
/**
 * Periodic auto-rebalance event: checks leader imbalance and, regardless of the outcome,
 * reschedules itself for the next interval.
 */
case object AutoPreferredReplicaLeaderElection extends ControllerEvent {
  override def process(): Unit = {
    if (!isActive) return
    try {
      checkAndTriggerPartitionRebalance()
    } finally {
      // Always reschedule, even if the rebalance check threw.
      scheduleAutoLeaderRebalanceTask(delay = config.leaderImbalanceCheckIntervalSeconds, unit = TimeUnit.SECONDS)
    }
  }
}
/**
 * Handles a broker's controlled-shutdown request. Moves leadership away from the shutting-down
 * broker (or takes it out of the ISR for partitions it follows) and reports back, via the
 * callback, the replicated partitions the broker still leads.
 */
case class ControlledShutdown(id: Int, controlledShutdownCallback: Try[Set[TopicAndPartition]] => Unit) extends ControllerEvent {

  override def process(): Unit = {
    // Wrap in Try so failures are delivered to the caller instead of being swallowed.
    val controlledShutdownResult = Try { doControlledShutdown(id) }
    controlledShutdownCallback(controlledShutdownResult)
  }

  /**
   * Performs the shutdown protocol for broker `id`.
   * @return the set of replicated partitions the broker still leads after processing.
   */
  private def doControlledShutdown(id: Int): Set[TopicAndPartition] = {
    if (!isActive) {
      throw new ControllerMovedException("Controller moved to another broker. Aborting controlled shutdown")
    }
    info("Shutting down broker " + id)
    if (!controllerContext.liveOrShuttingDownBrokerIds.contains(id))
      throw new BrokerNotAvailableException("Broker id %d does not exist.".format(id))
    controllerContext.shuttingDownBrokerIds.add(id)
    debug("All shutting down brokers: " + controllerContext.shuttingDownBrokerIds.mkString(","))
    debug("Live brokers: " + controllerContext.liveBrokerIds.mkString(","))
    // All partitions hosted on the broker, paired with their replication factor.
    val allPartitionsAndReplicationFactorOnBroker: Set[(TopicAndPartition, Int)] =
      controllerContext.partitionsOnBroker(id)
        .map(topicAndPartition => (topicAndPartition, controllerContext.partitionReplicaAssignment(topicAndPartition).size))
    allPartitionsAndReplicationFactorOnBroker.foreach {
      case(topicAndPartition, replicationFactor) =>
        controllerContext.partitionLeadershipInfo.get(topicAndPartition).foreach { currLeaderIsrAndControllerEpoch =>
          // Unreplicated partitions (RF == 1) are left alone: there is no other replica to lead.
          if (replicationFactor > 1) {
            if (currLeaderIsrAndControllerEpoch.leaderAndIsr.leader == id) {
              // If the broker leads the topic partition, transition the leader and update isr. Updates zk and
              // notifies all affected brokers
              partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition,
                controlledShutdownPartitionLeaderSelector)
            } else {
              // Stop the replica first. The state change below initiates ZK changes which should take some time
              // before which the stop replica request should be completed (in most cases)
              try {
                brokerRequestBatch.newBatch()
                brokerRequestBatch.addStopReplicaRequestForBrokers(Seq(id), topicAndPartition.topic,
                  topicAndPartition.partition, deletePartition = false)
                brokerRequestBatch.sendRequestsToBrokers(epoch)
              } catch {
                case e : IllegalStateException => {
                  // Resign if the controller is in an illegal state
                  error("Forcing the controller to resign")
                  brokerRequestBatch.clear()
                  triggerControllerMove()
                  throw e
                }
              }
              // If the broker is a follower, updates the isr in ZK and notifies the current leader
              replicaStateMachine.handleStateChanges(Set(PartitionAndReplica(topicAndPartition.topic,
                topicAndPartition.partition, id)), OfflineReplica)
            }
          }
        }
    }
    // Replicated partitions still led by this broker after the transitions above; the caller
    // retries the shutdown until this set is empty.
    def replicatedPartitionsBrokerLeads() = {
      trace("All leaders = " + controllerContext.partitionLeadershipInfo.mkString(","))
      controllerContext.partitionLeadershipInfo.filter {
        case (topicAndPartition, leaderIsrAndControllerEpoch) =>
          leaderIsrAndControllerEpoch.leaderAndIsr.leader == id && controllerContext.partitionReplicaAssignment(topicAndPartition).size > 1
      }.keys
    }
    replicatedPartitionsBrokerLeads().toSet
  }
}
/**
 * Callback event for a StopReplica response issued during topic deletion: marks failed replicas
 * as ineligible for deletion and completes deletion for the successful ones.
 */
case class TopicDeletionStopReplicaResult(stopReplicaResponseObj: AbstractResponse, replicaId: Int) extends ControllerEvent {
  override def process(): Unit = {
    import JavaConverters._
    if (!isActive) return
    val stopReplicaResponse = stopReplicaResponseObj.asInstanceOf[StopReplicaResponse]
    debug("Delete topic callback invoked for %s".format(stopReplicaResponse))
    val responseMap = stopReplicaResponse.responses.asScala
    // A top-level error means every partition in the request failed.
    val partitionsInError =
      if (stopReplicaResponse.error != Errors.NONE) responseMap.keySet
      else responseMap.filter { case (_, error) => error != Errors.NONE }.keySet
    val replicasInError = partitionsInError.map(p => PartitionAndReplica(p.topic, p.partition, replicaId))
    // move all the failed replicas to ReplicaDeletionIneligible
    topicDeletionManager.failReplicaDeletion(replicasInError)
    if (replicasInError.size != responseMap.size) {
      // some replicas could have been successfully deleted
      val deletedReplicas = responseMap.keySet -- partitionsInError
      topicDeletionManager.completeReplicaDeletion(deletedReplicas.map(p => PartitionAndReplica(p.topic, p.partition, replicaId)))
    }
  }
}
/** Initial event: registers the session/controller-change listeners, then attempts election. */
case object Startup extends ControllerEvent {

  override def process(): Unit = {
    registerSessionExpirationListener()
    registerControllerChangeListener()
    elect()
  }
}
/**
 * Fired when the controller znode's data changes: records the new controller id and resigns
 * if this broker was the active controller but no longer is.
 */
case class ControllerChange(newControllerId: Int) extends ControllerEvent {
  override def process(): Unit = {
    val wasActiveBeforeChange = isActive
    activeControllerId.set(newControllerId)
    if (wasActiveBeforeChange && !isActive) {
      onControllerResignation()
    }
  }
}
/**
 * Fired on controller znode deletion or ZK session re-establishment: re-reads the controller
 * id, resigns if this broker lost the role, then attempts a fresh election.
 */
case object Reelect extends ControllerEvent {
  override def process(): Unit = {
    val wasActiveBeforeChange = isActive
    activeControllerId.set(getControllerID())
    if (wasActiveBeforeChange && !isActive) {
      onControllerResignation()
    }
    elect()
  }
}
/**
 * Recomputes the offline-partition and preferred-replica-imbalance gauges.
 * Both report 0 when this broker is not the active controller.
 */
private def updateMetrics(): Unit = {
  val opc = if (!isActive)
    0
  else
    // Partitions whose leader is neither live nor shutting down, excluding topics queued for deletion.
    controllerContext.partitionLeadershipInfo.count(p =>
      !controllerContext.liveOrShuttingDownBrokerIds.contains(p._2.leaderAndIsr.leader) &&
        !topicDeletionManager.isTopicQueuedUpForDeletion(p._1.topic)
    )
  offlinePartitionCount.set(opc)
  val pric = if (!isActive)
    0
  else
    // Partitions whose current leader differs from the first (preferred) replica in the assignment.
    controllerContext.partitionReplicaAssignment.count { case (topicPartition, replicas) =>
      controllerContext.partitionLeadershipInfo.contains(topicPartition) &&
        controllerContext.partitionLeadershipInfo(topicPartition).leaderAndIsr.leader != replicas.head &&
        !topicDeletionManager.isTopicQueuedUpForDeletion(topicPartition.topic)
    }
  preferredReplicaImbalanceCount.set(pric)
}
/**
 * Abdicates the controller role: clears the cached controller id and deletes the controller
 * znode so that a new election is triggered across the cluster.
 */
private def triggerControllerMove(): Unit = {
  activeControllerId.set(-1)
  controllerContext.zkUtils.deletePath(ZkUtils.ControllerPath)
}
/**
 * Attempts to become the controller by creating the ephemeral controller znode.
 * On success, records this broker as controller and runs the failover callback; if another
 * broker won the race, the result is logged and this broker stays a follower.
 */
def elect(): Unit = {
  val timestamp = time.milliseconds
  val electString = ZkUtils.controllerZkData(config.brokerId, timestamp)

  activeControllerId.set(getControllerID())
  /*
   * We can get here during the initial startup and the handleDeleted ZK callback. Because of the potential race condition,
   * it's possible that the controller has already been elected when we get here. This check will prevent the following
   * createEphemeralPath method from getting into an infinite loop if this broker is already the controller.
   */
  if(activeControllerId.get() != -1) {
    debug("Broker %d has been elected as the controller, so stopping the election process.".format(activeControllerId.get()))
    return
  }

  try {
    val zkCheckedEphemeral = new ZKCheckedEphemeral(ZkUtils.ControllerPath,
                                                    electString,
                                                    controllerContext.zkUtils.zkConnection.getZookeeper,
                                                    controllerContext.zkUtils.isSecure)
    zkCheckedEphemeral.create()
    info(config.brokerId + " successfully elected as the controller")
    activeControllerId.set(config.brokerId)
    onControllerFailover()
  } catch {
    case _: ZkNodeExistsException =>
      // If someone else has written the path, then
      activeControllerId.set(getControllerID)

      if (activeControllerId.get() != -1)
        debug("Broker %d was elected as controller instead of broker %d".format(activeControllerId.get(), config.brokerId))
      else
        warn("A controller has been elected but just resigned, this will result in another round of election")

    case e2: Throwable =>
      // Any other failure is unrecoverable for this attempt: resign so another broker can try.
      error("Error while electing or becoming controller on broker %d".format(config.brokerId), e2)
      triggerControllerMove()
  }
}
}
/**
 * This is the zookeeper listener that triggers all the state transitions for a replica.
 * It forwards the child list of the broker-ids path to the controller event queue.
 */
class BrokerChangeListener(controller: KafkaController) extends IZkChildListener with Logging {
  override def handleChildChange(parentPath: String, currentChilds: java.util.List[String]): Unit = {
    import JavaConverters._
    controller.addToControllerEventQueue(controller.BrokerChange(currentChilds.asScala))
  }
}
/** ZK child listener for the topics path; enqueues a TopicChange event with the current topic set. */
class TopicChangeListener(controller: KafkaController) extends IZkChildListener with Logging {
  override def handleChildChange(parentPath: String, currentChilds: java.util.List[String]): Unit = {
    import JavaConverters._
    controller.addToControllerEventQueue(controller.TopicChange(currentChilds.asScala.toSet))
  }
}
/** ZK data listener for a single topic's assignment znode; enqueues a PartitionModifications event. */
class PartitionModificationsListener(controller: KafkaController, topic: String) extends IZkDataListener with Logging {
  override def handleDataChange(dataPath: String, data: Any): Unit = {
    controller.addToControllerEventQueue(controller.PartitionModifications(topic))
  }

  // Topic deletion is handled elsewhere (TopicDeletionListener), so nothing to do here.
  override def handleDataDeleted(dataPath: String): Unit = {}
}
/**
 * Delete topics includes the following operations -
 * 1. Add the topic to be deleted to the delete topics cache, only if the topic exists
 * 2. If there are topics to be deleted, it signals the delete topic thread
 */
class TopicDeletionListener(controller: KafkaController) extends IZkChildListener with Logging {
  override def handleChildChange(parentPath: String, currentChilds: java.util.List[String]): Unit = {
    import JavaConverters._
    // The children of /admin/delete_topics are the topic names requested for deletion.
    controller.addToControllerEventQueue(controller.TopicDeletion(currentChilds.asScala.toSet))
  }
}
/**
 * Starts the partition reassignment process unless -
 * 1. Partition previously existed
 * 2. New replicas are the same as existing replicas
 * 3. Any replica in the new set of replicas are dead
 * If any of the above conditions are satisfied, it logs an error and removes the partition from list of reassigned
 * partitions.
 */
class PartitionReassignmentListener(controller: KafkaController) extends IZkDataListener with Logging {
  override def handleDataChange(dataPath: String, data: Any): Unit = {
    // Parse the reassignment JSON written under /admin/reassign_partitions.
    val partitionReassignment = ZkUtils.parsePartitionReassignmentData(data.toString)
    controller.addToControllerEventQueue(controller.PartitionReassignment(partitionReassignment))
  }

  override def handleDataDeleted(dataPath: String): Unit = {}
}
/** ZK data listener on a partition's leaderAndIsr znode while it is being reassigned. */
class PartitionReassignmentIsrChangeListener(controller: KafkaController, topic: String, partition: Int, reassignedReplicas: Set[Int]) extends IZkDataListener with Logging {
  override def handleDataChange(dataPath: String, data: Any): Unit = {
    controller.addToControllerEventQueue(controller.PartitionReassignmentIsrChange(TopicAndPartition(topic, partition), reassignedReplicas))
  }

  override def handleDataDeleted(dataPath: String): Unit = {}
}
/**
 * Called when replica leader initiates isr change.
 * Forwards the notification sequence-number znode names to the controller event queue.
 */
class IsrChangeNotificationListener(controller: KafkaController) extends IZkChildListener with Logging {
  override def handleChildChange(parentPath: String, currentChilds: java.util.List[String]): Unit = {
    import JavaConverters._
    controller.addToControllerEventQueue(controller.IsrChangeNotification(currentChilds.asScala))
  }
}
object IsrChangeNotificationListener {
  // Version of the ISR-change notification JSON payload.
  val version: Long = 1L
}
/**
 * Starts the preferred replica leader election for the list of partitions specified under
 * /admin/preferred_replica_election -
 */
class PreferredReplicaElectionListener(controller: KafkaController) extends IZkDataListener with Logging {
  override def handleDataChange(dataPath: String, data: Any): Unit = {
    // Parse the partition list JSON from the admin znode.
    val partitions = PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(data.toString)
    controller.addToControllerEventQueue(controller.PreferredReplicaLeaderElection(partitions))
  }

  override def handleDataDeleted(dataPath: String): Unit = {}
}
/**
 * Watches the controller znode: data changes become ControllerChange events; deletion of the
 * znode (controller resignation/death) triggers a Reelect event.
 */
class ControllerChangeListener(controller: KafkaController) extends IZkDataListener {
  override def handleDataChange(dataPath: String, data: Any): Unit = {
    controller.addToControllerEventQueue(controller.ControllerChange(KafkaController.parseControllerId(data.toString)))
  }

  override def handleDataDeleted(dataPath: String): Unit = {
    controller.addToControllerEventQueue(controller.Reelect)
  }
}
/** ZK state listener: re-runs controller election after a session expires and is re-created. */
class SessionExpirationListener(controller: KafkaController) extends IZkStateListener with Logging {

  override def handleStateChanged(state: KeeperState) {
    // do nothing, since zkclient will do reconnect for us.
  }

  /**
   * Called after the zookeeper session has expired and a new session has been created. You would have to re-create
   * any ephemeral nodes here.
   *
   * @throws Exception On any error.
   */
  @throws[Exception]
  override def handleNewSession(): Unit = {
    controller.addToControllerEventQueue(controller.Reelect)
  }

  override def handleSessionEstablishmentError(error: Throwable): Unit = {
    //no-op handleSessionEstablishmentError in KafkaHealthCheck should handle this error in its handleSessionEstablishmentError
  }
}
// Mutable per-partition state tracked while a reassignment is in progress: the target replica
// list and the ISR-change listener registered for the partition.
case class ReassignedPartitionsContext(var newReplicas: Seq[Int] = Seq.empty,
                                       var isrChangeListener: PartitionReassignmentIsrChangeListener = null)
/** Identifies a single replica: a (topic, partition) pair plus the hosting broker id. */
case class PartitionAndReplica(topic: String, partition: Int, replica: Int) {
  // Same rendering as the original %-format string, via string interpolation.
  override def toString: String = s"[Topic=$topic,Partition=$partition,Replica=$replica]"
}
/** Pairs a partition's leader/ISR state with the controller epoch that wrote it. */
case class LeaderIsrAndControllerEpoch(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int) {
  // Single concatenation expression producing the same text as the original StringBuilder version.
  override def toString: String =
    "(Leader:" + leaderAndIsr.leader +
      ",ISR:" + leaderAndIsr.isr.mkString(",") +
      ",LeaderEpoch:" + leaderAndIsr.leaderEpoch +
      ",ControllerEpoch:" + controllerEpoch + ")"
}
/** Controller-wide metrics: unclean election rate and leader election latency/rate. */
object ControllerStats extends KafkaMetricsGroup {

  private val _uncleanLeaderElectionRate = newMeter("UncleanLeaderElectionsPerSec", "elections", TimeUnit.SECONDS)

  private val _leaderElectionTimer = new KafkaTimer(newTimer("LeaderElectionRateAndTimeMs", TimeUnit.MILLISECONDS, TimeUnit.SECONDS))

  // KafkaServer needs to initialize controller metrics during startup. We perform initialization
  // through method calls to avoid Scala compiler warnings.
  def uncleanLeaderElectionRate: Meter = _uncleanLeaderElectionRate

  def leaderElectionTimer: KafkaTimer = _leaderElectionTimer
}
/** A unit of work processed serially by the controller event thread. */
sealed trait ControllerEvent {
  def process(): Unit
}
| rhauch/kafka | core/src/main/scala/kafka/controller/KafkaController.scala | Scala | apache-2.0 | 92,935 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.application.tnrb
import iht.forms.TnrbForms._
import iht.models.application.tnrb.TnrbEligibiltyModel
import iht.testhelpers.{CommonBuilder, TestHelper}
import iht.utils.CommonHelper
import iht.views.application.{CancelComponent, SubmittableApplicationPageBehaviour}
import iht.views.html.application.tnrb.partner_name
import org.joda.time.LocalDate
import play.api.data.Form
import play.twirl.api.HtmlFormat.Appendable
// View test for the TNRB partner-name page: checks page/browser titles, guidance text,
// the name form fields, the form target and the cancel/return link.
class PartnerNameViewTest extends SubmittableApplicationPageBehaviour[TnrbEligibiltyModel] {

  def tnrbModel = CommonBuilder.buildTnrbEligibility

  def widowCheck = CommonBuilder.buildWidowedCheck

  val deceasedDetailsName = CommonBuilder.buildDeceasedDetails.name

  override def pageTitle = messagesApi("page.iht.application.TnrbEligibilty.partnerName.label", messagesApi(TestHelper.spouseMessageKey))

  override def browserTitle = messagesApi("page.iht.application.TnrbEligibilty.partnerName.label", messagesApi(TestHelper.spouseMessageKey))

  override def guidance = guidance(
    Set(
      messagesApi( "page.iht.application.TnrbEligibilty.partnerName.hint", "2000")
    )
  )

  override def formTarget = Some(iht.controllers.application.tnrb.routes.PartnerNameController.onSubmit())

  override def form: Form[TnrbEligibiltyModel] = partnerNameForm

  lazy val partnerNameView: partner_name = app.injector.instanceOf[partner_name]

  // Renders the view under test with a fixed date (feeds the "2000" hint) and a return link
  // back to the TNRB overview page anchored at the spouse-name row.
  override def formToView: Form[TnrbEligibiltyModel] => Appendable =
    form =>
      partnerNameView(form, Some(new LocalDate(2000,10,1)),
        CommonHelper.addFragmentIdentifier(iht.controllers.application.tnrb.routes.TnrbOverviewController.onPageLoad(), Some(TestHelper.TnrbSpouseNameID))
      )

  override val cancelId: String = "cancel-button"

  override def cancelComponent = Some(
    CancelComponent(iht.controllers.application.tnrb.routes.TnrbOverviewController.onPageLoad(),
      messagesApi("page.iht.application.tnrb.returnToIncreasingThreshold"),
      TestHelper.TnrbSpouseNameID
    )
  )

  "Partner Name View" must {

    behave like applicationPageWithErrorSummaryBox()

    "have a first name label with hint text" in {
      labelShouldBe(doc, "firstName-container", messagesApi("iht.firstName"))
      labelHelpTextShouldBe(doc, "firstName-container", "iht.firstName.hint")
    }

    "have a first name field" in {
      Option(doc.getElementById("firstName")).isDefined mustBe true
    }

    "have a last name label" in {
      elementShouldHaveText(doc, "lastName-container", messagesApi("iht.lastName"))
    }

    "have a last name field" in {
      Option(doc.getElementById("lastName")).isDefined mustBe true
    }
  }
}
| hmrc/iht-frontend | test/iht/views/application/tnrb/PartnerNameViewTest.scala | Scala | apache-2.0 | 3,247 |
package io.backchat.scapulet
package stanza
import xml._
trait ReplyMethods {
self: StanzaHandler ⇒
import CoreExt._
/**
 * Creates an info query (IQ) result stanza, addressed back to the sender of the last stanza
 * received by this handler; additional content can be added in the curried function.
 *
 * @param ns The namespace for the reply's query element
 * @param content The child nodes placed inside the query element (evaluated lazily)
 *
 * @return a NodeSeq representing the stanza
 */
protected def iqReply[TNode <: NodeSeq](ns: String)(content: ⇒ NodeSeq) = {
  // Mirror id and swap from/to so the reply is routed back to the requester.
  <iq type="result" id={ (lastStanza \\ "@id").text } to={ (lastStanza \\ "@from").text } from={ (lastStanza \\ "@to").text }>
    <query xmlns={ ns }>
      { content }
    </query>
  </iq>
}
/**
 * Creates a presence stanza addressed back to the sender of the last stanza.
 *
 * @param presType An optional presence type; if none is given it's assumed to be 'available'
 *                 and no type attribute is emitted
 * @param children The child nodes for this stanza
 *
 * @return a NodeSeq representing the presence stanza
 */
protected def presence(presType: Option[String] = None)(children: Seq[Node]) = {
  val ele = <presence to={ (lastStanza \\ "@from").text } from={ (lastStanza \\ "@to").text }>
              { children }
            </presence>
  // Stamp the type attribute only when one was supplied; avoids the isDefined/.get pattern
  // of the original while producing the identical element.
  presType.map(pt ⇒ ele % Map("type" -> pt)).getOrElse(ele)
}
package BIDMach.networks.layers
import BIDMat.{Mat,ND,SBMat,CMat,CSMat,DMat,FMat,FND,IMat,LMat,HMat,GMat,GDMat,GIMat,GLMat,GSMat,GSDMat,GND,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMach.datasources._
import BIDMach.updaters._
import BIDMach.mixins._
import BIDMach.models._
import BIDMach.networks._
import BIDMach._
import scala.util.hashing.MurmurHash3;
import scala.collection.mutable.HashMap;
/**
 * LSTM unit. A compound layer with three inputs (previous hidden state, previous cell state,
 * current input) and two outputs (new hidden state, new cell state); the internal graph is
 * built by the companion LSTMNode according to its `kind` option.
 */
class LSTMLayer(override val net:Net, override val opts:LSTMNode = new LSTMNode) extends CompoundLayer(net, opts) {
	override val _inputs = new Array[LayerTerm](3);
	override val _outputs = new Array[ND](2);
	override val _derivs = new Array[ND](2);

	override def toString = {
    "LSTM@"+Integer.toHexString(hashCode % 0x10000).toString
  }
}
trait LSTMNodeOpts extends CompoundNodeOpts {
    var dim = 0;          // hidden/cell state dimension
    var kind = 1;         // selects the internal graph layout (see LSTMNode.constructGraph)
    var hasBias = false;  // whether the internal linear layers carry bias terms
    var scoreType = 0;
    var outdim = 0;

    /** Copies these LSTM options onto `opts` (after the superclass fields) and returns it. */
    def copyOpts(opts:LSTMNodeOpts):LSTMNodeOpts = {
  		super.copyOpts(opts);
  		opts.dim = dim;
  		opts.kind = kind;
  		opts.hasBias = hasBias;
  		opts.scoreType = scoreType;
  		opts.outdim = outdim;
  		opts;
    }
}
class LSTMNode extends CompoundNode with LSTMNodeOpts {
override val inputs:Array[NodeTerm] = Array(null, null, null);
// override val inputTerminals:Array[Int] = Array(0,0,0);
/** Dispatches to the graph builder selected by `kind`; fails fast on unknown kinds. */
def constructGraph = {
  kind match {
    case 0 => constructGraph0
    case 1 => constructGraph1
    case 2 => constructGraph2
    case 3 => constructGraph3
    case 4 => constructGraph4
    case _ => throw new RuntimeException("LSTMLayer type %d not recognized" format kind);
  }
}
// Basic LSTM topology with 8 linear layers: separate linear transforms for the hidden state
// and the input feed each of the four gates, and the pairs are summed before the
// nonlinearities. Outputs are (new hidden state, new cell state).
def constructGraph0 = {
  import BIDMach.networks.layers.Node._
  val odim = dim;
  // The three external inputs: previous hidden state, previous cell state, current input.
  val in_h = copy;
  val in_c = copy;
  val in_i = copy;

  val lin1 = linear(in_h)(prefix+"LSTM_h_in_gate", outdim=odim, hasBias=hasBias);
  val lin2 = linear(in_h)(prefix+"LSTM_h_out_gate", outdim=odim, hasBias=hasBias);
  val lin3 = linear(in_h)(prefix+"LSTM_h_forget_gate", outdim=odim, hasBias=hasBias);
  val lin4 = linear(in_h)(prefix+"LSTM_h_tanh_gate", outdim=odim, hasBias=hasBias);

  val lin5 = linear(in_i)(prefix+"LSTM_i_in_gate", outdim=odim, hasBias=hasBias);
  val lin6 = linear(in_i)(prefix+"LSTM_i_out_gate", outdim=odim, hasBias=hasBias);
  val lin7 = linear(in_i)(prefix+"LSTM_i_forget_gate", outdim=odim, hasBias=hasBias);
  val lin8 = linear(in_i)(prefix+"LSTM_i_tanh_gate", outdim=odim, hasBias=hasBias);

  // Combine hidden-state and input contributions per gate.
  val sum1 = lin1 + lin5;
  val sum2 = lin2 + lin6;
  val sum3 = lin3 + lin7;
  val sum4 = lin4 + lin8;

  val in_gate = σ(sum1);
  val out_gate = σ(sum2);
  val forget_gate = σ(sum3);
  val in_sat = tanh(sum4);

  // Standard LSTM cell update: c' = i*g + f*c ; h' = o * tanh(c').
  val in_prod = in_gate ∘ in_sat;
  val f_prod = forget_gate ∘ in_c;
  val out_c = in_prod + f_prod;

  val out_tanh = tanh(out_c);
  val out_h = out_gate ∘ out_tanh;

  // Lay the nodes out in a grid; traversal order of grid.data defines execution order.
  grid = (in_h on in_c on in_i on null) \\ (lin1 \\ lin5 \\ sum1 \\ in_gate \\ in_prod \\ out_tanh on
                                            lin2 \\ lin6 \\ sum2 \\ out_gate \\ f_prod \\ out_h on
                                            lin3 \\ lin7 \\ sum3 \\ forget_gate \\ out_c \\ null on
                                            lin4 \\ lin8 \\ sum4 \\ in_sat \\ null \\ null);

  val lopts = grid.data;
  lopts.map((x:Node) => if (x != null) x.parent = this);
  // Output slots: (new hidden state, new cell state).
  outputNumbers = Array(lopts.indexOf(out_h), lopts.indexOf(out_c));
}
// LSTM with 4 linear layers, with h and i stacked vertically as one input so each gate needs
// only a single linear transform. Outputs are (new hidden state, new cell state).
def constructGraph1 = {
  import BIDMach.networks.layers.Node._
  val odim = dim;
  val in_h = copy;
  val in_c = copy;
  val in_i = copy;
  // Stack previous hidden state over current input to share one linear layer per gate.
  val h_over_i = in_h over in_i;

  val lin1 = linear(h_over_i)(prefix+"LSTM_in_gate", outdim=odim, hasBias=hasBias);
  val lin2 = linear(h_over_i)(prefix+"LSTM_out_gate", outdim=odim, hasBias=hasBias);
  val lin3 = linear(h_over_i)(prefix+"LSTM_forget_gate", outdim=odim, hasBias=hasBias);
  val lin4 = linear(h_over_i)(prefix+"LSTM_tanh_gate", outdim=odim, hasBias=hasBias);

  val in_gate = σ(lin1);
  val out_gate = σ(lin2);
  val forget_gate = σ(lin3);
  val in_sat = tanh(lin4);

  // Standard LSTM cell update: c' = i*g + f*c ; h' = o * tanh(c').
  val in_prod = in_gate ∘ in_sat;
  val f_prod = forget_gate ∘ in_c;
  val out_c = in_prod + f_prod;

  val out_tanh = tanh(out_c);
  val out_h = out_gate ∘ out_tanh;

  grid = in_h     \\ lin1 \\ in_gate     \\ in_prod \\ out_tanh on
         in_c     \\ lin2 \\ out_gate    \\ f_prod  \\ out_h    on
         in_i     \\ lin3 \\ forget_gate \\ out_c   \\ null     on
         h_over_i \\ lin4 \\ in_sat      \\ null    \\ null;

  val lopts = grid.data;
  lopts.map((x:Node) => if (x != null) x.parent = this);
  // Output slots: (new hidden state, new cell state).
  outputNumbers = Array(lopts.indexOf(out_h), lopts.indexOf(out_c));
}
// LSTM with 1 linear layer, with h and i stacked as inputs, and all 4 output stacked
/** Topology 2: a single fused linear transform of size 4*dim, split vertically into the four gates. */
def constructGraph2 = {
import BIDMach.networks.layers.Node._
val odim = dim;
val in_h = copy;
val in_c = copy;
val in_i = copy;
val h_over_i = in_h over in_i;
// One linear layer computes all four gate pre-activations at once.
val lin = linear(h_over_i)(prefix+"LSTM_all", outdim=4*odim, hasBias=hasBias);
val sp = splitvert(lin, 4);
val in_gate = σ(sp(0));
val out_gate = σ(sp(1));
val forget_gate = σ(sp(2));
val in_sat = tanh(sp(3));
// Cell update: out_c = in_gate∘in_sat + forget_gate∘in_c; out_h = out_gate∘tanh(out_c).
val in_prod = in_gate ∘ in_sat;
val f_prod = forget_gate ∘ in_c;
val out_c = in_prod + f_prod;
val out_tanh = tanh(out_c);
val out_h = out_gate ∘ out_tanh;
grid = in_h \\ lin \\ in_gate \\ in_prod \\ out_tanh on
in_c \\ sp \\ out_gate \\ f_prod \\ out_h on
in_i \\ null \\ forget_gate \\ out_c \\ null on
h_over_i \\ null \\ in_sat \\ null \\ null;
val lopts = grid.data;
lopts.map((x:Node) => if (x != null) x.parent = this);
outputNumbers = Array(lopts.indexOf(out_h), lopts.indexOf(out_c)); // Specifies the output layer numbers (next_h and next_c)
}
// LSTM with two sets of layers, paired outputs. More stable to train than the single linlayer network
/** Topology 3: two fused linear transforms of size 2*dim each, split into (in,out) and (forget,tanh) gate pairs. */
def constructGraph3 = {
import BIDMach.networks.layers.Node._
val odim = dim;
val in_h = copy;
val in_c = copy;
val in_i = copy;
val h_over_i = in_h over in_i;
// First pair: input and output gates.
val lin1 = linear(h_over_i)(prefix+"LSTM_in_out", outdim=2*odim, hasBias=hasBias);
val sp1 = splitvert(lin1, 2);
// Second pair: forget gate and candidate (tanh) gate.
val lin2 = linear(h_over_i)(prefix+"LSTM_forget_tanh", outdim=2*odim, hasBias=hasBias);
val sp2 = splitvert(lin2, 2);
val in_gate = σ(sp1(0));
val out_gate = σ(sp1(1));
val forget_gate = σ(sp2(0));
val in_sat = tanh(sp2(1));
// Cell update: out_c = in_gate∘in_sat + forget_gate∘in_c; out_h = out_gate∘tanh(out_c).
val in_prod = in_gate ∘ in_sat;
val f_prod = forget_gate ∘ in_c;
val out_c = in_prod + f_prod;
val out_tanh = tanh(out_c);
val out_h = out_gate ∘ out_tanh;
grid = in_h \\ lin1 \\ in_gate \\ in_prod \\ out_tanh on
in_c \\ sp1 \\ out_gate \\ f_prod \\ out_h on
in_i \\ lin2 \\ forget_gate \\ out_c \\ null on
h_over_i \\ sp2 \\ in_sat \\ null \\ null;
val lopts = grid.data;
lopts.map((x:Node) => if (x != null) x.parent = this);
outputNumbers = Array(lopts.indexOf(out_h), lopts.indexOf(out_c)); // Specifies the output layer numbers (next_h and next_c)
}
// LSTM with 2 linear layers from h and i respectively
/**
 * Topology 4: one fused 4*dim linear transform for h and one for i; the four
 * vertical splits of each are summed pairwise to form the gate pre-activations.
 * Outputs (next_h, next_c).
 */
def constructGraph4 = {
import BIDMach.networks.layers.Node._
val odim = dim;
val in_h = copy;
val in_c = copy;
val in_i = copy;
val linh = linear(in_h)(prefix+"LSTM_h", outdim=4*odim, hasBias=hasBias);
val sph = splitvert(linh, 4);
val lini = linear(in_i)(prefix+"LSTM_i", outdim=4*odim, hasBias=hasBias);
val spi = splitvert(lini, 4);
// Pair up the h- and i-splits for each gate.
val lin1 = sph(0) + spi(0);
val lin2 = sph(1) + spi(1);
val lin3 = sph(2) + spi(2);
val lin4 = sph(3) + spi(3);
val in_gate = σ(lin1);
val out_gate = σ(lin2);
val forget_gate = σ(lin3);
val in_sat = tanh(lin4);
// Cell update: out_c = in_gate∘in_sat + forget_gate∘in_c; out_h = out_gate∘tanh(out_c).
val in_prod = in_gate ∘ in_sat;
val f_prod = forget_gate ∘ in_c;
val out_c = in_prod + f_prod;
val out_tanh = tanh(out_c);
val out_h = out_gate ∘ out_tanh;
grid = (in_h on in_c on in_i on null) \\ (linh \\ lin1 \\ in_gate \\ in_prod \\ out_tanh on
sph \\ lin2 \\ out_gate \\ f_prod \\ out_h on
lini \\ lin3 \\ forget_gate \\ out_c \\ null on
spi \\ lin4 \\ in_sat \\ null \\ null);
val lopts = grid.data;
// foreach (not map): we only mutate each node's parent and discard the result.
lopts.foreach((x:Node) => if (x != null) x.parent = this);
outputNumbers = Array(lopts.indexOf(out_h), lopts.indexOf(out_c)); // Specifies the output layer numbers (next_h and next_c)
}
/** Deep-copies this node's options into a fresh LSTMNode. */
override def clone:LSTMNode = {
val cloned = new LSTMNode
copyTo(cloned).asInstanceOf[LSTMNode]
}
/** Instantiates the runtime layer corresponding to this node spec within `net`. */
override def create(net:Net):LSTMLayer = {
LSTMLayer(net, this);
}
/** Short debug id: "LSTM@" plus the low 16 bits of the hash in hex. */
override def toString = {
// Mask rather than '% 0x10000': hashCode can be negative, and Integer.toHexString
// of a negative value prints the full 8-digit two's-complement form. Masking
// always yields a short, non-negative 1-4 digit hex id as intended.
"LSTM@"+Integer.toHexString(hashCode & 0xffff)
}
/** First output terminal: the next hidden state h. */
def h = apply(0);
/** Second output terminal: the next cell state c. */
def c = apply(1);
}
/** Companion: output-head type constants and factories for LSTMNode graphs. */
object LSTMNode {
final val gridTypeNoOutput = 0;
final val gridTypeSoftmaxOutput = 1;
final val gridTypeNegsampOutput = 2;
/** Node with default options; the internal graph is built immediately. */
def apply() = {
val n = new LSTMNode;
n.constructGraph;
n
}
/** Node configured from `opts`; the internal graph is built after copying them. */
def apply(opts:LSTMNodeOpts) = {
val n = new LSTMNode;
opts.copyOpts(n);
n.constructGraph;
n
}
/** Grid options: netType selects the output head; bylevel gives each LSTM row its own model name. */
class GridOpts extends LSTMNodeOpts {var netType = 0; var bylevel = true};
/**
 * Builds an (nrows x ncols [+ output rows]) NodeMat: row 0 holds input nodes, row 1 a
 * "bottom" linear embedding, rows 2..nrows+1 chained LSTM cells (each column feeds its
 * h/c into the next column), plus optional softmax or negsamp output rows on top.
 */
def grid(nrows:Int, ncols:Int, opts:GridOpts):NodeMat = {
import BIDMach.networks.layers.Node._
val nlin = 2;
val odim = opts.outdim;
val idim = opts.dim;
// Extra rows required by the chosen output head.
val nsoft = opts.netType match {
case `gridTypeNoOutput` => 0;
case `gridTypeNegsampOutput` => 1;
case `gridTypeSoftmaxOutput` => 2;
}
val gr = NodeMat(nrows + nlin + nsoft, ncols);
for (k <- 0 until ncols) {
gr(0, k) = input
}
val modelName = opts.modelName;
// Shared bottom embedding layer (model index 0) under every column.
for (k <- 0 until ncols) {
gr(1, k) = linear(gr(0, k))((modelName format 0) +"_bottom", outdim=idim, hasBias = opts.hasBias)
}
for (k <- 0 until ncols) {
for (j <- nlin until nrows + nlin) {
val modelName = if (opts.bylevel) (opts.modelName format j-nlin) else (opts.modelName format 0)
val below = gr(j-1, k);
// Column 0 has no predecessor, so its h/c inputs are left null.
if (k > 0) {
val left = gr(j, k-1).asInstanceOf[LSTMNode]
gr(j, k) = lstm(h=left.h, c=left.c, i=below, m=modelName)(opts);
} else {
gr(j, k) = lstm(h=null, c=null, i=below, m=modelName)(opts);
}
}
}
// Attach the requested output head above the top LSTM row.
opts.netType match {
case `gridTypeNoOutput` => {}
case `gridTypeSoftmaxOutput` => {
for (k <- 0 until ncols) {
gr(nrows + nlin, k) = linear(gr(nrows + nlin - 1, k))(name=opts.modelName+"_top", outdim=odim, hasBias = opts.hasBias)
gr(nrows + nlin + 1, k) = softmaxout(gr(nrows + nlin, k))(opts.scoreType);
}
}
case `gridTypeNegsampOutput` => {
for (k <- 0 until ncols) {
gr(nrows + nlin, k) = negsamp(gr(nrows + nlin - 1, k))(name=opts.modelName+"_top", outdim=odim, hasBias=opts.hasBias, scoreType=opts.scoreType)
}
}
}
gr
}
}
/** Companion: factories for LSTM layers and layer grids. */
object LSTMLayer {
/** Layer backed by default node options. */
def apply(net:Net) = new LSTMLayer(net, new LSTMNode)
/** Layer from explicit node options; `construct` wires up the internal graph. */
def apply(net:Net, opts:LSTMNode) = {
val layer = new LSTMLayer(net, opts)
layer.construct
layer
}
/** Materializes the node-level grid into an nrows x ncols LayerMat for `net`. */
def grid(net:Net, nrows:Int, ncols:Int, opts:LSTMNode.GridOpts):LayerMat = {
val nodes = LSTMNode.grid(nrows, ncols, opts)
LayerMat(nodes, net)
}
}
| jamesjia94/BIDMach | src/main/scala/BIDMach/networks/layers/LSTM.scala | Scala | bsd-3-clause | 12,912 |
/*
* Copyright 2017-2018 Iaroslav Zeigerman
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akkeeper.master.route
import akka.actor.ActorRef
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import akkeeper.api._
import scala.concurrent.ExecutionContext
/**
 * HTTP controller exposing master-control endpoints under the "master" path prefix.
 * All messages are fire-and-forget tells to the master `service` actor.
 */
class MasterController(service: ActorRef)(implicit dispatcher: ExecutionContext,
timeout: Timeout) extends BaseController {
override val route: Route =
pathPrefix("master") {
path("terminate") {
post {
// Fire-and-forget: reply 202 Accepted immediately, termination proceeds asynchronously.
service ! TerminateMaster
complete(StatusCodes.Accepted -> "")
}
} ~
path("heartbeat") {
post {
// Liveness signal: forward to the service and acknowledge with 200 OK.
service ! Heartbeat
complete(StatusCodes.OK -> "")
}
}
}
}
/** Factory returning the controller typed as its BaseController interface. */
object MasterController {
def apply(service: ActorRef)(implicit dispatcher: ExecutionContext,
timeout: Timeout): BaseController =
new MasterController(service)
}
| akkeeper-project/akkeeper | akkeeper/src/main/scala/akkeeper/master/route/MasterController.scala | Scala | apache-2.0 | 1,548 |
package git
import org.joda.time.DateTime
/** Common shape of a comment pulled from a git hosting service. */
trait Comment {
/** Id of the authoring user. */
def userId: Int
/** Creation timestamp. */
def createdAt: DateTime
/** External reference id of the comment. */
def extRefId: String
/** Raw comment text; mutable so it can be filled in after construction. */
var body: String = ""
// Compiled once (lazily, per instance) instead of on every hasUserMention call.
private lazy val mentionRegex = "(?:\\\\s|^)@[a-zA-Z0-9]+".r
/** True when the body contains an "@name" mention preceded by whitespace or start-of-text. */
def hasUserMention: Boolean =
body != null && mentionRegex.findFirstMatchIn(body).isDefined
}
| PRioritizer/PRioritizer-predictor | src/main/scala/git/Comment.scala | Scala | mit | 294 |
package org.jetbrains.plugins.scala
package editor.backspaceHandler
import com.intellij.application.options.CodeStyle
import com.intellij.codeInsight.CodeInsightSettings
import com.intellij.codeInsight.editorActions.BackspaceHandlerDelegate
import com.intellij.codeInsight.highlighting.BraceMatchingUtil
import com.intellij.openapi.editor.ex.EditorEx
import com.intellij.openapi.editor.highlighter.HighlighterIterator
import com.intellij.openapi.editor.{Document, Editor}
import com.intellij.psi._
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.PsiTreeUtil
import org.apache.commons.lang3.StringUtils
import org.jetbrains.plugins.scala.editor._
import org.jetbrains.plugins.scala.editor.typedHandler.ScalaTypedHandler
import org.jetbrains.plugins.scala.editor.typedHandler.ScalaTypedHandler.BraceWrapInfo
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.{ScalaTokenTypes, ScalaXmlTokenTypes}
import org.jetbrains.plugins.scala.lang.psi.api.{ScFile, ScalaFile}
import org.jetbrains.plugins.scala.lang.psi.api.expr.xml.ScXmlStartTag
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.docsyntax.ScalaDocSyntaxElementType
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import org.jetbrains.plugins.scala.util.IndentUtil
import org.jetbrains.plugins.scala.util.MultilineStringUtil.{MultilineQuotesLength => QuotesLength}
class ScalaBackspaceHandler extends BackspaceHandlerDelegate {
/**
 * Runs just before `c` (the char at offset-1) is deleted. Cleans up the paired
 * construct that was inserted with it: scaladoc wiki-syntax tags, the matching
 * XML closing-tag name, multiline-string closing quotes, empty XML attribute
 * quotes, and auto-inserted block braces.
 */
override def beforeCharDeleted(c: Char, file: PsiFile, editor: Editor): Unit = {
if (!file.is[ScalaFile]) return
val document = editor.getDocument
val offset = editor.getCaretModel.getOffset
val element = file.findElementAt(offset - 1)
if (element == null) return
def scalaSettings = ScalaApplicationSettings.getInstance
if (needCorrectWiki(element)) {
// Deleting one side of a scaladoc wiki tag: remove the matching closing tag too.
inWriteAction {
if (element.getParent.getLastChild != element) {
val tagToDelete = element.getParent.getLastChild
if (ScalaDocSyntaxElementType.canClose(element.getNode.getElementType, tagToDelete.getNode.getElementType)) {
// Bold ("'''") is special-cased: only one quote of the closing tag is removed.
val textLength =
if (tagToDelete.getNode.getElementType != ScalaDocTokenType.DOC_BOLD_TAG) tagToDelete.getTextLength else 1
document.deleteString(tagToDelete.getTextOffset, tagToDelete.getTextOffset + textLength)
}
} else {
document.deleteString(element.getTextOffset, element.getTextOffset + 2)
editor.getCaretModel.moveCaretRelatively(1, 0, false, false, false)
}
PsiDocumentManager.getInstance(file.getProject).commitDocument(editor.getDocument)
}
} else if (element.getNode.getElementType == ScalaXmlTokenTypes.XML_NAME && element.getParent != null && element.getParent.is[ScXmlStartTag]) {
// Editing an XML start-tag name: mirror the deletion in the matching closing tag,
// but only while the two names are still in sync.
val openingTag = element.getParent.asInstanceOf[ScXmlStartTag]
val closingTag = openingTag.getClosingTag
if (closingTag != null && closingTag.getTextLength > 3 && closingTag.getText.substring(2, closingTag.getTextLength - 1) == openingTag.getTagName) {
inWriteAction {
val offsetInName = editor.getCaretModel.getOffset - element.getTextOffset + 1
editor.getDocument.deleteString(closingTag.getTextOffset + offsetInName, closingTag.getTextOffset + offsetInName + 1)
PsiDocumentManager.getInstance(file.getProject).commitDocument(editor.getDocument)
}
}
} else if (c == '"') {
// Deleting a quote of an empty multiline string or an empty XML attribute value:
// drop the now-orphaned partner quote(s) as well.
val hiterator = editor.asInstanceOf[EditorEx].getHighlighter.createIterator(offset)
if (isInsideEmptyMultilineString(offset, hiterator)) {
if (scalaSettings.INSERT_MULTILINE_QUOTES) {
deleteMultilineStringClosingQuotes(editor, hiterator)
}
} else if (isInsideEmptyXmlAttributeValue(element)) {
inWriteAction {
val document = editor.getDocument
document.deleteString(offset, offset + 1)
editor.getDocument.commit(file.getProject)
}
}
} else if (c == '{' && scalaSettings.WRAP_SINGLE_EXPRESSION_BODY) {
handleLeftBrace(offset, element, file, editor)
}
}
// TODO: simplify when parsing of incomplete multiline strings is unified for interpolated and non-interpolated strings
// see also ScalaQuoteHandler.startsWithMultilineQuotes
/** True when the caret sits between the opening and closing quotes of an empty multiline string. */
private def isInsideEmptyMultilineString(offset: Int, hiterator: HighlighterIterator): Boolean = {
import ScalaTokenTypes._
hiterator.getTokenType match {
case `tMULTILINE_STRING` =>
// An empty plain multiline string is one token of exactly 2*QuotesLength quotes;
// the caret must sit right after the opening triple.
hiterator.tokenLength == 2 * QuotesLength && offset == hiterator.getStart + QuotesLength
case `tINTERPOLATED_STRING_END` =>
// For interpolated strings, check that the token immediately before the closing
// quotes is an empty multiline-string body (quotes only).
hiterator.tokenLength == QuotesLength && offset == hiterator.getStart && {
hiterator.retreat()
try hiterator.getTokenType == tINTERPOLATED_MULTILINE_STRING && hiterator.tokenLength == QuotesLength
finally hiterator.advance() // pretending we are side-affect-free =/
}
case _ =>
false
}
}
/** Deletes the closing triple quotes of an empty multiline string. */
private def deleteMultilineStringClosingQuotes(editor: Editor, hiterator: HighlighterIterator): Unit = {
import ScalaTokenTypes._
// A plain multiline string is a single token, so the closing quotes start after
// the opening ones; for an interpolated string the iterator already sits on the
// dedicated closing-quotes token.
val closingQuotesOffset =
if (hiterator.getTokenType == tMULTILINE_STRING) hiterator.getStart + QuotesLength
else hiterator.getStart
inWriteAction {
editor.getDocument.deleteString(closingQuotesOffset, closingQuotesOffset + QuotesLength)
}
}
/** True when `element` is the opening quote of an XML attribute value that is empty. */
private def isInsideEmptyXmlAttributeValue(element: PsiElement): Boolean = {
// Empty value == start delimiter immediately followed by the end delimiter.
element.elementType == ScalaXmlTokenTypes.XML_ATTRIBUTE_VALUE_START_DELIMITER &&
Option(element.getNextSibling).exists(_.elementType == ScalaXmlTokenTypes.XML_ATTRIBUTE_VALUE_END_DELIMITER)
}
/**
 * True when the element being deleted is scaladoc wiki syntax (or "{{{") whose paired
 * closing tag must be fixed up too. The "'''" preceded by "'" case distinguishes
 * bold-inside-italic, which still needs correction even as the parent's last child.
 */
private def needCorrectWiki(element: PsiElement): Boolean = {
(element.getNode.getElementType.is[ScalaDocSyntaxElementType] || element.textMatches("{{{")) &&
(element.getParent.getLastChild != element ||
element.textMatches("'''") && element.getPrevSibling != null &&
element.getPrevSibling.textMatches("'"))
}
/**
 * When the '{' of an auto-wrapped single-expression block is deleted, also removes
 * the matching '}' (via deleteBrace), provided removing it is semantically safe
 * and the brace's indentation is not shallower than the wrapped parent's.
 */
private def handleLeftBrace(offset: Int, element: PsiElement, file: PsiFile, editor: Editor): Unit = {
for {
BraceWrapInfo(element, _, parent, _) <- ScalaTypedHandler.findElementToWrap(element)
if element.is[ScBlockExpr]
block = element.asInstanceOf[ScBlockExpr]
rBrace <- block.getRBrace
if canDeleteClosingBrace(block, rBrace)
project = file.getProject
tabSize = CodeStyle.getSettings(project).getTabSize(ScalaFileType.INSTANCE)
if IndentUtil.compare(rBrace, parent, tabSize) >= 0
} {
val document = editor.getDocument
deleteBrace(rBrace, document)
document.commit(project)
}
}
// ! Attention !
// Modifies the document!
// Further modifications of the document must take
// moved positions into account!
// Also document needs to be commited
/**
 * Deletes `brace` from `document` together with adjacent whitespace: when the brace
 * ends its line, the preceding line break is swallowed; otherwise the trailing
 * same-line whitespace is.
 */
private def deleteBrace(brace: PsiElement, document: Document): Unit = {
val (start, end) = PsiTreeUtil.nextLeaf(brace) match {
case ws: PsiWhiteSpace =>
if (ws.textContains('\n')) {
// Brace is last on its line: extend the deletion back to the previous line break.
val start = PsiTreeUtil.prevLeaf(brace) match {
case ws: PsiWhiteSpace => ws.startOffset + StringUtils.lastIndexOf(ws.getNode.getChars, '\n').max(0)
case _ => brace.startOffset
}
(start, brace.endOffset)
} else {
(brace.startOffset, ws.endOffset)
}
case _ =>
(brace.startOffset, brace.endOffset)
}
document.deleteString(start, end)
}
/** A block may lose its braces only when empty or holding a single safe statement. */
private def canDeleteClosingBrace(block: ScBlockExpr, blockRBrace: PsiElement): Boolean =
block.statements match {
case Seq() => true
case Seq(only) => canDeleteClosingBrace(only, blockRBrace)
case _ => false
}
/**
 * do not delete brace if it breaks the code semantics (and leaves the code syntax correct)
 * e.g. here we can't delete the brace cause `else` will transfer to the inner `if`
 * {{{
 *   if (condition1) {<CARET>
 *     if (condition2)
 *       foo()
 *   } else
 *     bar()
 * }}}
 */
private def canDeleteClosingBrace(statement: ScBlockStatement, blockRBrace: PsiElement) =
statement match {
case innerIf: ScIf =>
// A trailing `else` after the brace would re-attach to the inner `if` once braces are gone.
innerIf.elseKeyword.isDefined || !isFollowedBy(blockRBrace, ScalaTokenTypes.kELSE)
case innerTry: ScTry =>
// Same hazard: a following `catch`/`finally` would re-attach to the inner `try`.
val okFromFinally = innerTry.finallyBlock.isDefined || !isFollowedBy(blockRBrace, ScalaTokenTypes.kFINALLY)
val okFromCatch = innerTry.catchBlock.isDefined || !isFollowedBy(blockRBrace, ScalaTokenTypes.kCATCH)
okFromFinally && okFromCatch
case _ => true
}
/** True when the next non-whitespace, non-empty leaf after `element` has the given type. */
private def isFollowedBy(element: PsiElement, elementType: IElementType): Boolean =
Option(element.getNextNonWhitespaceAndNonEmptyLeaf).exists(_.elementType == elementType)
/*
In some cases with nested braces (like '{()}' ) IDEA can't properly handle backspace action due to
bag in BraceMatchingUtil (it looks for every lbrace/rbrace token regardless of they are a pair or not)
So we have to fix it in our handler
*/
/**
 * Runs after a character was deleted: removes a now-unpaired auto-inserted closer
 * and collapses auto-inserted block braces. Always returns false so other
 * registered backspace handlers still run.
 */
override def charDeleted(deletedChar: Char, file: PsiFile, editor: Editor): Boolean = {
val scalaFile = file match {
case f: ScFile => f
case _ =>
return false
}
val document = editor.getDocument
val offset = editor.getCaretModel.getOffset
// Nothing to pair up with beyond the end of the document.
if (offset >= document.getTextLength) {
return false
}
if (CodeInsightSettings.getInstance.AUTOINSERT_PAIR_BRACKET) {
handleAutoInsertBraces(deletedChar, offset, scalaFile, document, editor)
}
if (ScalaApplicationSettings.getInstance.HANDLE_BLOCK_BRACES_REMOVAL_AUTOMATICALLY && !deletedChar.isWhitespace) {
handleDeleteAutoBrace(offset, document, scalaFile, editor)
}
false
}
/**
 * After '{' or '(' was deleted: if the text to the left of the caret shows an
 * unmatched closing brace, the character right after the caret (the formerly
 * auto-inserted partner) is deleted too.
 */
private def handleAutoInsertBraces(deletedChar: Char, offset: Int, file: ScFile, document: Document, editor: Editor): Unit = {
// Some(false): an unmatched closer was found scanning left; Some(true): balanced; None: undecidable.
def hasLeft: Option[Boolean] = {
val txt = document.getImmutableCharSequence
val iterator = editor.asInstanceOf[EditorEx].getHighlighter.createIterator(offset)
val tpe = iterator.getTokenType
if (tpe == null)
return None
val fileType = file.getFileType
val matcher = BraceMatchingUtil.getBraceMatcher(fileType, tpe)
if (matcher == null)
return None
// Scan leftwards, pushing closers and popping them against matching openers.
val stack = scala.collection.mutable.Stack[IElementType]()
iterator.retreat()
while (!iterator.atEnd() && iterator.getStart > 0 && iterator.getTokenType != null) {
if (matcher.isRBraceToken(iterator, txt, fileType)) stack push iterator.getTokenType
else if (matcher.isLBraceToken(iterator, txt, fileType)) {
if (stack.isEmpty || !matcher.isPairBraces(iterator.getTokenType, stack.pop()))
return Some(false)
}
iterator.retreat()
}
if (stack.isEmpty) Some(true)
else None
}
@inline
def fixBrace(): Unit = if (hasLeft.exists(!_)) {
inWriteAction {
document.deleteString(offset, offset + 1)
}
}
// Only act when the deleted opener's partner is the char right after the caret.
val charNext = document.getImmutableCharSequence.charAt(offset)
(deletedChar, charNext) match {
case ('{', '}') => fixBrace()
case ('(', ')') => fixBrace()
case _ =>
}
}
/**
 * When the deletion leaves the caret line blank, checks whether the enclosing block's
 * braces can be removed automatically (single-expression block in an indentation
 * context, caret on the block's indent) and removes both braces if so.
 */
private def handleDeleteAutoBrace(offset: Int, document: Document, file: ScFile, editor: Editor): Unit = {
val lineText = document.lineTextAt(offset)
if (!containsOnlyWhitespaces(lineText)) {
return
}
val project = file.getProject
document.commit(file.getProject)
val element = file.findElementAt(offset)
element.parents.find(_.is[ScBlockExpr]) match {
case Some(block: ScBlockExpr) if canAutoDeleteBraces(block) && AutoBraceUtils.isIndentationContext(block) =>
(block.getLBrace, block.getRBrace, block.getParent) match {
case (Some(lBrace), Some(rBrace), parent: PsiElement) =>
val tabSize = CodeStyle.getSettings(project).getTabSize(ScalaFileType.INSTANCE)
if (IndentUtil.compare(rBrace, parent, tabSize) >= 0) {
// Only collapse when the caret's indent matches the block's first-line indent.
val firstWhiteSpace = lBrace.nextSibling.filter(ws => ws.isWhitespace && ws.getNextSibling != rBrace)
val caretIsOnBlockIndent = firstWhiteSpace.forall(ws =>
IndentUtil.calcIndent(ws.getNode.getChars, tabSize) == IndentUtil.calcIndent(lineText, tabSize)
)
if (caretIsOnBlockIndent) {
val document = editor.getDocument
// delete closing brace first because deleting it doesn't change position of the left brace
deleteBrace(rBrace, document)
deleteBrace(lBrace, document)
document.commit(project)
}
}
case _ =>
}
case _ =>
}
}
/** True when every char of `seq` is whitespace (vacuously true for an empty sequence). */
private def containsOnlyWhitespaces(seq: CharSequence): Boolean =
(0 until seq.length).forall(i => Character.isWhitespace(seq.charAt(i)))
/**
 * Braces may be auto-deleted only when the block contains exactly one expression
 * and no other statements or comments.
 */
private def canAutoDeleteBraces(block: ScBlockExpr): Boolean = {
val it = block.children
var foundExpressions = false
while (it.hasNext) {
it.next() match {
case _: ScExpression =>
// return early if we already found 2 expressions/comments
if (foundExpressions)
return false
foundExpressions = true
case _: ScBlockStatement /* cannot be an expression because we matched those in the previous case */ =>
return false
case _: PsiComment =>
return false
case _ =>
}
}
true
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/editor/backspaceHandler/ScalaBackspaceHandler.scala | Scala | apache-2.0 | 13,855 |
package com.hasanozgan.services.myservice.httpservices
import com.hasanozgan.services.myservice.httpservices.models.User
import com.wordnik.swagger.annotations._
import spray.routing.Route
// Trying to not pollute the code with annotations
/**
 * Swagger documentation for the membership HTTP service. Kept separate from the
 * route implementations so annotations do not pollute the service code.
 */
@Api(value = "/membership", description = "Operations for users.", consumes= "application/json", produces = "application/json")
trait MembershipHttpServiceDoc {
// NOTE(review): the operation is titled "Get a user by id" but declared httpMethod = POST
// with a path-parameter userId — confirm the intended verb/semantics for signIn.
@ApiOperation(value = "Get a user by id", httpMethod = "POST", response = classOf[User])
@ApiImplicitParams(Array(
new ApiImplicitParam(name = "userId", value="ID of the user that needs to retrieved", required = true, dataType = "integer", paramType = "path" )
))
@ApiResponses(Array(
new ApiResponse(code = 200, message = "Ok"),
new ApiResponse(code = 404, message = "User not found")
))
def signIn: Route
@ApiOperation(value = "Add a new user to the system", httpMethod = "POST", consumes="application/json", response = classOf[User])
@ApiImplicitParams(Array(
new ApiImplicitParam(name = "body", value="User object to be added", required = true, dataType = "router.UserDto", paramType = "body" )
))
@ApiResponses(Array(
new ApiResponse(code = 405, message = "Invalid user"),
new ApiResponse(code = 201, message = "Entity Created")
))
def signUp: Route
}
package utils.pageobjects.breaks_in_care
import controllers.mappings.Mappings
import utils.WithBrowser
import utils.pageobjects._
/**
 * Page object for the "other breaks in care" claim page. The declare* calls map
 * HTML element selectors to TestData attribute names, suffixed with the break
 * iteration number.
 */
class GBreaksInCareOtherPage(ctx:PageObjectsContext, iteration: Int) extends ClaimPage(ctx, GBreaksInCareOtherPage.url+"/"+iteration, iteration) {
declareDate("#caringEnded_date", "AboutTheCareYouProvideBreakEndDate_" + iteration)
declareInput("#caringEnded_time", "AboutTheCareYouProvideBreakEndTime_" + iteration)
declareYesNo("#caringStarted_answer","AboutTheCareYouProvideBreakStartAnswer_"+iteration)
declareDate("#caringStarted_date", "AboutTheCareYouProvideBreakStartDate_" + iteration)
declareInput("#caringStarted_time", "AboutTheCareYouProvideBreakStartTime_" + iteration)
declareRadioList("#whereWasDp_answer","AboutTheCareYouProvideBreakWhereWasDp_"+iteration)
declareInput("#whereWasDp_text", "AboutTheCareYouProvideBreakWhereWasDpText_" + iteration)
declareRadioList("#whereWereYou_answer","AboutTheCareYouProvideBreakWhereWereYou_"+iteration)
declareInput("#whereWereYou_text", "AboutTheCareYouProvideBreakWhereWereYouText_" + iteration)
/** Allocates the next break iteration number from the shared iteration manager. */
protected override def getNewIterationNumber = {
import IterationManager._
ctx.iterationManager.increment(Breaks)
}
}
/** Companion: page URL, factory, and helpers that fill and submit one break iteration. */
object GBreaksInCareOtherPage {
val url = "/breaks/other-breaks"
def apply(ctx:PageObjectsContext, iteration:Int=1) = new GBreaksInCareOtherPage(ctx,iteration)
// NOTE(review): fillDetails/fillDetails2/fillDetails3 (and the default* builders)
// differ only in the iteration suffix of the dynamic TestData field names;
// consider consolidating once TestData supports parameterized field access.
/** Fills and submits iteration 1 with defaults, after applying the caller's overrides `f`. */
def fillDetails(context: PageObjectsContext, f: => TestData => Unit) = {
val claimData = defaultOtherDetails()
f(claimData)
val page = new GBreaksInCareOtherPage(context, 1) goToThePage()
page.fillPageWith(claimData)
page.submitPage()
}
/** Fills and submits iteration 2 with defaults, after applying the caller's overrides `f`. */
def fillDetails2(context: PageObjectsContext, f: => TestData => Unit) = {
val claimData = defaultOtherDetails2()
f(claimData)
val page = new GBreaksInCareOtherPage(context, 2) goToThePage()
page.fillPageWith(claimData)
page.submitPage()
}
/** Fills and submits iteration 3 with defaults, after applying the caller's overrides `f`. */
def fillDetails3(context: PageObjectsContext, f: => TestData => Unit) = {
val claimData = defaultOtherDetails3()
f(claimData)
val page = new GBreaksInCareOtherPage(context, 3) goToThePage()
page.fillPageWith(claimData)
page.submitPage()
}
private def defaultOtherDetails() = {
val claim = new TestData
claim.AboutTheCareYouProvideBreakEndDate_1 = "01/10/2015"
claim.AboutTheCareYouProvideBreakStartAnswer_1 = Mappings.no
claim
}
private def defaultOtherDetails2() = {
val claim = new TestData
claim.AboutTheCareYouProvideBreakEndDate_2 = "01/10/2015"
claim.AboutTheCareYouProvideBreakStartAnswer_2 = Mappings.no
claim
}
private def defaultOtherDetails3() = {
val claim = new TestData
claim.AboutTheCareYouProvideBreakEndDate_3 = "01/10/2015"
claim.AboutTheCareYouProvideBreakStartAnswer_3 = Mappings.no
claim
}
}
/** Browser-backed test context exposing the first-iteration page object. */
trait GBreaksInCareOtherPageContext extends PageContext {
this: WithBrowser[_] =>
val page = GBreaksInCareOtherPage (PageObjectsContext(browser), iteration = 1)
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/utils/pageobjects/breaks_in_care/GBreaksInCareOtherPage.scala | Scala | mit | 3,000 |
package com.socrata.geospace.lib.client
import com.rojoma.json.v3.conversions._
import com.rojoma.json.v3.ast._
import com.socrata.geospace.lib.feature.FeatureExtensions._
import com.socrata.thirdparty.geojson.JtsCodecs
import com.vividsolutions.jts.geom.{Geometry, MultiPolygon}
import org.geoscript.feature._
import org.opengis.feature.`type`.PropertyDescriptor
import scala.collection.JavaConverters._
/**
* Generates Soda1 requests from geo schemata and feature collections
*/
/**
 * Generates Soda1 requests from geo schemata and feature collections
 */
object GeoToSoda1Converter {
// The feature ID needs to be a part of every row of the shape dataset so we can correlate other datasets
// such as points to the belonging feature.
// Note: is there a better way to come up with a column name for feature ID?
// This here is a hack.
// scalastyle:off
val FeatureIdColName = "_feature_id"
val FeatureIdColumnDef = JObject(Map(
"fieldName" -> JString(FeatureIdColName),
"dataTypeName" -> JString("number"),
"name" -> JString(FeatureIdColName)
))
// Original shapefile feature id, stored alongside the numeric one (see getAddColumnBodies).
val FeatureIdStringColName = "_feature_id_string"
val FeatureIdStringColumnDef = JObject(Map(
"fieldName" -> JString(FeatureIdStringColName),
"dataTypeName" -> JString("text"),
"name" -> JString(FeatureIdStringColName)
))
/**
 * Maps shapefile types to Soda1 types
 */
val soda1TypeMap = Map[Class[_], String](
classOf[MultiPolygon] -> "multipolygon",
classOf[String] -> "text",
classOf[java.lang.Integer] -> "number",
classOf[java.lang.Double] -> "number",
classOf[java.lang.Long] -> "number"
)
// scalastyle:on
/**
 * Generates a Soda1 create request body
 * @param friendlyName Human readable name of the dataset
 * @return Soda2 create request body
 */
def getCreateBody(friendlyName: String): JValue = JObject(Map("name" -> JString(friendlyName)))
/**
 * Generates a Soda1 add column request body for each column in the schema
 * @param schema Schema definition
 * @return List of Soda1 add column request bodies
 */
def getAddColumnBodies(schema: Schema): Iterable[JValue] =
// Storing both the original shapefile feature ID (just in case - might go away later)
// and an ID that's more efficient to do group by queries on
Seq(FeatureIdColumnDef, FeatureIdStringColumnDef) ++ schema.getDescriptors.asScala.map(columnToJObject)
/**
 * Generates a Soda1 upsert request body
 * @param features Features representing the rows to upsert
 * @param schema Schema definition
 * @return Soda1 upsert request body
 */
def getUpsertBody(features: Traversable[Feature], schema: Schema): JValue = {
val attrNames = schema.getDescriptors.asScala.map(_.getName.toString.toLowerCase)
val featuresAsJObject = features.map(rowToJObject(_, attrNames.toSeq))
JArray(featuresAsJObject.toSeq)
}
/**
 * Converts a geo schema attribute to a Dataspace JSON column definition
 * @param attr Attribute to be converted to a column
 * @return JSON representation of the column
 */
private def columnToJObject(attr: PropertyDescriptor): JValue = {
val name = attr.getName.toString.toLowerCase
// Unsupported bindings fail fast rather than producing a bad dataset definition.
val typ = soda1TypeMap.getOrElse(
attr.getType.getBinding,
throw new IllegalArgumentException(
s"Unsupported type in shapefile: '${attr.getType.getBinding.getCanonicalName}'"))
JObject(Map(
"fieldName" -> JString(name),
"dataTypeName" -> JString(typ),
"name" -> JString(name)
))
}
/**
 * Converts a geo feature to a Core server JSON row definition
 * @param feature Feature to be converted to a row
 * @param attrNames List of column names
 * @return JSON representation of the row
 */
private def rowToJObject(feature: Feature, attrNames: Seq[String]): JValue = {
require(feature.getAttributes.size == attrNames.size, "Inconsistency between shapefile schema and features")
// NOTE(review): every non-geometry attribute is encoded as JString here, even when
// the column was declared "number" via soda1TypeMap — confirm the Soda1 endpoint coerces.
val fields = feature.getAttributes.asScala.zip(attrNames).map {
case (g: Geometry, name) => name -> JtsCodecs.geoCodec.encode(g)
case (attr, name) => name -> JString(attr.toString)
}
JObject(fields.toMap +
(FeatureIdStringColName -> JString(feature.getID)) +
(FeatureIdColName -> JNumber(feature.numericId)))
}
}
| socrata-platform/geospace | src/main/scala/com/socrata/geospace/lib/client/GeoToSoda1Converter.scala | Scala | apache-2.0 | 4,281 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2014 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.logic.command
import org.digimead.digi.lib.api.XDependencyInjection
import org.digimead.tabuddy.desktop.core.definition.command.Command
import org.digimead.tabuddy.desktop.logic.payload.Payload
import org.digimead.tabuddy.model.serialization.Serialization
import scala.language.implicitConversions
/**
* Parser builder for serialization type argument.
*/
/**
 * Command-line parser for a serialization-type argument: matches a word, resolves it
 * (case-insensitively, by file extension) against the available serialization
 * mechanisms, and yields a SerializationTypeParser.Argument.
 */
class SerializationTypeParser {
import Command.parser._
/** Set of valid digest identifiers. */
lazy val validIdentifiers = Payload.availableSerialization
/** Create parser for the digest configuration. */
def apply(tag: String = ""): Command.parser.Parser[Any] =
sp ~> commandRegex("\\\\w+".r, NameHintContainer) ^? {
case CompletionRequest(extension) ⇒
// During completion, fall back to the raw text instead of failing.
// NOTE(review): comparison uppercases only the known name, which implies `extension`
// arrives already uppercased here — confirm against the command framework.
validIdentifiers.find(_.extension.name.toUpperCase() == extension).map(_.extension.name) getOrElse { extension }
case extension ⇒
validIdentifiers.find(_.extension.name.toUpperCase() == extension).map(_.extension.name) getOrElse {
throw new Command.ParseException(s"Serialization mechanism with extension '$extension' not found.")
}
} ^^ (extension ⇒ SerializationTypeParser.Argument(tag, validIdentifiers.find(_.extension.name == extension).getOrElse {
throw new Command.ParseException(s"Serialization mechanism with extension '$extension' not found.")
}))
/** Hint container for digest mechanism name. */
object NameHintContainer extends Command.Hint.Container {
/** Get parser hints for user provided argument. */
def apply(arg: String): Seq[Command.Hint] = {
// Offer only identifiers whose uppercased name extends the user's prefix.
validIdentifiers.toSeq.sortBy(_.extension.name).filter(_.extension.name.startsWith(arg)).map(proposal ⇒
Command.Hint(proposal.extension.name.toUpperCase(), Some(proposal.description), Seq(proposal.extension.name.toUpperCase().drop(arg.length)))).
filter(_.completions.head.nonEmpty)
}
}
}
/**
 * Companion object: exposes the DI-provided parser implementation and the
 * `-serialization` command-line option wiring.
 */
object SerializationTypeParser {
  import Command.parser._
  /** Route calls on the companion to the injected implementation. */
  implicit def parser2implementation(c: SerializationTypeParser.type): SerializationTypeParser = c.inner
  /** Serialization type option name. */
  private val serializationArg = "-serialization"
  /** Get SerializationTypeParser implementation. */
  def inner() = DI.implementation
  /** SerializationType parser: `-serialization <extension>` with hint text. */
  def parser(tag: String = "serialization") = (serializationArg, Command.Hint(serializationArg, Some("Serialization type for data files"))) ~> SerializationTypeParser(tag)
  /** Parser result: the tag plus the resolved serialization identifier. */
  case class Argument(tag: String, value: Serialization.Identifier)
  /**
   * Dependency injection routines
   */
  private object DI extends XDependencyInjection.PersistentInjectable {
    /** SerializationTypeParser implementation. */
    lazy val implementation = injectOptional[SerializationTypeParser] getOrElse new SerializationTypeParser
  }
}
| digimead/digi-TABuddy-desktop | part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/command/SerializationTypeParser.scala | Scala | agpl-3.0 | 5,095 |
package reactivemongo.api.commands
import scala.concurrent.{ ExecutionContext, Future }
import reactivemongo.api.{
Cursor,
CursorOptions,
Collection,
DB,
SerializationPack,
Session,
ReadPreference
}
import reactivemongo.api.bson.buffer.WritableBuffer
import reactivemongo.core.protocol.{ Reply, Response }
import reactivemongo.core.actors.ExpectingResponse
import reactivemongo.core.errors.GenericDriverException
/** Base type of every MongoDB command supported by the driver. */
trait Command {
  /** The kind of this command, used when assembling the wire request. */
  private[reactivemongo] def commandKind: CommandKind
}
/** A command executed against a single collection (not database-wide). */
trait CollectionCommand extends Command
/** Marker trait: executing this command yields a result of type `R`. */
trait CommandWithResult[R] { _self: Command => }
/** Marker trait: this command is bound to the serialization pack `P`. */
trait CommandWithPack[P <: SerializationPack] { _self: Command => }
/**
 * A parsed command result paired with the raw network response it came from.
 *
 * @param response the response associated with the result
 * @param numberToReturn the number of documents to return
 * @param value the value parsed from the response
 */
private[reactivemongo] case class ResponseResult[R](
  response: Response,
  numberToReturn: Int,
  value: R)
/**
 * Fetches a cursor from MongoDB results.
 *
 * @tparam P the type of the serialization pack
 * @tparam C the type of the cursor implementation
 */
sealed trait CursorFetcher[P <: SerializationPack, +C[_] <: Cursor[_]] {
  val pack: P
  /** Executes the command once and deserializes the single result document. */
  def one[A](readPreference: ReadPreference)(implicit reader: pack.Reader[A], ec: ExecutionContext): Future[A]
  /** Returns a cursor over the command results. */
  def cursor[A](readPreference: ReadPreference)(implicit reader: pack.Reader[A]): C[A]
  /** The read preference applied when the caller does not supply one. */
  protected def defaultReadPreference: ReadPreference
}
private[reactivemongo] object Command {
  import reactivemongo.api.{
    DefaultCursor,
    Failover,
    FailoverStrategy
  }
  import reactivemongo.core.netty.BufferSequence
  import reactivemongo.core.protocol.{
    Query,
    QueryFlags,
    RequestMaker,
    Response
  }
  private[commands] lazy val logger =
    reactivemongo.util.LazyLogger("reactivemongo.api.commands")
  /**
   * Returns a fetcher that executes `command` as a database-level command,
   * i.e. against the pseudo collection `<db>.$cmd`, with empty cursor options.
   */
  def defaultCursorFetcher[P <: SerializationPack, A](db: DB, p: P, kind: CommandKind, command: A, failover: FailoverStrategy)(implicit writer: p.Writer[A]): CursorFetcher[p.type, DefaultCursor.Impl] = fetchCursor[p.type, A](db, db.name + f".$$cmd", p, kind, command, failover, CursorOptions.empty, maxAwaitTimeMS = None)
  /**
   * Returns a fetcher that executes `command` and exposes its results
   * either as a single value ([[CursorFetcher.one]]) or as a cursor.
   *
   * @param fullCollectionName the fully qualified collection name (even if `query.fullCollectionName` is `\\$cmd`)
   * @param maxAwaitTimeMS maximum time the server awaits new data on the cursor, if any
   */
  def fetchCursor[P <: SerializationPack, A](
    db: DB,
    fullCollectionName: String,
    p: P,
    kind: CommandKind,
    command: A,
    failover: FailoverStrategy,
    options: CursorOptions,
    maxAwaitTimeMS: Option[Long])(implicit writer: p.Writer[A]): CursorFetcher[p.type, DefaultCursor.Impl] = new CursorFetcher[p.type, DefaultCursor.Impl] {
    val pack: p.type = p
    @inline protected def defaultReadPreference = db.defaultReadPreference
    def one[T](readPreference: ReadPreference)(implicit reader: pack.Reader[T], ec: ExecutionContext): Future[T] = {
      // Rebuilt for each failover retry (def, not val).
      def requestMaker = buildRequestMaker(pack)(
        kind, command, writer, readPreference, db.name)
      // Capture the caller's stack now, so asynchronous failures can be
      // rewritten to point at the originating call site.
      val contextSTE = reactivemongo.util.Trace.currentTraceElements
      Failover(db.connection, failover) { () =>
        db.connection.sendExpectingResponse(new ExpectingResponse(
          requestMaker = requestMaker,
          // Keep the request on the node pinned by an ongoing transaction.
          pinnedNode = for {
            s <- db.session
            t <- s.transaction.toOption
            n <- t.pinnedNode
          } yield n))
      }.future.recoverWith {
        case cause => Future.failed[Response] {
          cause.setStackTrace(contextSTE.toArray)
          cause
        }
      }.flatMap {
        // A command error may still carry a document some readers can decode.
        case Response.CommandError(_, _, _, cause) =>
          cause.originalDocument match {
            case pack.IsDocument(doc) =>
              // Error document as result
              Future(pack.deserialize(doc, reader))
            case _ => Future.failed[T] {
              cause.setStackTrace(contextSTE.toArray)
              cause
            }
          }
        // A successful reply with zero documents cannot be deserialized.
        case response @ Response.Successful(_, Reply(_, _, _, 0), _, _) =>
          Future.failed[T](new GenericDriverException(
            s"Cannot parse empty response: $response"))
        case response => db.session match {
          case Some(session) =>
            // Let the session observe the response (e.g. operation time)
            // before deserializing.
            Session.updateOnResponse(session, response).map {
              case (_, resp) => pack.readAndDeserialize(resp, reader)
            }
          case _ =>
            Future(pack.readAndDeserialize(response, reader))
        }
      }
    }
    def cursor[T](readPreference: ReadPreference)(implicit reader: pack.Reader[T]): DefaultCursor.Impl[T] = {
      val flags = {
        if (readPreference.slaveOk) options.slaveOk.flags
        else options.flags
      }
      // Commands are sent as a query for exactly one document on `<db>.$cmd`.
      val op = Query(flags, db.name + f".$$cmd", 0, 1)
      DefaultCursor.query(pack, op, (_: Int) => {
        val buffer = WritableBuffer.empty
        pack.serializeAndWrite(buffer, command, writer)
        BufferSequence(buffer.buffer)
      }, readPreference, db, failover, fullCollectionName, maxAwaitTimeMS)
    }
  }
  /** Runs commands serialized with the pack `P`, retrying per `failover`. */
  final class CommandWithPackRunner[P <: SerializationPack](val pack: P, failover: FailoverStrategy = FailoverStrategy()) {
    /** Runs a database-level command and deserializes its single result. */
    def apply[R, C <: Command with CommandWithResult[R]](db: DB, command: C with CommandWithResult[R], rp: ReadPreference)(implicit writer: pack.Writer[C], reader: pack.Reader[R], ec: ExecutionContext): Future[R] = defaultCursorFetcher(db, pack, command.commandKind, command, failover).one[R](rp)
    /** Prepares a fetcher for a database-level command. */
    def apply[C <: Command](db: DB, command: C)(implicit writer: pack.Writer[C]): CursorFetcher[pack.type, Cursor] = defaultCursorFetcher(db, pack, command.commandKind, command, failover)
    // collection
    /** Runs a collection command and deserializes its single result. */
    def apply[R, C <: CollectionCommand with CommandWithResult[R]](collection: Collection, command: C with CommandWithResult[R], rp: ReadPreference)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[R] = defaultCursorFetcher(collection.db, pack, command.commandKind, new ResolvedCollectionCommand(collection.name, command), failover).one[R](rp)
    /** Prepares a fetcher for a collection command. */
    def apply[C <: CollectionCommand](collection: Collection, command: C)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): CursorFetcher[pack.type, Cursor] = defaultCursorFetcher(collection.db, pack, command.commandKind, new ResolvedCollectionCommand(collection.name, command), failover)
    /**
     * Executes the `command` against `collection` and returns a cursor
     * over its results (the cursor does not run until it is consumed).
     */
    def cursor[R, C <: CollectionCommand with CommandWithResult[R]](collection: Collection, command: C, options: CursorOptions, rp: ReadPreference, maxAwaitTimeMS: Option[Long])(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R]): DefaultCursor.Impl[R] = fetchCursor(collection.db, collection.fullCollectionName, pack,
      command.commandKind,
      new ResolvedCollectionCommand(collection.name, command),
      failover, options, maxAwaitTimeMS).cursor[R](rp)
    /**
     * Executes the `command` and returns its result
     * along with the MongoDB response.
     */
    def withResponse[R, C <: CollectionCommand with CommandWithResult[R]](collection: Collection, command: C, rp: ReadPreference)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[ResponseResult[R]] = {
      val cursor = defaultCursorFetcher(collection.db, pack,
        command.commandKind,
        new ResolvedCollectionCommand(collection.name, command), failover).
        cursor[R](rp)
      for {
        resp <- cursor.makeRequest(cursor.numberToReturn)
        iterator = cursor.documentIterator(resp)
        result <- {
          if (!iterator.hasNext) {
            Future.failed(new GenericDriverException("missing result"))
          } else Future.successful(iterator.next())
        }
      } yield ResponseResult(resp, cursor.numberToReturn, result)
    }
    /** Wraps an already-serialized document as an ad-hoc command. */
    def rawCommand[T](input: T)(implicit writer: pack.Writer[T]): RawCommand =
      RawCommand(pack.serialize(input, writer))
    /** An ad-hoc command carrying a pre-serialized document. */
    case class RawCommand(document: pack.Document) extends Command {
      val commandKind = CommandKind.Undefined
    }
    object RawCommand {
      implicit val writer: pack.Writer[RawCommand] = pack.writer(_.document)
    }
  }
  /**
   * Returns a command runner.
   *
   * @param pack the serialization pack
   * @param failover the failover strategy
   *
   * {{{
   * import reactivemongo.api.FailoverStrategy
   * import reactivemongo.api.commands.Command
   *
   * import reactivemongo.api.bson.BSONDocument
   * import reactivemongo.api.bson.collection.BSONSerializationPack
   *
   * val runner = Command.run(BSONSerializationPack, FailoverStrategy.default)
   * val cmd: runner.RawCommand =
   *   runner.rawCommand(BSONDocument(f"$$count" -> "coll"))
   *
   * def foo(db: reactivemongo.api.DB) = runner(db, cmd)
   * }}}
   */
  def run[P <: SerializationPack](pack: P, failover: FailoverStrategy): CommandWithPackRunner[pack.type] = new CommandWithPackRunner(pack, failover)
  /**
   * Builds the low-level request maker for the given command.
   *
   * @param kind the kind of command being sent
   * @param command the command to be requested
   * @param writer the writer used to serialize the command
   * @param readPreference the read preference (adds the SlaveOk flag when applicable)
   * @param db the database name
   */
  def buildRequestMaker[P <: SerializationPack, A](pack: P)(
    kind: CommandKind,
    command: A,
    writer: pack.Writer[A],
    readPreference: ReadPreference,
    db: String): RequestMaker = {
    val buffer = WritableBuffer.empty
    pack.serializeAndWrite(buffer, command, writer)
    val documents = BufferSequence(buffer.buffer)
    val flags = if (readPreference.slaveOk) QueryFlags.SlaveOk else 0
    val query = Query(flags, db + f".$$cmd", 0, 1)
    RequestMaker(kind, query, documents, readPreference)
  }
}
/**
 * A [[CollectionCommand]] bound to the collection it targets.
 *
 * @param collection the name of the collection against which the command is executed
 * @param command the executed command
 */
final class ResolvedCollectionCommand[C <: CollectionCommand](
  val collection: String,
  val command: C) extends Command {

  @inline def commandKind = command.commandKind

  override def hashCode: Int = (collection, command).hashCode

  @SuppressWarnings(Array("ComparingUnrelatedTypes"))
  override def equals(that: Any): Boolean = that match {
    case other: ResolvedCollectionCommand[_] =>
      collection == other.collection && command == other.command

    case _ =>
      false
  }

  @inline override def toString: String =
    s"ResolvedCollectionCommand($collection,$command)"
}
| ReactiveMongo/ReactiveMongo | driver/src/main/scala/api/commands/commands.scala | Scala | apache-2.0 | 10,357 |
/*
* Copyright © 2016 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.cdap.examples.sparkstreaming
import co.cask.cdap.api.Resources
import co.cask.cdap.api.common.Bytes
import co.cask.cdap.api.spark.{AbstractSpark, SparkExecutionContext, SparkMain}
import com.google.common.base.Strings
import kafka.serializer.{DefaultDecoder, StringDecoder}
import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.{Logger, LoggerFactory}
/**
 * Consumes messages from kafka topics and classifies them as spam or ham based on a NaiveBayes model.
 * A comma separated list of brokers and topics must be provided as runtime arguments to this program.
 * Kafka messages should be in format of message-id:message where message-id should be unique. For example
 * message1:hello world
 */
class SpamClassifierProgram(name: String) extends AbstractSpark with SparkMain {
  import SpamClassifierProgram._
  /** Convenience constructor: use the simple class name as the program name. */
  def this() = this(classOf[SpamClassifierProgram].getSimpleName)
  override protected def configure(): Unit = {
    setName(name)
    setMainClass(classOf[SpamClassifierProgram])
    // Fixed "Kaka" -> "Kafka" typo in the description and removed the
    // non-idiomatic trailing semicolon.
    setDescription("Spark Streaming Based Kafka Message Classifier")
    setDriverResources(new Resources(2048))
    setExecutorResources(new Resources(1024))
  }
  override def initialize(): Unit = {
    getContext().setSparkConf(new SparkConf().set("spark.driver.extraJavaOptions", "-XX:MaxPermSize=256m"))
  }
  // Explicit `: Unit =` replaces the deprecated procedure syntax
  // (removed in Scala 3); behavior is unchanged.
  override def run(implicit sec: SparkExecutionContext): Unit = {
    val sparkContext = new SparkContext
    LOG.info("Reading stream {} to build classification model", SpamClassifier.STREAM)
    // read the training data stream
    val trainingData = sparkContext.fromStream[String](SpamClassifier.STREAM, 0, Long.MaxValue)
    val termFrequencies = new HashingTF(numFeatures = 1000)
    // spam messages
    val spam = trainingData.filter(_.startsWith("spam")).map(_.split("\\t")(1))
    // not spam messages
    val ham = trainingData.filter(_.startsWith("ham")).map(_.split("\\t")(1))
    // Label spam as 1.0 and ham as 0.0 over hashed term-frequency vectors.
    val spamLabeledPoint = spam.map(line => termFrequencies.transform(line.split(" "))).map(x => new LabeledPoint(1, x))
    val hamLabeledPoint = ham.map(line => termFrequencies.transform(line.split(" "))).map(x => new LabeledPoint(0, x))
    val modelData = spamLabeledPoint.union(hamLabeledPoint)
    val model = NaiveBayes.train(modelData, 1.0)
    LOG.info("Built a NaiveBayes model for classification with training data of size: {}", modelData.count())
    val streamingContext = new StreamingContext(sparkContext, Seconds(2))
    // Create direct kafka stream with brokers and topics
    val brokers = sec.getRuntimeArguments.get("kafka.brokers")
    require(!Strings.isNullOrEmpty(brokers), "A comma separated list of kafka brokers must be specified. For example:" +
      " broker1-host:port,broker2-host:port")
    val topics = sec.getRuntimeArguments.get("kafka.topics")
    require(!Strings.isNullOrEmpty(topics), "A comma separated list of kafka topics must be specified. For example: " +
      "topic1,topic2")
    val topicsSet = topics.split(",").toSet
    val kafkaParams = Map("metadata.broker.list" -> brokers, "auto.offset.reset" -> "smallest")
    LOG.info("Trying to create a DStream for Kafka with kafka params {} and topics {}", kafkaParams, topicsSet: Any)
    val kafkaData = KafkaUtils.createDirectStream[Array[Byte], String, DefaultDecoder, StringDecoder](streamingContext,
      kafkaParams, topicsSet)
    // transform the kafka DStream to a key-value pair of message-id and message
    val messages = kafkaData.map(_._2.split(":", 2) match { case Array(x, y) => (x, y) })
    // for each RDD in the DStream transform it a RDD of message-id and label (spam or ham)
    messages.foreachRDD { rdd =>
      rdd.map { line =>
        val vector = termFrequencies.transform(line._2.split(" "))
        val value: Double = model.predict(vector)
        (Bytes.toBytes(line._1), value)
      }.saveAsDataset(SpamClassifier.DATASET)
    }
    streamingContext.start()
    try {
      streamingContext.awaitTermination()
    } catch {
      // Stop gracefully (and stop the SparkContext) if interrupted.
      case _: InterruptedException => streamingContext.stop(true, true)
    }
  }
}
/** Companion holding the shared logger for [[SpamClassifierProgram]]. */
object SpamClassifierProgram {
  private final val LOG: Logger = LoggerFactory.getLogger(classOf[SpamClassifierProgram])
}
| caskdata/cdap | cdap-examples/SpamClassifier/src/main/scala/co/cask/cdap/examples/sparkstreaming/SpamClassifierProgram.scala | Scala | apache-2.0 | 5,074 |
package sorm.test.features
import org.scalatest.{FunSuite, Matchers}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import sorm._, core._
import sext._, embrace._
import sorm.test.MultiInstanceSuite
@RunWith(classOf[JUnitRunner])
class FetchWithSqlSuite extends FunSuite with Matchers with MultiInstanceSuite {
  import FetchWithSqlSuite._
  /** Entities registered with every SORM instance under test. */
  def entities = Set() + Entity[A]()
  // Restrict this suite to in-memory H2 only.
  override val dbTypes = DbType.H2 :: Nil
  instancesAndIds foreach { case (db, dbId) =>
    // Seed three rows; only a2 is asserted on, a1/a3 pad the table so the
    // WHERE clause actually filters something.
    val a1 = db.save(A( "A" ))
    val a2 = db.save(A( "B" ))
    val a3 = db.save(A( "C" ))
    test(dbId + " - general") {
      // Selecting only the `id` column lets SORM hydrate the full entity.
      db.fetchWithSql[A]("select id from a where a=?", "B").head
        .should( equal(a2) )
    }
    test(dbId + " - fails on excess columns") {
      intercept[AssertionError] {
        db.fetchWithSql[A]("select id, a from a where a=?", "B")
      }
    }
    test(dbId + " - fails on a wrong single column") {
      intercept[AssertionError] {
        db.fetchWithSql[A]("select a from a where a=?", "B")
      }
    }
  }
}
/** Test fixtures for [[FetchWithSqlSuite]]. */
object FetchWithSqlSuite {
  /** Minimal persisted entity with a single string column `a`. */
  case class A ( a : String )
} | pjfanning/sorm | src/test/scala/sorm/test/features/FetchWithSqlSuite.scala | Scala | mit | 1,115 |
/*
* Copyright 2019 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair.crypto.keymanager
import java.io.File
import java.nio.file.Files
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.bitcoin.DeterministicWallet.KeyPath
import fr.acinq.bitcoin.{Block, ByteVector32, DeterministicWallet}
import fr.acinq.eclair.Setup.Seeds
import fr.acinq.eclair.channel.ChannelConfig
import fr.acinq.eclair.crypto.ShaChain
import fr.acinq.eclair.{NodeParams, TestConstants, TestUtils}
import org.scalatest.funsuite.AnyFunSuite
import scodec.bits._
/**
 * Regression vectors for [[LocalChannelKeyManager]]: the same seed must
 * always derive the same node id, channel points and commitment secrets,
 * otherwise existing channels would break after an upgrade.
 */
class LocalChannelKeyManagerSpec extends AnyFunSuite {
  test("generate the same secrets from the same seed") {
    // data was generated with eclair 0.3
    val seed = hex"17b086b228025fa8f4416324b6ba2ec36e68570ae2fc3d392520969f2a9d0c1501"
    val nodeKeyManager = new LocalNodeKeyManager(seed, Block.TestnetGenesisBlock.hash)
    val channelKeyManager = new LocalChannelKeyManager(seed, Block.TestnetGenesisBlock.hash)
    assert(nodeKeyManager.nodeId == PublicKey(hex"02a051267759c3a149e3e72372f4e0c4054ba597ebfd0eda78a2273023667205ee"))
    val keyPath = KeyPath("m/1'/2'/3'/4'")
    assert(channelKeyManager.commitmentSecret(keyPath, 0L).value == ByteVector32.fromValidHex("fa7a8c2fc62642f7a9a19ea0bfad14d39a430f3c9899c185dcecc61c8077891e"))
    assert(channelKeyManager.commitmentSecret(keyPath, 1L).value == ByteVector32.fromValidHex("3e82338d3e487c760ee10448127613d196b040e86ce90d2d437db6425bb7301c"))
    assert(channelKeyManager.commitmentSecret(keyPath, 2L).value == ByteVector32.fromValidHex("102357f7a9b2d0b9147f645c98aa156d3278ddb4745caf0631773dd663e76e6f"))
    assert(channelKeyManager.commitmentPoint(keyPath, 0L).value == hex"0x0237dd5a0ea26ed84ed1249d46cc715679b542939d6943b42232e043825cde3944")
    assert(DeterministicWallet.encode(channelKeyManager.delayedPaymentPoint(keyPath), DeterministicWallet.tpub) == "tpubDMBn7xW1g1Gsok5eThkJAKJnB3ZFqZQnvsdWv8VvM3RjZkqVPZZpjPDAAmbyDHnZPdAZY8EnFBh1ibTBtiuDqb8t9wRcAZiFihma3yYRG1f")
    assert(DeterministicWallet.encode(channelKeyManager.htlcPoint(keyPath), DeterministicWallet.tpub) == "tpubDMBn7xW1g1GsqpsqaVNB1ehpjktQUX44Dycy7fJ6thp774XGzNeWFmQf5L6dVChHREgkoc8BYc2caHqwc2mZzTYCwoxsvrpchBSujsPCvGH")
    assert(DeterministicWallet.encode(channelKeyManager.paymentPoint(keyPath), DeterministicWallet.tpub) == "tpubDMBn7xW1g1Gsme9jTAEJwTvizDJtJEgE3jc9vkDqQ9azuh9Es2aM6GsioFiouwdvWPJoNw2zavCkVTMta6UJN6BWR5cMZQsSHvsFyQNfGzv")
    assert(DeterministicWallet.encode(channelKeyManager.revocationPoint(keyPath), DeterministicWallet.tpub) == "tpubDMBn7xW1g1GsizhaZ7M4co6sBtUDhRUKgUUPWRv3WfLTpTGYrSjATJy6ZVSoYFCKRnaBop5dFig3Ham1P145NQAKuUgPUbujLAooL7F2vy6")
  }
  test("compute channel key path from funding keys") {
    // if this test fails it means that we don't generate the same channel key path from the same funding pubkey, which
    // will break existing channels !
    val pub = PrivateKey(ByteVector32.fromValidHex("01" * 32)).publicKey
    val keyPath = ChannelKeyManager.keyPath(pub)
    assert(keyPath.toString() == "m/1909530642'/1080788911/847211985'/1791010671/1303008749'/34154019'/723973395/767609665")
  }
  /** Builds a funding key path from 32 bytes of entropy: eight unsigned
   *  32-bit ints plus a hardened funder (1') / fundee (0') discriminator. */
  def makefundingKeyPath(entropy: ByteVector, isFunder: Boolean): KeyPath = {
    val items = for (i <- 0 to 7) yield entropy.drop(i * 4).take(4).toInt(signed = false) & 0xFFFFFFFFL
    val last = DeterministicWallet.hardened(if (isFunder) 1L else 0L)
    KeyPath(items :+ last)
  }
  test("test vectors (testnet, funder)") {
    val seed = ByteVector.fromValidHex("17b086b228025fa8f4416324b6ba2ec36e68570ae2fc3d392520969f2a9d0c1501")
    val channelKeyManager = new LocalChannelKeyManager(seed, Block.TestnetGenesisBlock.hash)
    val fundingKeyPath = makefundingKeyPath(hex"be4fa97c62b9f88437a3be577b31eb48f2165c7bc252194a15ff92d995778cfb", isFunder = true)
    val fundingPub = channelKeyManager.fundingPublicKey(fundingKeyPath)
    val localParams = TestConstants.Alice.channelParams.copy(fundingKeyPath = fundingKeyPath)
    val channelKeyPath = channelKeyManager.keyPath(localParams, ChannelConfig.standard)
    assert(fundingPub.publicKey == PrivateKey(hex"216414970b4216b197a1040367419ad6922f80e8b73ced083e9afe5e6ddd8e4c").publicKey)
    assert(channelKeyManager.revocationPoint(channelKeyPath).publicKey == PrivateKey(hex"a4e7ab3c54752a3487b3c474467843843f28d3bb9113e65e92056ad45d1e318e").publicKey)
    assert(channelKeyManager.paymentPoint(channelKeyPath).publicKey == PrivateKey(hex"de24c43d24b8d6bc66b020ac81164206bb577c7924511d4e99431c0d60505012").publicKey)
    assert(channelKeyManager.delayedPaymentPoint(channelKeyPath).publicKey == PrivateKey(hex"8aa7b8b14a7035540c331c030be0dd73e8806fb0c97a2519d63775c2f579a950").publicKey)
    assert(channelKeyManager.htlcPoint(channelKeyPath).publicKey == PrivateKey(hex"94eca6eade204d6e753344c347b46bb09067c92b2fe371cf4f8362c1594c8c59").publicKey)
    assert(channelKeyManager.commitmentSecret(channelKeyPath, 0).value == ShaChain.shaChainFromSeed(ByteVector32.fromValidHex("64e9d1e9840add3bb02c1525995edd28feea67f1df7a9ee075179e8541adc7a2"), 0xFFFFFFFFFFFFL))
  }
  test("test vectors (testnet, fundee)") {
    val seed = ByteVector.fromValidHex("aeb3e9b5642cd4523e9e09164047f60adb413633549c3c6189192921311894d501")
    val channelKeyManager = new LocalChannelKeyManager(seed, Block.TestnetGenesisBlock.hash)
    val fundingKeyPath = makefundingKeyPath(hex"06535806c1aa73971ec4877a5e2e684fa636136c073810f190b63eefc58ca488", isFunder = false)
    val fundingPub = channelKeyManager.fundingPublicKey(fundingKeyPath)
    val localParams = TestConstants.Alice.channelParams.copy(fundingKeyPath = fundingKeyPath)
    val channelKeyPath = channelKeyManager.keyPath(localParams, ChannelConfig.standard)
    assert(fundingPub.publicKey == PrivateKey(hex"7bb8019c99fcba1c6bd0cc7f3c635c14c658d26751232d6a6350d8b6127d53c3").publicKey)
    assert(channelKeyManager.revocationPoint(channelKeyPath).publicKey == PrivateKey(hex"26510db99546c9b08418fe9df2da710a92afa6cc4e5681141610dfb8019052e6").publicKey)
    assert(channelKeyManager.paymentPoint(channelKeyPath).publicKey == PrivateKey(hex"0766c93fd06f69287fcc7b343916e678b83942345d4080e83f4c8a061b1a9f4b").publicKey)
    assert(channelKeyManager.delayedPaymentPoint(channelKeyPath).publicKey == PrivateKey(hex"094aa052a9647228fd80e42461cae26c04f6cdd1665b816d4660df686915319a").publicKey)
    assert(channelKeyManager.htlcPoint(channelKeyPath).publicKey == PrivateKey(hex"8ec62bd03b241a2e522477ae1a9861a668429ab3e443abd2aa0f2f10e2dc2206").publicKey)
    assert(channelKeyManager.commitmentSecret(channelKeyPath, 0).value == ShaChain.shaChainFromSeed(ByteVector32.fromValidHex("c49e98202b0fee19f28fd3af60691aaacdd2c09e20896f5fa3ad1b9b70e4879f"), 0xFFFFFFFFFFFFL))
  }
  test("test vectors (mainnet, funder)") {
    val seed = ByteVector.fromValidHex("d8d5431487c2b19ee6486aad6c3bdfb99d10b727bade7fa848e2ab7901c15bff01")
    val channelKeyManager = new LocalChannelKeyManager(seed, Block.LivenetGenesisBlock.hash)
    val fundingKeyPath = makefundingKeyPath(hex"ec1c41cd6be2b6e4ef46c1107f6c51fbb2066d7e1f7720bde4715af233ae1322", isFunder = true)
    val fundingPub = channelKeyManager.fundingPublicKey(fundingKeyPath)
    val localParams = TestConstants.Alice.channelParams.copy(fundingKeyPath = fundingKeyPath)
    val channelKeyPath = channelKeyManager.keyPath(localParams, ChannelConfig.standard)
    assert(fundingPub.publicKey == PrivateKey(hex"b97c04796850e9d74a06c9d7230d85e2ecca3598b162ddf902895ece820c8f09").publicKey)
    assert(channelKeyManager.revocationPoint(channelKeyPath).publicKey == PrivateKey(hex"ee13db7f2d7e672f21395111ee169af8462c6e8d1a6a78d808f7447b27155ffb").publicKey)
    assert(channelKeyManager.paymentPoint(channelKeyPath).publicKey == PrivateKey(hex"7fc18e4c925bf3c5a83411eac7f234f0c5eaef9a8022b22ec6e3272ae329e17e").publicKey)
    assert(channelKeyManager.delayedPaymentPoint(channelKeyPath).publicKey == PrivateKey(hex"c0d9a3e3601d79b11b948db9d672fcddafcb9a3c0873c6a738bb09087ea2bfc6").publicKey)
    assert(channelKeyManager.htlcPoint(channelKeyPath).publicKey == PrivateKey(hex"bd3ba7068d131a9ab47f33202d532c5824cc5fc35a9adada3644ac2994372228").publicKey)
    assert(channelKeyManager.commitmentSecret(channelKeyPath, 0).value == ShaChain.shaChainFromSeed(ByteVector32.fromValidHex("7799de34239f97837a12191f5b60e766e32e9704bb84b0f12b539e9bf6a0dc2a"), 0xFFFFFFFFFFFFL))
  }
  test("test vectors (mainnet, fundee)") {
    val seed = ByteVector.fromValidHex("4b809dd593b36131c454d60c2f7bdfd49d12ec455e5b657c47a9ca0f5dfc5eef01")
    val channelKeyManager = new LocalChannelKeyManager(seed, Block.LivenetGenesisBlock.hash)
    val fundingKeyPath = makefundingKeyPath(hex"2b4f045be5303d53f9d3a84a1e70c12251168dc29f300cf9cece0ec85cd8182b", isFunder = false)
    val fundingPub = channelKeyManager.fundingPublicKey(fundingKeyPath)
    val localParams = TestConstants.Alice.channelParams.copy(fundingKeyPath = fundingKeyPath)
    val channelKeyPath = channelKeyManager.keyPath(localParams, ChannelConfig.standard)
    assert(fundingPub.publicKey == PrivateKey(hex"46a4e818615a48a99ce9f6bd73eea07d5822dcfcdff18081ea781d4e5e6c036c").publicKey)
    assert(channelKeyManager.revocationPoint(channelKeyPath).publicKey == PrivateKey(hex"c2cd9e2f9f8203f16b1751bd252285bb2e7fc4688857d620467b99645ebdfbe6").publicKey)
    assert(channelKeyManager.paymentPoint(channelKeyPath).publicKey == PrivateKey(hex"1e4d3527788b39dc8ebc0ae6368a67e92eff55a43bea8e93054338ca850fa340").publicKey)
    assert(channelKeyManager.delayedPaymentPoint(channelKeyPath).publicKey == PrivateKey(hex"6bc30b0852fbc653451662a1ff6ad530f311d58b5e5661b541eb57dba8206937").publicKey)
    assert(channelKeyManager.htlcPoint(channelKeyPath).publicKey == PrivateKey(hex"b1be27b5232e3bc5d6a261949b4ee68d96fa61f481998d36342e2ad99444cf8a").publicKey)
    assert(channelKeyManager.commitmentSecret(channelKeyPath, 0).value == ShaChain.shaChainFromSeed(ByteVector32.fromValidHex("eeb3bad6808e8bb5f1774581ccf64aa265fef38eca80a1463d6310bb801b3ba7"), 0xFFFFFFFFFFFFL))
  }
  test("keep the same channel seed after a migration from the old seed.dat file") {
    // When only the legacy seed.dat exists, the migration must create a
    // channel_seed.dat with identical content so channel keys are unchanged.
    val seed = hex"17b086b228025fa8f4416324b6ba2ec36e68570ae2fc3d392520969f2a9d0c1501"
    val seedDatFile = TestUtils.createSeedFile("seed.dat", seed.toArray)
    val Seeds(_, _) = NodeParams.getSeeds(seedDatFile.getParentFile)
    val channelSeedDatFile = new File(seedDatFile.getParentFile, "channel_seed.dat")
    assert(channelSeedDatFile.exists())
    val channelSeedContent = ByteVector(Files.readAllBytes(channelSeedDatFile.toPath))
    assert(seed == channelSeedContent)
  }
}
| ACINQ/eclair | eclair-core/src/test/scala/fr/acinq/eclair/crypto/keymanager/LocalChannelKeyManagerSpec.scala | Scala | apache-2.0 | 11,116 |
/*
* Copyright 2015 Daniel W. H. James
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dwhjames.awswrap.dynamodb
import com.amazonaws.services.dynamodbv2.model._
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
/**
 * A player's score record for one game title.
 *
 * @param userId table hash key
 * @param gameTitle table range key (also the GSI hash key)
 * @param topScore the player's best score (GSI range key)
 * @param topScoreDateTime when the best score was achieved
 * @param wins total wins
 * @param losses total losses
 */
case class GameScore(
    userId: String,
    gameTitle: String,
    topScore: Long,
    topScoreDateTime: DateTime,
    wins: Long,
    losses: Long
)
object GameScore {

  /** DynamoDB table name. */
  val tableName = "GameScores"
  /** Global secondary index keyed by (gameTitle, topScore). */
  val globalSecondaryIndexName = "GameTitleIndex"

  // Table definition: hash key userId, range key gameTitle, plus a KEYS_ONLY
  // GSI over (gameTitle, topScore) for leaderboard-style queries.
  val tableRequest =
    new CreateTableRequest()
    .withTableName(GameScore.tableName)
    .withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
    .withAttributeDefinitions(
      Schema.stringAttribute(Attributes.userId),
      Schema.stringAttribute(Attributes.gameTitle),
      Schema.numberAttribute(Attributes.topScore)
    )
    .withKeySchema(
      Schema.hashKey(Attributes.userId),
      Schema.rangeKey(Attributes.gameTitle)
    )
    .withGlobalSecondaryIndexes(
      new GlobalSecondaryIndex()
      .withIndexName(GameScore.globalSecondaryIndexName)
      .withProvisionedThroughput(Schema.provisionedThroughput(10L, 5L))
      .withKeySchema(
        Schema.hashKey(Attributes.gameTitle),
        Schema.rangeKey(Attributes.topScore)
      )
      .withProjection(
        new Projection()
        .withProjectionType(ProjectionType.KEYS_ONLY)
      )
    )

  /** Attribute (column) names used in the table. */
  object Attributes {
    val userId = "UserId"
    val gameTitle = "GameTitle"
    val topScore = "TopScore"
    val topScoreDateTime = "TopScoreDateTime"
    val wins = "Wins"
    val losses = "Losses"
  }

  // NOTE(review): name looks like a typo for "gameScoreSerializer"; kept
  // as-is since the implicit may be referenced by name elsewhere.
  implicit object sameScoreSerializer extends DynamoDBSerializer[GameScore] {
    // ISO-8601 formatter used to store the date-time as a string attribute.
    private val fmt = ISODateTimeFormat.dateTime

    override val tableName = GameScore.tableName
    override val hashAttributeName = Attributes.userId
    override val rangeAttributeName = Some(Attributes.gameTitle)

    override def primaryKeyOf(score: GameScore) =
      Map(
        Attributes.userId -> score.userId,
        Attributes.gameTitle -> score.gameTitle
      )
    override def toAttributeMap(score: GameScore) =
      Map(
        Attributes.userId -> score.userId,
        Attributes.gameTitle -> score.gameTitle,
        Attributes.topScore -> score.topScore,
        Attributes.topScoreDateTime -> fmt.print(score.topScoreDateTime),
        Attributes.wins -> score.wins,
        Attributes.losses -> score.losses
      )
    // Reads rely on the library's implicit conversions from AttributeValue
    // to String/Long (presumably imported via the dynamodb package).
    override def fromAttributeMap(item: collection.mutable.Map[String, AttributeValue]) =
      GameScore(
        userId = item(Attributes.userId),
        gameTitle = item(Attributes.gameTitle),
        topScore = item(Attributes.topScore),
        topScoreDateTime = fmt.parseDateTime(item(Attributes.topScoreDateTime)),
        wins = item(Attributes.wins),
        losses = item(Attributes.losses)
      )
  }
}
| dwhjames/aws-wrap | integration/src/it/scala/dynamodb/GameScore.scala | Scala | apache-2.0 | 3,563 |
package org.jetbrains.plugins.scala
package annotator
import com.intellij.lang.annotation.AnnotationHolder
import org.jetbrains.plugins.scala.extensions.{PsiMethodExt, ResolvesTo}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScCompoundTypeElement, ScTypeElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.types.ComparingUtil._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.DesignatorOwner
import org.jetbrains.plugins.scala.lang.psi.types.api.{ScTypePresentation, _}
import org.jetbrains.plugins.scala.lang.psi.types.{ScAbstractType, ScParameterizedType, ScType, ScTypeExt, ScalaType}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import org.jetbrains.plugins.scala.project.ProjectContext
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
/**
* Jason Zaugg
*/
/** Mix-in hook that runs pattern compatibility checks during annotation passes. */
trait PatternAnnotator {

  /**
   * Checks `pattern` and reports problems to `holder`, but only when error
   * highlighting is enabled for the current pass.
   */
  def annotatePattern(pattern: ScPattern, holder: AnnotationHolder, highlightErrors: Boolean) {
    if (highlightErrors) PatternAnnotator.checkPattern(pattern, holder)
  }
}
object PatternAnnotator {
def checkPattern(pattern: ScPattern, holder: AnnotationHolder): Unit = {
implicit val ctx: ProjectContext = pattern
for {
pType <- patternType(pattern)
eType <- pattern.expectedType
} {
checkPatternType(pType, eType, pattern, holder)
}
}
  /**
   * Checks the inferred pattern type against the scrutinee (expected) type and reports
   * errors or warnings to `holder` when a match is statically impossible or fruitless.
   *
   * Logic in this method is mimicked from compiler sources:
   * [[scala.tools.nsc.typechecker.Infer.Inferencer]] and [[scala.tools.nsc.typechecker.Checkable]]
   *
   */
  private def checkPatternType(_patType: ScType, exprType: ScType, pattern: ScPattern, holder: AnnotationHolder) = {
    implicit val ctx: ProjectContext = pattern
    // Scrutinee type, widened with aliases expanded; pattern type with alias definitions removed.
    val exTp = widen(ScalaType.expandAliases(exprType).getOrElse(exprType))
    val patType = _patType.removeAliasDefinitions()
    def freeTypeParams = freeTypeParamsOfTerms(exTp)
    def exTpMatchesPattp = matchesPattern(exTp, widen(patType))
    // True when the scrutinee can be shown statically to never match the pattern type.
    val neverMatches = !matchesPattern(exTp, patType) && isNeverSubType(exTp, patType)
    // A statically impossible typed pattern may still succeed at runtime once erasure
    // removes the distinguishing type arguments.
    def isEliminatedByErasure = (exprType.extractClass, patType.extractClass) match {
      case (Some(cl1), Some(cl2)) if pattern.isInstanceOf[ScTypedPattern] => !isNeverSubClass(cl1, cl2)
      case _ => false
    }
    // Extractor: a stable-identifier pattern whose reference resolves to a `var`
    // (class parameter or variable) — not a legal stable identifier.
    object StableIdResolvesToVar {
      def unapply(stable: ScStableReferenceElementPattern): Boolean = {
        stable.getReferenceExpression.orNull match {
          case ResolvesTo(ScalaPsiUtil.inNameContext(nameCtx)) => nameCtx match {
            case param: ScClassParameter => param.isVar
            case _: ScVariable => true
            case _ => false
          }
          case _ => false
        }
      }
    }
    pattern match {
      // Nothing, Null and AnyVal cannot be used in a type pattern.
      case _: ScTypedPattern if Seq(Nothing, Null, AnyVal) contains patType =>
        val message = ScalaBundle.message("type.cannot.be.used.in.type.pattern", patType.presentableText)
        holder.createErrorAnnotation(pattern, message)
      // Final scrutinee type with no free type parameters that cannot conform to the pattern type.
      case _: ScTypedPattern if exTp.isFinalType && freeTypeParams.isEmpty && !exTpMatchesPattp =>
        val (exprTypeText, patTypeText) = ScTypePresentation.different(exprType, patType)
        val message = ScalaBundle.message("scrutinee.incompatible.pattern.type", patTypeText, exprTypeText)
        holder.createErrorAnnotation(pattern, message)
      // Refinement types cannot be checked at runtime — warn that the test is unchecked.
      case ScTypedPattern(typeElem @ ScCompoundTypeElement(_, Some(_))) =>
        val message = ScalaBundle.message("pattern.on.refinement.unchecked")
        holder.createWarningAnnotation(typeElem, message)
      case _: ScConstructorPattern if neverMatches && patType.isFinalType =>
        val message = ScalaBundle.message("constructor.cannot.be.instantiated.to.expected.type", patType, exprType)
        holder.createErrorAnnotation(pattern, message)
      case (_: ScTuplePattern | _: ScInfixPattern) if neverMatches =>
        val message = ScalaBundle.message("pattern.type.incompatible.with.expected", patType, exprType)
        holder.createErrorAnnotation(pattern, message)
      case _ if patType.isFinalType && neverMatches =>
        val (exprTypeText, patTypeText) = ScTypePresentation.different(exprType, patType)
        val message = ScalaBundle.message("pattern.type.incompatible.with.expected", patTypeText, exprTypeText)
        holder.createErrorAnnotation(pattern, message)
      // Never matches statically, but might be masked by erasure: warn rather than error.
      case (_: ScTypedPattern | _: ScConstructorPattern) if neverMatches =>
        val erasureWarn =
          if (isEliminatedByErasure) ScalaBundle.message("erasure.warning")
          else ""
        val (exprTypeText, patTypeText) = ScTypePresentation.different(exprType, patType)
        val message = ScalaBundle.message("fruitless.type.test", exprTypeText, patTypeText) + erasureWarn
        holder.createWarningAnnotation(pattern, message)
      case StableIdResolvesToVar() =>
        val message = ScalaBundle.message("stable.identifier.required", pattern.getText)
        holder.createErrorAnnotation(pattern, message)
      case _: ScInterpolationPattern => //do not check interpolated patterns for number of arguments
      case (_: ScConstructorPattern|_: ScInfixPattern) => //check number of arguments
        // Count sub-patterns: infix patterns flatten a right-hand tuple pattern into extra args.
        val (reference, numPatterns) = pattern match {
          case constr: ScConstructorPattern => (Option(constr.ref), constr.args.patterns.length)
          case infix: ScInfixPattern =>
            val numPatterns: Int = infix.rightOption match {
              case Some(_: ScInfixPattern | _: ScConstructorPattern) => 2
              case Some(right) => right.subpatterns match {
                case Seq() => 2
                case s => s.length + 1
              }
              case _ => 1
            }
            (Option(infix.operation), numPatterns)
        }
        // Compare against the arity implied by the resolved unapply/unapplySeq return type.
        reference match {
          case Some(ref) =>
            ref.bind() match {
              case Some(ScalaResolveResult(fun: ScFunction, substitutor)) if fun.name == "unapply" => fun.returnType match {
                case Right(rt) =>
                  val expected = ScPattern.expectedNumberOfExtractorArguments(substitutor.subst(rt), pattern, ScPattern.isOneArgCaseClassMethod(fun))
                  val tupleCrushingIsPresent = expected > 0 && numPatterns == 1 && !fun.isSynthetic
                  if (expected != numPatterns && !tupleCrushingIsPresent) { //1 always fits if return type is Option[TupleN]
                    val message = ScalaBundle.message("wrong.number.arguments.extractor", numPatterns.toString, expected.toString)
                    holder.createErrorAnnotation(pattern, message)
                  }
                case _ =>
              }
              case Some(ScalaResolveResult(fun: ScFunction, substitutor)) if fun.name == "unapplySeq" => fun.returnType match {
                case Right(rt) =>
                  //subtract 1 because last argument (Seq) may be omitted
                  val expected = ScPattern.expectedNumberOfExtractorArguments(substitutor.subst(rt), pattern, ScPattern.isOneArgCaseClassMethod(fun)) - 1
                  if (expected > numPatterns) {
                    val message = ScalaBundle.message("wrong.number.arguments.extractor.unapplySeq", numPatterns.toString, expected.toString)
                    holder.createErrorAnnotation(pattern, message)
                  }
                case _ =>
              }
              case _ =>
            }
          case _ =>
        }
      case _ =>
    }
  }
private def widen(scType: ScType): ScType = scType match {
case designatorOwner: DesignatorOwner if designatorOwner.isSingleton =>
scType.tryExtractDesignatorSingleton
case _ =>
scType.updateRecursively {
case ScAbstractType(_, _, upper) => upper
case tpt: TypeParameterType => tpt.upperType
}
}
private def freeTypeParamsOfTerms(tp: ScType): Seq[ScType] = {
val buffer = ArrayBuffer[ScType]()
tp.visitRecursively {
case tp: TypeParameterType => buffer += tp
case _ =>
}
buffer
}
  // Whether a value of type `matching` could conform to the pattern type `matched`.
  // Array element types are compared recursively; for a parameterized `matched` type the
  // check is retried against a version whose type parameters are replaced by abstract types.
  @tailrec
  def matchesPattern(matching: ScType, matched: ScType): Boolean = {
    // Replaces each type-parameter reference with an abstract type bounded by the
    // (recursively abstracted) declared bounds; `visited` guards against cycles.
    def abstraction(scType: ScType, visited: Set[TypeParameterType] = Set.empty): ScType = {
      scType.updateRecursively {
        case tp: TypeParameterType =>
          if (visited.contains(tp)) tp
          else ScAbstractType(tp.typeParameter,
            abstraction(tp.lowerType, visited + tp),
            abstraction(tp.upperType, visited + tp)
          )
      }
    }
    matching.weakConforms(matched) || ((matching, matched) match {
      case (arrayType(arg1), arrayType(arg2)) => matchesPattern(arg1, arg2)
      case (_, parameterized: ScParameterizedType) =>
        val newtp = abstraction(parameterized)
        !matched.equiv(newtp) && matching.weakConforms(newtp)
      case _ => false
    })
  }
  //computes type of the pattern itself, shouldn't rely on expected type
  def patternType(pattern: ScPattern): Option[ScType] = {
    import pattern.projectContext
    // For extractor-based patterns: the single non-implicit parameter type of the
    // resolved extractor function, with the resolve substitutor applied.
    def constrPatternType(patternRef: ScStableCodeReferenceElement): Option[ScType] = {
      patternRef.bind() match {
        case Some(srr) =>
          srr.getElement match {
            case fun: ScFunction if fun.parameters.count(!_.isImplicitParameter) == 1 =>
              fun.parametersTypes.headOption
                .map(srr.substitutor.subst)
            case _ => None
          }
        case None => None
      }
    }
    pattern match {
      case c: ScConstructorPattern =>
        constrPatternType(c.ref)
      case inf: ScInfixPattern =>
        constrPatternType(inf.operation)
      case tuple: ScTuplePattern =>
        // Tuple type of the sub-pattern types, but only when every sub-pattern has one.
        val subPat = tuple.subpatterns
        val subTypes = subPat.flatMap(patternType)
        if (subTypes.size == subPat.size) {
          Some(TupleType(subTypes)(pattern.elementScope))
        }
        else None
      case typed: ScTypedPattern =>
        typed.typePattern.map(_.typeElement.calcType)
      // Naming and parenthesized patterns delegate to the wrapped pattern.
      case naming: ScNamingPattern =>
        patternType(naming.named)
      case parenth: ScParenthesisedPattern =>
        patternType(parenth.innerElement.orNull)
      case null => None
      case _: ScReferencePattern | _: ScWildcardPattern => Some(Any) //these only have expected type
      case _ => pattern.`type`().toOption
    }
  }
} | jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/PatternAnnotator.scala | Scala | apache-2.0 | 10,581 |
package model
/**
* Created by salim on 9/10/2016.
*/
/**
 * Adds "can be placed inside a container" behaviour to a game object.
 * The containing object is tracked by its id, if any.
 */
trait Containable extends BaseGameObject {

  /** Id of the object currently holding this one; `None` when not contained. */
  var containerId: Option[Int] = None

  /** Detaches this object from whatever container currently holds it. */
  def clearContainer(): Unit = containerId = None
}
| salimfadhley/scalamoo | src/main/scala/model/Containable.scala | Scala | mit | 205 |
package org.orbeon.oxf.common
import org.orbeon.datatypes.LocationData
/** Stub implementation of [[OrbeonLocationExceptionTrait]]: extraction is not implemented yet. */
object OrbeonLocationException extends OrbeonLocationExceptionTrait {

  /** Every piece of location data attached to `throwable` (stub: always empty). */
  def getAllLocationData(throwable: Throwable): List[LocationData] = List.empty // XXX TODO

  /** The root location data attached to `throwable` (stub: always absent). */
  def getRootLocationData(throwable: Throwable): Option[LocationData] = Option.empty // XXX TODO
}
| orbeon/orbeon-forms | common/js/src/main/scala/org/orbeon/oxf/common/OrbeonLocationException.scala | Scala | lgpl-2.1 | 320 |
package com.tribbloids.spookystuff.utils
import com.tribbloids.spookystuff.testutils.FunSpecx
import scala.collection.mutable.ArrayBuffer
class HasEagerObjectsSuite extends FunSpecx with HasEager {

  // Records each class whose constructor has run, in initialization order.
  val initialized: ArrayBuffer[Class[_]] = ArrayBuffer.empty

  // Forces initialization of `Eager` members at construction time.
  // NOTE(review): HasEager/reifyEager are project-defined — presumably reifyEager
  // discovers and touches every member object marked `Eager`; confirm.
  reifyEager()

  // Mixing this in records the concrete class when the object is constructed.
  trait HasInitCount {
    initialized += this.getClass
  }

  // Only `_eager` mixes in `Eager`, so only it should be initialized by reifyEager().
  object _eager extends HasInitCount with Eager

  object _notEager extends HasInitCount

  it("object that extends Eager should be initialized") {
    assert(initialized.size == 1)
    assert(initialized.contains(_eager.getClass))
  }
}
| tribbloid/spookystuff | mldsl/src/test/scala/com/tribbloids/spookystuff/utils/HasEagerObjectsSuite.scala | Scala | apache-2.0 | 580 |
package com.twitter.inject.server
import com.google.inject.Module
import com.twitter.finagle.client.ClientRegistry
import com.twitter.finagle.http.HttpMuxer
import com.twitter.inject.Logging
import com.twitter.inject.app.App
import com.twitter.inject.modules.StatsReceiverModule
import com.twitter.server.Lifecycle.Warmup
import com.twitter.server.handler.ReplyHandler
import com.twitter.server.internal.{FinagleBuildRevision, PromoteToOldGenUtils}
import com.twitter.util.Await
trait TwitterServer
extends com.twitter.server.TwitterServer
with Ports
with App
with Warmup
with Logging {
  // Register the stats receiver module with the framework before the injector is built.
  addFrameworkModule(statsModule)

  /* Protected */

  /** Module providing the StatsReceiver binding; non-final so subclasses can override it. */
  protected def statsModule: Module = StatsReceiverModule // TODO: Use Guice v4 OptionalBinder

  /** Resolve all Finagle clients before warmup method called */
  protected def resolveFinagleClientsOnStartup = true
  /* Overrides */

  /** Entry point: builds the injector via super.main(), then blocks until the admin server exits. */
  override final def main() {
    super.main() // Call GuiceApp.main() to create injector
    info("Startup complete, server ready.")
    // Keep the main thread alive for the lifetime of the admin HTTP server.
    Await.ready(adminHttpServer)
  }
  /** Method to be called after injector creation */
  override protected def postStartup() {
    super.postStartup()
    if (resolveFinagleClientsOnStartup) {
      info("Resolving Finagle clients before warmup")
      // Block until every registered Finagle client has resolved its destination,
      // logging the client names once resolution succeeds.
      Await.ready {
        ClientRegistry.expAllRegisteredClientsResolved() onSuccess { clients =>
          info("Done resolving clients: " + clients.mkString("[", ", ", "]") + ".")
        }
      }
    }
    // Record the Finagle build revision (project utility; see FinagleBuildRevision).
    FinagleBuildRevision.register(injector)
  }
  /**
   * After warmup completes, we want to run PromoteToOldGen without also signaling
   * that we're healthy since we haven't successfully started our servers yet
   */
  override protected def beforePostWarmup() {
    super.beforePostWarmup()
    // Runs the PromoteToOldGen lifecycle step described in the scaladoc above.
    PromoteToOldGenUtils.beforeServing()
  }
/**
* After postWarmup, all external servers have been started, and we can now
* enable our health endpoint
*/
override protected def afterPostWarmup() {
super.afterPostWarmup()
info("Enabling health endpoint on port " + httpAdminPort)
HttpMuxer.addHandler("/health", new ReplyHandler("OK\n"))
}
} | tom-chan/finatra | inject/inject-server/src/main/scala/com/twitter/inject/server/TwitterServer.scala | Scala | apache-2.0 | 2,153 |
/** Trivial fixture class exposing a single constant-valued accessor. */
class A {
  def abc: Int = {
    val value = 124
    value
  }
}
| dotty-staging/dotty | sbt-test/source-dependencies/local-class-inheritance/changes/A2.scala | Scala | apache-2.0 | 33 |
package models.actors
import javax.inject.{Inject, Singleton}
import akka.actor.Actor
import models.actors.SlackServant.InviteMember
import models.repository.IRepositories
import play.api.Play
import play.api.Play.current
import slack.api.SlackApiClient
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Invites attendees to Slack channels
*/
@Singleton
class SlackServant @Inject()(repos: IRepositories) extends Actor {

  def receive = {
    case InviteMember(personId) =>
      // Without a configured API token there is nothing we can do.
      Play.configuration.getString("slack.token").foreach { token =>
        // Look the person up asynchronously and, when found, send the invitation.
        repos.person.find(personId).map {
          case Some(person) =>
            val client = SlackApiClient(token)
            // Channel ids, formerly bound to hmSupportersId, hmGetToKnowYouId and hmCoffeeId.
            val channels = Seq("C04QY3M75", "C052YRSCX", "C0CBWL2RK")
            client.inviteUser(person.email, channels, person.firstName)
          case None => Nil
        }
      }
  }
}
object SlackServant {
  /** Message asking the servant to invite the person with this id to the hard-coded channels. */
  final case class InviteMember(personId: Long)
}
| HappyMelly/teller | app/models/actors/SlackServant.scala | Scala | gpl-3.0 | 1,122 |
package me.fornever.nano
import akka.actor.{ActorRef, Actor, ActorLogging}
import org.mashupbots.socko.events.WebSocketFrameEvent
import io.netty.channel.Channel
class WsCore extends Actor with ActorLogging {

  /** Channels seen so far, mapped to the actor handling each one. Never populated yet in this file. */
  var clients = Map[Channel, ActorRef]()

  def receive = {
    case frame: WebSocketFrameEvent =>
      val request = frame.readText()
      val channel = frame.context.channel()
      dispatchMessage(channel, request)
  }

  /**
   * Routes an incoming text frame: unknown channels are sent through authorization;
   * known channels will eventually have the message forwarded to their actor.
   *
   * The original match handled only `None`, so a channel already present in `clients`
   * would have raised a runtime MatchError — the `Some` branch is now explicit.
   */
  def dispatchMessage(channel: Channel, request: String) {
    clients.get(channel) match {
      case Some(client) =>
        // TODO: forward to the client actor once the message protocol exists,
        // e.g. client ! WsMessage(request)
        ()
      case None =>
        authorize(channel, request)
    }
  }

  /** Authorizes a new channel. Not implemented yet; should register the channel in `clients`. */
  def authorize(channel: Channel, request: String) {
  }
}
| ForNeVeR/nanorevolution | main/src/main/scala/me/fornever/nano/WsCore.scala | Scala | mit | 722 |
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{LogoutEvent, Silhouette}
import forms._
import models.User
import play.api.i18n.MessagesApi
import utils.MyEnv
import scala.concurrent.Future
class ApplicationController @Inject()(val messagesApi: MessagesApi, val silhouette: Silhouette[MyEnv[User]]) extends WebController {

  /**
   * Handles the index action — the authenticated landing ("home") page.
   *
   * @return The result to display.
   */
  def index = SecuredAction.async { implicit request =>
    Future.successful(Ok(views.html.home(request.identity)))
  }

  /**
   * Handles the Sign In action. A user that is already signed in is redirected
   * to the index page instead of being shown the form.
   *
   * @return The result to display.
   */
  def signIn = UserAwareAction.async { implicit request =>
    val page =
      if (request.identity.isDefined) Redirect(routes.ApplicationController.index())
      else Ok(views.html.signIn(SignInForm.form))
    Future.successful(page)
  }

  /**
   * Handles the Sign Up action. A user that is already signed in is redirected
   * to the index page instead of being shown the form.
   *
   * @return The result to display.
   */
  def signUp = UserAwareAction.async { implicit request =>
    val page =
      if (request.identity.isDefined) Redirect(routes.ApplicationController.index())
      else Ok(views.html.signUp(SignUpForm.form))
    Future.successful(page)
  }

  /**
   * Handles the Sign Out action: publishes a logout event, discards the
   * authenticator and redirects to the index page.
   *
   * @return The result to display.
   */
  def signOut = SecuredAction.async { implicit request =>
    val result = Redirect(routes.ApplicationController.index())
    env.eventBus.publish(LogoutEvent(request.identity, request))
    env.authenticatorService.discard(request.authenticator, result)
  }
}
| chang850/play_slick | app/controllers/ApplicationController.scala | Scala | mit | 1,662 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Float => jFloat, Long => jLong}
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class HistogramTest extends Specification with StatTestHelper {
def createStat[T](attribute: String, bins: Int, min: String, max: String, observe: Boolean): Histogram[T] = {
val s = Stat(sft, s"Histogram($attribute,$bins,'$min','$max')")
if (observe) {
features.foreach { s.observe }
}
s.asInstanceOf[Histogram[T]]
}
  // Typed convenience builders: each targets one attribute of the shared test feature
  // type `sft` and delegates to createStat with the bounds rendered as strings.
  def stringStat(bins: Int, min: String, max: String, observe: Boolean = true) =
    createStat[String]("strAttr", bins, min, max, observe)
  def intStat(bins: Int, min: Int, max: Int, observe: Boolean = true) =
    createStat[Integer]("intAttr", bins, min.toString, max.toString, observe)
  def longStat(bins: Int, min: Long, max: Long, observe: Boolean = true) =
    createStat[jLong]("longAttr", bins, min.toString, max.toString, observe)
  def floatStat(bins: Int, min: Float, max: Float, observe: Boolean = true) =
    createStat[jFloat]("floatAttr", bins, min.toString, max.toString, observe)
  def doubleStat(bins: Int, min: Double, max: Double, observe: Boolean = true) =
    createStat[jDouble]("doubleAttr", bins, min.toString, max.toString, observe)
  def dateStat(bins: Int, min: String, max: String, observe: Boolean = true) =
    createStat[Date]("dtg", bins, min, max, observe)
  def geomStat(bins: Int, min: String, max: String, observe: Boolean = true) =
    createStat[Geometry]("geom", bins, min, max, observe)
  // Parses an ISO-8601 date string with the GeoTools formatter into a java.util.Date.
  def toDate(string: String) = GeoToolsDateFormat.parseDateTime(string).toDate
  // Parses a WKT string into a JTS Geometry.
  def toGeom(string: String) = WKTUtils.read(string)
"RangeHistogram stat" should {
"work with strings" >> {
"be empty initially" >> {
val stat = stringStat(20, "abc000", "abc200", observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 20
stat.bounds mustEqual ("abc000", "abc200")
forall(0 until 20)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = stringStat(36, "abc000", "abc099")
stat.isEmpty must beFalse
stat.length mustEqual 36
(0 until 36).map(stat.count).sum mustEqual 100
}
"serialize and deserialize" >> {
val stat = stringStat(20, "abc000", "abc200")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[String]]
unpacked.asInstanceOf[Histogram[String]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[String]].attribute mustEqual stat.attribute
unpacked.asInstanceOf[Histogram[String]].toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = stringStat(20, "abc000", "abc200", observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[String]]
unpacked.asInstanceOf[Histogram[String]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[String]].attribute mustEqual stat.attribute
unpacked.asInstanceOf[Histogram[String]].toJson mustEqual stat.toJson
}
"deserialize as immutable value" >> {
val stat = stringStat(20, "abc000", "abc200")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
unpacked must beAnInstanceOf[Histogram[String]]
unpacked.asInstanceOf[Histogram[String]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[String]].attribute mustEqual stat.attribute
unpacked.asInstanceOf[Histogram[String]].toJson mustEqual stat.toJson
unpacked.clear must throwAn[Exception]
unpacked.+=(stat) must throwAn[Exception]
unpacked.observe(features.head) must throwAn[Exception]
unpacked.unobserve(features.head) must throwAn[Exception]
}
"combine two RangeHistograms" >> {
val stat = stringStat(36, "abc000", "abc099")
val stat2 = stringStat(36, "abc100", "abc199", observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 36
(0 until 36).map(stat2.count).sum mustEqual 100
stat += stat2
stat.length mustEqual 36
(0 until 36).map(stat.count).sum mustEqual 200
stat2.length mustEqual 36
(0 until 36).map(stat2.count).sum mustEqual 100
}
"combine two RangeHistograms with empty values" >> {
val stat = stringStat(100, "0", "z", observe = false)
val stat2 = stringStat(100, "alpha", "gamma", observe = false)
stat.bins.add("0")
stat2.bins.add("alpha")
stat2.bins.add("beta")
stat2.bins.add("gamma")
stat2.bins.add("cappa")
stat2 += stat
stat2.bounds mustEqual ("00000", "gamma")
}
"clear" >> {
val stat = stringStat(20, "abc000", "abc200")
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 0)
}
}
"work with integers" >> {
"be empty initially" >> {
val stat = intStat(20, 0, 199, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
forall(0 until 20)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = intStat(20, 0, 199)
stat.isEmpty must beFalse
stat.length mustEqual 20
forall(0 until 10)(stat.count(_) mustEqual 10)
forall(10 until 20)(stat.count(_) mustEqual 0)
}
"correctly remove values" >> {
val stat = intStat(20, 0, 199)
stat.isEmpty must beFalse
stat.length mustEqual 20
forall(0 until 10)(stat.count(_) mustEqual 10)
forall(10 until 20)(stat.count(_) mustEqual 0)
features.take(50).foreach(stat.unobserve)
forall(5 until 10)(stat.count(_) mustEqual 10)
forall((0 until 5) ++ (10 until 20))(stat.count(_) mustEqual 0)
}
"serialize and deserialize" >> {
val stat = intStat(20, 0, 199)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[Integer]]
unpacked.asInstanceOf[Histogram[Integer]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[Integer]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = intStat(20, 0, 199, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[Integer]]
unpacked.asInstanceOf[Histogram[Integer]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[Integer]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = intStat(20, 0, 199)
val stat2 = intStat(20, 0, 199, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 20
forall(0 until 10)(stat2.count(_) mustEqual 0)
forall(10 until 20)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 10)
stat2.length mustEqual 20
forall(0 until 10)(stat2.count(_) mustEqual 0)
forall(10 until 20)(stat2.count(_) mustEqual 10)
}
"combine two RangeHistograms with different bounds" >> {
val stat = intStat(20, 0, 99)
val stat2 = intStat(20, 100, 199, observe = false)
features2.foreach { stat2.observe }
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 5)
stat2.length mustEqual 20
forall(0 until 20)(stat2.count(_) mustEqual 5)
stat += stat2
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
forall(0 until 20)(stat.count(_) mustEqual 10)
}
"combine two RangeHistograms with different lengths" >> {
val stat = intStat(20, 0, 199)
val stat2 = intStat(10, 0, 199, observe = false)
features2.foreach { stat2.observe }
stat.length mustEqual 20
forall(0 until 10)(stat.count(_) mustEqual 10)
forall(10 until 20)(stat.count(_) mustEqual 0)
stat2.length mustEqual 10
forall(0 until 5)(stat2.count(_) mustEqual 0)
forall(5 until 10)(stat2.count(_) mustEqual 20)
stat += stat2
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
forall(0 until 20)(stat.count(_) mustEqual 10)
}
"combine two RangeHistograms with empty values" >> {
val stat = intStat(20, -100, 300)
val stat2 = intStat(20, 50, 249, observe = false)
features2.foreach { stat2.observe }
stat.length mustEqual 20
forall((0 until 5) ++ (10 until 20))(stat.count(_) mustEqual 0)
forall(5 until 10)(stat.count(_) mustEqual 20)
stat2.length mustEqual 20
forall((0 until 5) ++ (15 until 20))(stat2.count(_) mustEqual 0)
forall(5 until 15)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
stat.bins.counts mustEqual Array(6, 8, 12, 8, 12, 8, 12, 8, 12, 8, 16, 10, 10, 10, 10, 10, 10, 10, 10, 10)
(0 until stat.length).map(stat.count).sum mustEqual 200
}
"clear" >> {
val stat = intStat(20, 0, 199)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 0)
}
}
"work with longs" >> {
"be empty initially" >> {
val stat = longStat(10, 0, 99, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 10
stat.bounds mustEqual (0, 99)
forall(0 until 10)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = longStat(10, 0, 99)
stat.isEmpty must beFalse
stat.length mustEqual 10
stat.bounds mustEqual (0, 99)
forall(0 until 10)(stat.count(_) mustEqual 10)
}
"serialize and deserialize" >> {
val stat = longStat(7, 90, 110)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jLong]]
unpacked.asInstanceOf[Histogram[jLong]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jLong]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = longStat(7, 90, 110, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jLong]]
unpacked.asInstanceOf[Histogram[jLong]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jLong]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = longStat(10, 0, 99)
val stat2 = longStat(10, 100, 199, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 10
stat.bounds mustEqual (0, 199)
forall(0 until 10)(stat.count(_) mustEqual 20)
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
}
"clear" >> {
val stat = longStat(7, 90, 110)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 7
forall(0 until 7)(stat.count(_) mustEqual 0)
}
}
"work with floats" >> {
"be empty initially" >> {
val stat = floatStat(7, 90, 110, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 7
stat.bounds mustEqual (90f, 110f)
forall(0 until 7)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = floatStat(10, 0, 100)
stat.isEmpty must beFalse
stat.length mustEqual 10
stat.bounds mustEqual (0f, 100f)
forall(0 until 10)(stat.count(_) mustEqual 10)
}
"serialize and deserialize" >> {
val stat = floatStat(7, 90, 110)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jFloat]]
unpacked.asInstanceOf[Histogram[jFloat]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jFloat]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = floatStat(7, 90, 110, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jFloat]]
unpacked.asInstanceOf[Histogram[jFloat]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jFloat]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = floatStat(10, 0, 100)
val stat2 = floatStat(10, 100, 200, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 10
stat2.bounds mustEqual (100f, 200f)
forall(0 until 10)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 10
stat.count(0) mustEqual 15
forall(1 until 9)(stat.count(_) mustEqual 20)
stat.count(9) mustEqual 25
stat2.length mustEqual 10
stat2.bounds mustEqual (100f, 200f)
forall(0 until 10)(stat2.count(_) mustEqual 10)
}
"clear" >> {
val stat = floatStat(7, 90, 110)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 7
forall(0 until 7)(stat.count(_) mustEqual 0)
}
}
"work with doubles" >> {
"be empty initially" >> {
val stat = doubleStat(7, 90, 110, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 7
stat.bounds mustEqual (90.0, 110.0)
forall(0 until 7)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = doubleStat(10, 0, 99)
stat.isEmpty must beFalse
stat.length mustEqual 10
stat.bounds mustEqual (0.0, 99.0)
forall(0 until 10)(stat.count(_) mustEqual 10)
}
"serialize and deserialize" >> {
val stat = doubleStat(7, 90, 110)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = doubleStat(7, 90, 110, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = doubleStat(10, 0, 100)
val stat2 = doubleStat(10, 100, 200, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 10
stat.bounds mustEqual (0.0, 200.0)
stat.count(0) mustEqual 15
forall(1 until 9)(stat.count(_) mustEqual 20)
stat.count(9) mustEqual 25
(0 until 10).map(stat.count).sum mustEqual 200
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
}
"clear" >> {
val stat = doubleStat(7, 90, 110)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 7
forall(0 until 7)(stat.count(_) mustEqual 0)
}
}
"work with dates" >> {
"be empty initially" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z", observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 24
stat.bounds mustEqual (toDate("2012-01-01T00:00:00.000Z"), toDate("2012-01-03T00:00:00.000Z"))
forall(0 until 24)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
stat.isEmpty must beFalse
stat.length mustEqual 24
stat.bounds mustEqual (toDate("2012-01-01T00:00:00.000Z"), toDate("2012-01-03T00:00:00.000Z"))
forall(0 until 2)(stat.count(_) mustEqual 10)
forall(2 until 12)(stat.count(_) mustEqual 8)
forall(12 until 24)(stat.count(_) mustEqual 0)
}
"serialize and deserialize" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[Date]]
unpacked.asInstanceOf[Histogram[Date]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[Date]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z", observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
val stat2 = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z", observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 24
forall(0 until 12)(stat2.count(_) mustEqual 0)
forall((12 until 14) ++ (16 until 24))(stat2.count(_) mustEqual 8)
forall(15 until 16)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 24
forall((0 until 2) ++ (15 until 16))(stat.count(_) mustEqual 10)
forall((2 until 14) ++ (16 until 24))(stat.count(_) mustEqual 8)
stat2.length mustEqual 24
forall(0 until 12)(stat2.count(_) mustEqual 0)
forall((12 until 14) ++ (16 until 24))(stat2.count(_) mustEqual 8)
forall(15 until 16)(stat2.count(_) mustEqual 10)
}
"combine two RangeHistograms with weekly splits" >> {
// simulates the way date histograms will be gathered as we track stats dynamically
val stat = dateStat(4, "2012-01-01T00:00:00.000Z", "2012-01-28T23:59:59.999Z", observe = false)
val stat2 = dateStat(5, "2012-01-01T00:00:00.000Z", "2012-02-04T23:59:59.999Z", observe = false)
val attributes = Array.ofDim[AnyRef](7)
(1 to 28).foreach { i =>
attributes(6) = f"2012-01-$i%02dT12:00:00.000Z"
stat.observe(SimpleFeatureBuilder.build(sft, attributes, ""))
}
(29 to 31).foreach { i =>
attributes(6) = f"2012-01-$i%02dT12:00:00.000Z"
stat2.observe(SimpleFeatureBuilder.build(sft, attributes, ""))
}
(1 to 4).foreach { i =>
attributes(6) = f"2012-02-$i%02dT12:00:00.000Z"
stat2.observe(SimpleFeatureBuilder.build(sft, attributes, ""))
}
stat.length mustEqual 4
forall(0 until 4)(stat.count(_) mustEqual 7)
stat2.length mustEqual 5
forall(0 until 4)(stat2.count(_) mustEqual 0)
stat2.count(4) mustEqual 7
stat += stat2
stat.length mustEqual 5
forall(0 until 5)(stat.count(_) mustEqual 7)
stat2.length mustEqual 5
forall(0 until 4)(stat2.count(_) mustEqual 0)
stat2.count(4) mustEqual 7
}
"clear" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 24
forall(0 until 24)(stat.count(_) mustEqual 0)
}
}
"work with geometries" >> {
"be empty initially" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)", observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 32
stat.bounds mustEqual (toGeom("POINT(-180 -90)"), toGeom("POINT(180 90)"))
forall(0 until 32)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
stat.isEmpty must beFalse
stat.length mustEqual 32
stat.bounds mustEqual (toGeom("POINT(-180 -90)"), toGeom("POINT(180 90)"))
stat.count(18) mustEqual 45
stat.count(19) mustEqual 44
stat.count(20) mustEqual 9
stat.count(22) mustEqual 1
stat.count(24) mustEqual 1
forall((0 until 18) ++ Seq(21, 23) ++ (25 until 32))(stat.count(_) mustEqual 0)
}
"serialize and deserialize" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)", observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
val stat2 = geomStat(32, "POINT(-180 -90)", "POINT(180 90)", observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 32
stat2.count(25) mustEqual 10
stat2.count(27) mustEqual 20
stat2.count(30) mustEqual 46
stat2.count(31) mustEqual 24
forall((0 until 25) ++ Seq(26, 28, 29))(stat2.count(_) mustEqual 0)
stat += stat2
stat.count(18) mustEqual 45
stat.count(19) mustEqual 44
stat.count(20) mustEqual 9
stat.count(22) mustEqual 1
stat.count(24) mustEqual 1
stat.count(25) mustEqual 10
stat.count(27) mustEqual 20
stat.count(30) mustEqual 46
stat.count(31) mustEqual 24
forall((0 until 18) ++ Seq(21, 23, 26, 28, 29))(stat.count(_) mustEqual 0)
stat2.length mustEqual 32
stat2.count(25) mustEqual 10
stat2.count(27) mustEqual 20
stat2.count(30) mustEqual 46
stat2.count(31) mustEqual 24
forall((0 until 25) ++ Seq(26, 28, 29))(stat2.count(_) mustEqual 0)
}
"clear" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 32
forall(0 until 32)(stat.count(_) mustEqual 0)
}
}
}
}
| nagavallia/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/HistogramTest.scala | Scala | apache-2.0 | 25,768 |
// Type-inference test fixture: the /*start*/.../*end*/ markers delimit the
// expression under test, and the trailing "//Int" comment is the expected type.
object SCL4139 {
  abstract class Graph {
    type Edge
    type Node <: NodeIntf
    abstract class NodeIntf {
    }
  }
  abstract class DirectedGraph extends Graph {
    type Edge <: EdgeImpl
    class EdgeImpl
    class NodeImpl extends NodeIntf {
      // with this line...
      self: Node =>
      def connectWith(node : Node): Edge = {
        /*start*/newEdge(this)/*end*/
        exit()
      }
    }
    // overload resolution should pick the Node overload, hence the expected Int
    protected def newEdge(from: Node) = 1
    protected def newEdge(from: Byte) = false
  }
}
//Int
package com.geishatokyo.diffsql.sqlite
import com.geishatokyo.diffsql.{ SQLnizer}
import com.geishatokyo.diffsql.ast._
import com.geishatokyo.diffsql.diff.Diff
import com.geishatokyo.diffsql.ast.Key._
import com.geishatokyo.diffsql.ast.Table
import com.geishatokyo.diffsql.diff.Diff
import com.geishatokyo.diffsql.ast.Column
/**
 * SQL generator targeting SQLite: renders CREATE/DROP TABLE statements and
 * turns a [[Diff]] into a list of ALTER statements.
 *
 * Created by takeshita on 14/02/17.
 */
class Sqlitenizer extends SQLnizer {

  /** Renders a CREATE TABLE IF NOT EXISTS statement with inline column and key definitions. */
  def toCreateTable(table: Table): String = {
    s"""CREATE TABLE IF NOT EXISTS ${table.name.name} (
  ${table.fields.map({
      case c : Column => toColumnDefinition(c)
      case k : Key => toIndexDefinition(k)
    }).mkString(",\\n  ")}
)${table.options.mkString(",\\n")};"""
  }

  /** Renders a DROP TABLE IF EXISTS statement for the given table. */
  def toDropTable(table: Table) : String = {
    s"DROP TABLE IF EXISTS ${table.name.name};"
  }

  /** Converts a table diff into column alterations followed by key alterations. */
  def toAlterSQL(diff: Diff): List[String] = {
    columns(diff) ::: keys(diff)
  }

  /** ALTER statements for added and altered columns (removals are unsupported). */
  def columns(diff : Diff) = {
    diff.columns.add.map( c => {
      s"ALTER TABLE ${diff.tableName} ADD COLUMN ${toColumnDefinition(c)};"
    }) :::
    // Drop column is not supported in Sqlite.
    /*diff.columns.remove.map( c => {
      s"ALTER TABLE ${diff.tableName} DROP COLUMN ${c.name.name};"
    }) :::*/
    // NOTE(review): "ALTER TABLE ... MODIFY" is MySQL syntax; SQLite does not
    // support modifying a column in place — confirm the intended target engine.
    diff.columns.alter.map(c => {
      s"ALTER TABLE ${diff.tableName} MODIFY ${toColumnDefinition(c)};"
    })
  }

  /** Renders "name type options" for a single column. */
  def toColumnDefinition(column: Column): String = {
    s"${column.name} ${column.dataType} ${column.options.mkString(" ")}"
  }

  /** ALTER statements for removed keys followed by added keys. */
  def keys(diff : Diff) = {
    // NOTE(review): "DROP KEY" is MySQL syntax; SQLite drops indexes via
    // "DROP INDEX" — confirm the intended target engine.
    diff.keys.remove.map(k => {
      if(k.keyType == KeyType.PrimaryKey){
        s"ALTER TABLE ${diff.tableName} DROP PRIMARY KEY;"
      }else{
        s"ALTER TABLE ${diff.tableName} DROP KEY ${k.name.get.name};"
      }
    }) :::
    diff.keys.add.map(k => {
      s"ALTER TABLE ${diff.tableName} ADD ${toIndexDefinition(k)};"
    })
  }

  /** Renders a key/index definition; the keyword depends on the key subtype. */
  def toIndexDefinition( key : Key) = {
    val prefix = key match{
      case pk : PrimaryKey => "PRIMARY KEY"
      case uk : UniqueKey => "UNIQUE KEY"
      case nk : NormalKey => "KEY"
      case fk : FullTextKey => "FULLTEXT"
    }
    s"${prefix}${key.name.map(" " + _.name).getOrElse("")}${key.algorithm.map(" USING " + _).getOrElse("")} ${key.columns.map(_.name).mkString("(",",",")")} ${key.order.map(_.toString).getOrElse("")}"
  }
}
| geishatokyo/diff-sql-table | parser/src/main/scala/com/geishatokyo/diffsql/sqlite/Sqlitenizer.scala | Scala | mit | 2,268 |
/*
* This file is part of EasyForger which is released under GPLv3 License.
* See file LICENSE.txt or go to http://www.gnu.org/licenses/gpl-3.0.en.html for full license details.
*/
package com.easyforger.tester
import com.easyforger.base.{EasyForger, LootTableLoadEventReplacerTest}
import com.easyforger.creatures.CreaturesHandlerTest
import com.easyforger.items.EFItemArmorTest
import com.easyforger.recipes.test._ // scalastyle:ignore
import com.easyforger.util.Version
import net.minecraftforge.fml.common.Mod
import net.minecraftforge.fml.common.Mod.EventHandler
import net.minecraftforge.fml.common.event.FMLPostInitializationEvent
import utest.TestRunner
import scala.util.control.NonFatal
@Mod(modid = uTestRunnerMod.modId, name = "EasyForger Integration Tester", version = Version.version, modLanguage = "scala")
object uTestRunnerMod extends EasyForger {

  final val modId = "easyforger-integration-tester"

  /**
   * Runs every registered uTest suite once Forge finishes post-initialization.
   *
   * The tests are manually executed here, so we don't need to extend TestSuite in
   * the tests themselves — and maybe we actually _can't_ do it, or the tests might
   * be picked up by gradle in the test phase, and they would fail there.
   */
  @EventHandler
  def postInit(event: FMLPostInitializationEvent): Unit = {
    // suite name -> test tree; names come from the test objects themselves
    val allTests = Map(
      CreaturesHandlerTest.getClass.getSimpleName -> CreaturesHandlerTest.tests,
      EFItemArmorTest.getClass.getSimpleName -> EFItemArmorTest.tests,
      RecipeAcronymsTest.getClass.getSimpleName -> RecipeAcronymsTest.tests,
      RecipeCompositionTest.getClass.getSimpleName -> RecipeCompositionTest.tests,
      RegisterRecipesTest.getClass.getSimpleName -> RegisterRecipesTest.tests,
      ShapedRecipesTest.getClass.getSimpleName -> ShapedRecipesTest.tests,
      SmeltingRecipesTest.getClass.getSimpleName -> SmeltingRecipesTest.tests,
      LootTableLoadEventReplacerTest.getClass.getSimpleName -> LootTableLoadEventReplacerTest.tests
    )

    println(" =========== Running integration tests =========== ") // scalastyle:ignore

    // foreach (not map): the suites are executed purely for their side effects,
    // and a failing suite must not abort the remaining ones
    allTests.foreach { case (name, tests) =>
      try {
        TestRunner.runAndPrint(tests, name)
      } catch {
        case NonFatal(t) =>
          println(s"Failed to run $name") // scalastyle:ignore
          t.printStackTrace()
      }
    }

    println(" =========== Finished integration tests ========== ") // scalastyle:ignore
  }
}
| easyforger/easyforger | tester/src/main/scala/com/easyforger/tester/uTestRunnerMod.scala | Scala | gpl-3.0 | 2,338 |
package sgwlpr.codegen
import scala.xml._
import scala.collection._
import java.io.{ File, BufferedWriter, FileWriter }
import FieldTypes._
// What I'd give for autoproxy...
/**
 * Wire field types used by the packet code generator.
 * `typeMapping` is the generated Scala type name; `size` is the size in bytes.
 */
object FieldTypes {
  sealed abstract class FieldType(val typeMapping: String, val size: Int)
  case object Int8 extends FieldType("Byte", 1)
  case object Int16 extends FieldType("Short", 2)
  case object Int32 extends FieldType("Int", 4)
  case object Int64 extends FieldType("Long", 8)
  case object Float extends FieldType("Float", 4)
  case object Vec2 extends FieldType("Vector2", 2 * 4)
  case object Vec3 extends FieldType("Vector3", 3 * 4)
  case object Vec4 extends FieldType("Vector4", 4 * 4)
  // the "?"-prefixed type mappings mark types with no generated representation yet
  case object Uuid16 extends FieldType("?uuid16", 16) // XXX - What is the size of a uuid?
  case object Uuid28 extends FieldType("?uuid28", 28)
  case object AgentId extends FieldType("AgentId", 4) // XXX - this should be AgentId and use implicits
  case object Ascii extends FieldType("String", 2) // XXX - is this right?
  case object Utf16 extends FieldType("String", 2) // XXX - should be right?
  case object Packed extends FieldType("?packed", -65535) // XXX - FixMe. (sentinel size)
  // Nested is a marker only; nested fields compute their size from their members
  case object Nested extends FieldType("ERROR", -1)
}
/**
 * Internal representation of the array information associated with a packet field.
 *
 * @param length the length of the array
 * @param fixedLength whether the length is constant
 * @param prefixType the [[sgwlpr.codegen.FieldType]] of the length prefix that needs
 *                   to be serialised; only relevant when the length is not constant
 */
case class ArrayInfo(length: Int, fixedLength: Boolean, prefixType: FieldType)
/**
 * Base trait for all packet fields: plain [[Field]]s and [[NestedField]]s.
 */
abstract sealed trait PacketField {
  // metadata (name/description/author/value) for this field
  def info: Info
  // array metadata, if the field repeats
  def arrayInfo: Option[ArrayInfo]
  // total wire size in bytes
  def size: Int

  /**
   * Calculates the size of a packet taking available [[sgwlpr.codegen.ArrayInfo]] into account
   *
   * @param size the base packet size that is used for calculations
   */
  protected def realSize(size: Int) = {
    if (arrayInfo == None)
      size
    else {
      val ai = arrayInfo.get
      if (ai.fixedLength)
        ai.length * size
      else
        // variable-length arrays also serialise a length prefix
        // XXX - Should the size really be independent of the list's length
        ai.prefixType.size + ai.length * size
    }
  }
}
/**
 * A normal packet field
 *
 * @param fieldType the type for this field
 * @param info the info metadata for this field
 * @param arrayInfo the array metadata for this field
 */
case class Field(fieldType: FieldType, info: Info, arrayInfo: Option[ArrayInfo] = None) extends PacketField {
  // Scala type name emitted by the code generator for this field
  def typeMapping = fieldType.typeMapping
  def size = realSize(fieldType.size)
}
/**
 * A packet field whose payload is a list of nested sub-fields.
 *
 * @param info the metadata for this field
 * @param members the nested sub-fields
 * @param arrayInfo the array metadata for this field, if any
 */
case class NestedField(info: Info, members: List[Field], arrayInfo: Option[ArrayInfo] = None) extends PacketField {
  // total wire size of all nested members
  private def memberSize = members.map(_.size).sum
  def size = realSize(memberSize)
}
/**
 * Internal representation of a <Packet /> node.
 *
 * @param header the packet header
 * @param fields the fields contained in this packet
 * @param info the metadata associated with this packet
 */
case class Packet(header: Int, fields: List[PacketField], info: Info) {

  /** The packet name from the metadata, or a generated "Packet<header>" default. */
  def name: String = info.name.fold("Packet%d".format(header))(_ + "Packet")

  /** Total wire size: two header bytes plus the size of every field. */
  def size = 2 + fields.map(_.size).sum
}
// XXX - name should not be var
// (it is mutable because Main.deserialiseFields assigns fallback names in place)
/**
 * Internal representation of packet and field metadata
 *
 * @param name the name for the packet/field
 * @param description the description for the packet/field
 * @param author the author for the packet/field
 * @param value the raw value for the packet/field, if any
 */
case class Info(var name: Option[String], description: Option[String], author: Option[String], value: Option[String])
object Main extends App {

  /** Generates scala code from the packet template xml */
  override def main(args: Array[String]): Unit = {
    // XXX - get these as config options from args
    val target = "codegen/src-gen"
    val packageName = "sgwlpr"
    val fileName = "codegen/PacketTemplates.xml"
    // direction abbreviation -> parsed packets for that direction
    val packetMap = mutable.Map.empty[String, List[Packet]]
    // parse XML to packets
    (XML.loadFile(fileName) \\ "Packets" \ "Direction").toList.foreach { direction =>
      // convert the verbose abbreviations to less verbose ones
      val dir = (direction \ "@abbr").text match {
        case "LStoC" => "l2c"
        case "GStoC" => "g2c"
        case "CtoLS" => "c2l"
        case "CtoGS" => "c2g"
        case _ => "error"
      }
      val packets = (direction \ "Packet").toList
      packetMap += (dir -> packets.map(deserialisePacket).toList)
    }
    // save packets as scala classes, one file per direction
    packetMap.toList.foreach {
      case (d, p) => {
        import org.clapper.scalasti.StringTemplateGroup
        // recreate the output file from scratch
        val f = new File(target + "/" + d + ".scala")
        f.delete
        f.createNewFile
        val bw = new BufferedWriter(new FileWriter(f))
        // "base" template: package declaration plus the generated packet classes
        val tpl = new StringTemplateGroup("", new File("codegen/templates")).template("base")
        tpl.setAttribute("package", packageName)
        tpl.setAttribute("dir", d)
        tpl.setAttribute("content", p.foldLeft("") { (a, b) => a + "\\n" + CodeGenerator.generate(b) })
        // "deserialiser" template: header -> packet dispatch cases
        val des = new StringTemplateGroup("", new File("codegen/templates")).template("deserialiser")
        des.setAttribute("cases", p.map { packet =>
          "case %d => %s(buf)\\n".format(packet.header, packet.name)
        })
        bw.write(tpl.toString)
        bw.write(des.toString)
        bw.flush
        bw.close
      }
    }
  }

  /** This method implicitly converts a String to an Option[String] */
  // (equivalent to Option(str).filter(_.nonEmpty))
  implicit def string2Option(str: String): Option[String] = {
    // XXX - Is there no Scala internal for this?
    if (str.isEmpty)
      None
    else
      Some(str)
  }

  /** This method implicitly converts a String to the representative FieldType */
  // throws MatchError on unknown type strings
  implicit def string2FieldType(str: String): FieldType = str match {
    case "int8" => Int8
    case "int16" => Int16
    case "int32" => Int32
    case "int64" => Int64
    case "packed" => Packed
    case "float" => Float
    case "vec2" => Vec2
    case "vec3" => Vec3
    case "vec4" => Vec4
    case "uuid16" => Uuid16
    case "uuid28" => Uuid28
    case "agentid" => AgentId
    case "ascii" => Ascii
    case "utf16" => Utf16
    case "nested" => Nested
  }

  /** Parses the <Info /> node */
  // the implicit string2Option turns empty element texts into None
  def deserialiseInfo(info: NodeSeq): Info = Info(
    (info \ "Name").text,
    (info \ "Description").text,
    (info \ "Author").text,
    (info \ "Value").text)

  /* Parses the <Packet /> node */
  def deserialisePacket(packet: NodeSeq): Packet = {
    val header = (packet \ "@header").text.toInt
    val info = deserialiseInfo(packet \ "Info")
    val fields = deserialiseFields(packet \ "Field")
    Packet(header, fields, info)
  }

  /** Parses <Field /> nodes */
  // assigns positional fallback names ("unknownN") to unnamed fields;
  // relies on Info.name being a var (mutated in place)
  def deserialiseFields(fields: NodeSeq): List[PacketField] = {
    var unknownCount = 0
    fields.map(deserialiseField).toList.map { field =>
      if (field.info.name == None) {
        field.info.name = Some("unknown" + unknownCount)
        unknownCount += 1
      }
      field
    }
  }

  /** Parses a <Field /> node */
  def deserialiseField(field: NodeSeq): PacketField = {
    val fieldType: FieldType = (field \ "@type").text
    val info = deserialiseInfo(field \ "Info")
    // Create the ArrayInfo for the field
    val array: Option[ArrayInfo] = {
      val occursStr = (field \ "@occurs").text
      if (occursStr.isEmpty)
        None
      else {
        val occurs = occursStr.toInt
        // "static" defaults to true when the attribute is absent or not "false"
        val static = (field \ "@static").text match {
          case "false" => false
          case _ => true
        }
        val prefixType: FieldType = (field \ "@prefixType").text
        Some(ArrayInfo(occurs, static, prefixType))
      }
    }
    // Special treatment for nested nodes
    if (fieldType == Nested) {
      val fields = deserialiseFields(field \ "Field")
      NestedField(info, fields.asInstanceOf[List[Field]], array) // XXX - Fix this zZz hack
    } else
      Field(fieldType, info, array)
  }
}
| th0br0/sgwlpr | codegen/src/main/scala/sgwlpr/codegen/Main.scala | Scala | agpl-3.0 | 8,337 |
package edvorg.cf.zepto.heap
import scala.annotation.tailrec
/**
 * Binary heap operations over a mutable buffer, mutating it in place.
 *
 * The heap ordering is defined by `less`: `siftDown` swaps a parent below a
 * child for which `less(parent, child)` holds, so with a `<` comparator the
 * buffer is maintained as a max-heap and `heapSort` sorts ascending.
 *
 * @param data      the backing buffer (mutated in place)
 * @param less      the ordering predicate
 * @param priorLeft when both children beat the parent and neither child beats
 *                  the other, prefer the left child
 * @param onSwap    callback invoked after every element swap (useful for
 *                  mirroring the permutation in an auxiliary structure)
 */
class Heap[T](data: collection.mutable.Buffer[T], less: (T, T) => Boolean,
              priorLeft: Boolean, onSwap: (Int, Int) => Unit) {

  /** Swaps elements `i` and `j` and notifies `onSwap`. */
  final def swap(i: Int, j: Int) {
    val tl = data(i)
    data(i) = data(j)
    data(j) = tl
    onSwap(i, j)
  }

  /**
   * Sifts the element at `beg` down within `data[0, end)` until the heap
   * property is restored. Returns the number of swaps performed.
   */
  final def siftDown(beg: Int, end: Int) = {
    @tailrec
    def impl(beg: Int, acc: Int): Int = {
      val left = beg * 2 + 1
      val right = beg * 2 + 2
      if (right >= end) {
        // at most the left child exists
        if (left < end && less(data(beg), data(left))) {
          swap(beg, left)
          impl(left, acc + 1)
        }
        else acc
      }
      else if (left < end) {
        if (less(data(beg), data(left))) {
          if (less(data(beg), data(right))) {
            // both children beat the parent: pick one according to priorLeft
            val l = if (priorLeft) !less(data(left), data(right))
                    else less(data(right), data(left))
            if (l) {
              swap(beg, left)
              impl(left, acc + 1)
            }
            else {
              swap(beg, right)
              impl(right, acc + 1)
            }
          }
          else {
            swap(beg, left)
            impl(left, acc + 1)
          }
        }
        else if (less(data(beg), data(right))) {
          swap(beg, right)
          impl(right, acc + 1)
        }
        else acc
      }
      else acc
    }
    impl(beg, 0)
  }

  /** Heapifies `data[0, end)` bottom-up; returns the total number of swaps. */
  final def makeHeap(end: Int) = {
    var count = 0
    var i = (end - 1) / 2
    do {
      count += siftDown(i, end)
      i -= 1
    } while (i >= 0)
    count
  }

  /**
   * Sorts `data[0, end)` in place (the range is assumed to satisfy the heap
   * property, e.g. after [[makeHeap]]); returns the number of sift swaps.
   */
  final def heapSort(end: Int) = {
    // guard: the original unconditionally executed swap(0, end - 1), which
    // indexed data(-1) when end == 0
    if (end <= 0) 0
    else {
      var size = end
      var count = 0
      do {
        // move the current root behind the shrinking heap, then re-sift
        swap(0, size - 1)
        size -= 1
        count += siftDown(0, size)
      }
      while (size > 1)
      count
    }
  }

  /** Moves the element at offset `elem` (relative to `beg`) up toward the root. */
  @tailrec
  final def shiftUp(beg: Int, elem: Int) {
    val parent = (elem - 1) / 2
    val bp = beg + parent
    val be = beg + elem
    if (parent >= 0 && less(data(bp), data(be))) { // has parent
      swap(be, bp)
      shiftUp(beg, parent)
    }
  }
}
| edvorg/scala-cf | src/main/scala/edvorg/cf/zepto/Heap/Heap.scala | Scala | gpl-3.0 | 1,684 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.graphite.sender
import akka.actor.{ ActorRef, FSM }
/** FSM plumbing for the Graphite TCP sender: binds the state and data types below. */
private[sender] trait TcpSenderFSM extends FSM[TcpSenderState, TcpSenderData]
/** Connection-lifecycle states of the sender. */
private[sender] sealed trait TcpSenderState
private[sender] case object WaitingForConnection extends TcpSenderState
private[sender] case object Running extends TcpSenderState
private[sender] case object RetriesExhausted extends TcpSenderState
/** Per-state data carried by the FSM. */
private[sender] sealed trait TcpSenderData
private[sender] case object NoData extends TcpSenderData
// retry bookkeeping while disconnected
private[sender] final case class DisconnectedData(retry: Retry) extends TcpSenderData
// live connection actor plus retry bookkeeping while connected
private[sender] final case class ConnectedData(connection: ActorRef, retry: Retry) extends TcpSenderData
| gatling/gatling | gatling-graphite/src/main/scala/io/gatling/graphite/sender/TcpSenderFSM.scala | Scala | apache-2.0 | 1,306 |
package com.crobox.clickhouse.dsl.language
import com.crobox.clickhouse.dsl.{ExpressionColumn, TableColumn, _}
trait HigherOrderFunctionTokenizer {
  self: ClickhouseTokenizerModule =>

  /**
   * Renders the lambda part of a higher-order call ("x -> ...", "(x,y) -> ...",
   * or "(x,y,z) -> ..." depending on the arity), followed by ", " so it can be
   * prepended to the array arguments. Returns "" when no lambda is set.
   */
  private def tokenizeHOFunc[I, O, R](col: HigherOrderFunction[I, O, R])(implicit ctx: TokenizeContext): String =
    if (col.func1.isDefined) {
      "x -> " + tokenizeColumn(col.func1.get(RefColumn[I]("x"))) + ", "
    } else if (col.func2.isDefined) {
      "(x,y) -> " + tokenizeColumn(col.func2.get(RefColumn[I]("x"), RefColumn[I]("y"))) + ", "
    } else if (col.func3.isDefined) {
      "(x,y,z) -> " + tokenizeColumn(col.func3.get(RefColumn[I]("x"), RefColumn[I]("y"), RefColumn[I]("z"))) + ", "
    } else ""

  /** Renders the full argument list: optional lambda followed by the array columns. */
  private def tokenizeHOParams[I, O, R](col: HigherOrderFunction[I, O, R])(implicit ctx: TokenizeContext): String =
    tokenizeHOFunc(col) + tokenizeColumns(col.arrays.map(_.column))

  /** Renders a higher-order array function call, e.g. "arrayMap(x -> ..., col)". */
  def tokenizeHigherOrderFunction(col: HigherOrderFunction[_, _, _])(implicit ctx: TokenizeContext): String = {
    // resolve only the ClickHouse function name here; the parameter rendering
    // is identical for every function, so it is interpolated exactly once below
    val functionName = col match {
      case _: ArrayAll[_, _]         => "arrayAll"
      case _: ArrayAvg[_, _]         => "arrayAvg"
      case _: ArrayCount[_]          => "arrayCount"
      case _: ArrayCumSum[_, _]      => "arrayCumSum"
      case _: ArrayExists[_]         => "arrayExists"
      case _: ArrayFill[_]           => "arrayFill"
      case _: ArrayFilter[_]         => "arrayFilter"
      case _: ArrayFirst[_]          => "arrayFirst"
      case _: ArrayFirstIndex[_]     => "arrayFirstIndex"
      case _: ArrayMap[_, _]         => "arrayMap"
      case _: ArrayMax[_, _]         => "arrayMax"
      case _: ArrayMin[_, _]         => "arrayMin"
      case _: ArrayReverseFill[_]    => "arrayReverseFill"
      case _: ArrayReverseSort[_, _] => "arrayReverseSort"
      case _: ArrayReverseSplit[_]   => "arrayReverseSplit"
      case _: ArraySort[_, _]        => "arraySort"
      case _: ArraySplit[_]          => "arraySplit"
      case _: ArraySum[_, _]         => "arraySum"
    }
    s"$functionName(${tokenizeHOParams(col)})"
  }
}
| crobox/clickhouse-scala-client | dsl/src/main/scala/com.crobox.clickhouse/dsl/language/HigherOrderFunctionTokenizer.scala | Scala | lgpl-3.0 | 2,515 |
/**
* Created by jguzman on 25-08-15.
*/
object Lzw {

  /**
   * LZW-compresses `tc` into a list of integer codes.
   * Codes 1-255 are the seeded single characters; new dictionary entries start
   * at 256 and the dictionary is capped at 4096 entries.
   * Prints the initial dictionary as a debug aid.
   */
  def compress(tc: String) = {
    // seed the dictionary with single-character strings for codes 1..255
    val initialDict = (1 to 255).map(code => ("" + code.toChar, code)).toMap
    println(" start Dictionary ...")
    println( initialDict )
    val (finalDict, emittedCodes, pending) =
      tc.foldLeft((initialDict, List.empty[Int], "")) { case ((dict, emitted, buffered), ch) =>
        val candidate = buffered + ch
        if (dict.contains(candidate))
          // keep extending the current substring
          (dict, emitted, candidate)
        else if (dict.size < 4096)
          // emit the buffered code and register the new substring
          (dict.updated(candidate, dict.size + 1), dict(buffered) :: emitted, "" + ch)
        else
          // dictionary is full: emit without registering
          (dict, dict(buffered) :: emitted, "" + ch)
      }
    // flush whatever is still buffered, then restore input order
    val allCodes = if (pending.isEmpty) emittedCodes else finalDict(pending) :: emittedCodes
    allCodes.reverse
  }

  /** Decodes a list of LZW codes produced by [[compress]] back into a string. */
  def decompress(ns: List[Int]): String = {
    val initialDict = (1 to 255).map(code => (code, "" + code.toChar)).toMap
    val (_, pieces, _) =
      ns.foldLeft[(Map[Int, String], List[String], Option[(Int, String)])]((initialDict, Nil, None)) {
        case ((dict, acc, pending), code) =>
          dict.get(code) match {
            case Some(entry) =>
              // known code: complete the pending dictionary entry, if any
              val (nextDict, nextSlot) = pending match {
                case Some((slot, prefix)) => (dict.updated(slot, prefix + entry.head), slot + 1)
                case None                 => (dict, dict.size + 1)
              }
              (nextDict, entry :: acc, Some(nextSlot -> entry))
            case None =>
              // cScSc corner case: the code refers to the entry currently being built
              // (pending being None here would indicate an encoding error)
              val (slot, prefix) = pending.get
              val entry = prefix + prefix.head
              (dict.updated(slot, entry), entry :: acc, Some(slot + 1 -> entry))
          }
      }
    pieces.reverse.mkString("")
  }

  /** Small round-trip demo. */
  def main(args: Array[String]): Unit = {
    // test
    val sample = "ABRACADABRA"
    val compressed = compress(sample)
    println("\\n -- Comprimido --")
    println(compressed)
    println("\\n ------ ")
    val restored = decompress(compressed)
    println("\\n -- Resultado --")
    //TOBEORNOTTOBEORTOBEORNOT
    //TOBEORNOTTOBEORTOBEORNOT
    println(restored)
  }
}
| jaimeguzman/learning | scala/lzw.scala | Scala | apache-2.0 | 2,199 |
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import exceptions.ModifiableMessage
/**
* Trait providing an implicit conversion that allows clues to be placed after a block of code.
*
* <p>
* You can use the <code>withClue</code> construct provided by <code>Assertions</code>, which is
* extended by every style trait in ScalaTest, to add extra information to reports of failed or canceled tests.
* The <code>withClue</code> from <code>Assertions</code> places the "clue string" at the front, both
* in the code and in the resulting message:
*
* <pre class="stHighlight">
* withClue("This is a prepended clue;") {
* 1 + 1 should equal (3)
* }
* </pre>
*
* <p>
* The above expression will yield the failure message:
* </p>
*
* <p>
* <code>This is a prepended clue; 2 did not equal 3</code>
* </p>
*
* <p>
* If you mix in this trait, or import its members via its companion object, you can alternatively place
* the clue string at the end, like this:
* </p>
*
* <pre class="stHighlight">
* { 1 + 1 should equal (3) } withClue "now the clue comes after"
* </pre>
*
* <p>
* The above expression will yield the failure message:
* </p>
*
* <p>
* <code>2 did not equal 3 now the clue comes after</code>
* </p>
*
* <p>
* If no space is already present, either at the beginning of the clue string or at the end
* of the current message, a space will be placed between the two, unless the clue string
* starts with one of the punctuation characters: comma (<code>,</code>), period (<code>.</code>),
* or semicolon (<code>;</code>). For example, the failure message in the above example
* includes an extra space inserted between <em>3</em> and <em>now</em>.
* </p>
*
* <p>
* By contrast this code, which has a clue string starting with comma:
* </p>
*
* <pre class="stHighlight">
* { 1 + 1 should equal (3) } withClue ", now the clue comes after"
* </pre>
*
* <p>
* Will yield a failure message with no extra inserted space:
* </p>
*
* <p>
* <code>2 did not equal 3, now the clue comes after</code>
* </p>
*
* <p>
* The <code>withClue</code> method will only append the clue string to the detail
* message of exception types that mix in the <code>ModifiableMessage</code> trait.
* See the documentation for <a href="exceptions/ModifiableMessage.html"><code>ModifiableMessage</code></a> for more
* information.
* </p>
*
* <p>
* Note: the reason this functionality is not provided by <code>Assertions</code> directly, like the
* prepended <code>withClue</code> construct, is because appended clues require an implicit conversion.
* ScalaTest only gives you one implicit conversion by default in any test class to minimize the
* potential for conflicts with other implicit conversions you may be using. All other implicit conversions,
* including the one provided by this trait, you must explicitly invite into your code through inheritance
* or an import.
* </p>
*
* @author Bill Venners
*/
trait AppendedClues {

  /**
   * Wraps a by-name block of code so that a clue string can be appended to the
   * detail message of any [[org.scalatest.exceptions.ModifiableMessage]]
   * exception the block throws.
   */
  class Clueful[T](fun: => T) {

    /**
     * Evaluates the wrapped block; if it completes abruptly with a
     * <code>ModifiableMessage</code> exception, appends <code>clue</code> to the
     * end of that exception's detail message and rethrows it.
     *
     * A single space is inserted between the existing message and the clue
     * unless the clue already starts with whitespace or with one of the
     * punctuation characters comma (<code>,</code>), period (<code>.</code>),
     * or semicolon (<code>;</code>). An empty clue leaves the exception
     * untouched.
     *
     * @throws NullPointerException if the passed <code>clue</code> is <code>null</code>
     */
    def withClue(clue: Any): T = {
      if (clue == null)
        throw new NullPointerException("clue was null")
      // Transforms the exception's current message by appending the clue text.
      def appendClue(currentMessage: Option[String]): Option[String] = {
        val clueText = clue.toString
        currentMessage match {
          case None => Some(clueText)
          case Some(existing) =>
            // clueText.head is safe: this function is only invoked when clueText is non-empty
            val leading = clueText.head
            if (leading.isWhitespace || leading == '.' || leading == ',' || leading == ';')
              Some(existing + clueText)
            else
              Some(existing + " " + clueText)
        }
      }
      try fun
      catch {
        case e: ModifiableMessage[_] =>
          if (clue.toString == "") throw e
          else throw e.modifyMessage(appendClue)
      }
    }
  }

  /**
   * Implicit conversion that allows clues to be placed after a block of code.
   */
  implicit def convertToClueful[T](fun: => T) = new Clueful(fun)
}
/**
 * Companion object that facilitates the importing of <code>AppendedClues</code> members as
 * an alternative to mixing it in. One use case is to import <code>AppendedClues</code>
 * members so you can use them in the Scala interpreter.
 * (Importing from it brings the <code>convertToClueful</code> implicit conversion into scope.)
 */
object AppendedClues extends AppendedClues
| vivosys/scalatest | src/main/scala/org/scalatest/AppendedClues.scala | Scala | apache-2.0 | 6,432 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.core.util
import java.lang.System.{ currentTimeMillis, nanoTime }
object TimeHelper {

	// Paired wall-clock and monotonic-clock readings captured once at load time;
	// later nanoTime deltas are anchored to this wall-clock reference.
	val currentTimeMillisReference = currentTimeMillis
	val nanoTimeReference = nanoTime

	/** Converts a System.nanoTime reading into an (approximate) epoch-millis timestamp. */
	def computeTimeMillisFromNanos(nanos: Long) = currentTimeMillisReference + (nanos - nanoTimeReference) / 1000000

	/** Current time in epoch milliseconds, derived from the monotonic clock. */
	def nowMillis = computeTimeMillisFromNanos(nanoTime)

	/** Current time in epoch seconds, derived from the monotonic clock. */
	def nowSeconds = computeTimeMillisFromNanos(nanoTime) / 1000
}
package org.bitcoins.testkit.core.gen
import org.bitcoins.core.currency.{
Bitcoins,
CurrencyUnit,
CurrencyUnits,
Satoshis
}
import org.bitcoins.core.number.Int64
import org.bitcoins.core.protocol.ln.currency._
import org.scalacheck.Gen
import org.bitcoins.core.wallet.fee.FeeUnit
import org.bitcoins.core.wallet.fee.SatoshisPerByte
import org.bitcoins.core.wallet.fee.SatoshisPerKiloByte
import org.bitcoins.core.wallet.fee.SatoshisPerVirtualByte
trait CurrencyUnitGenerator {

  /** Fee rates in satoshis per byte, drawn from the realistic range. */
  def satsPerByte: Gen[SatoshisPerByte] =
    positiveRealistic.map(SatoshisPerByte(_))

  /** Fee rates in satoshis per kilobyte, drawn from the realistic range. */
  def satsPerKiloByte: Gen[SatoshisPerKiloByte] =
    positiveRealistic.map(SatoshisPerKiloByte(_))

  /** Fee rates in satoshis per virtual byte, drawn from the realistic range. */
  def satsPerVirtualByte: Gen[SatoshisPerVirtualByte] =
    positiveRealistic.map(SatoshisPerVirtualByte(_))

  /** Any of the three supported fee units. */
  def feeUnit: Gen[FeeUnit] =
    Gen.oneOf(satsPerByte, satsPerKiloByte, satsPerVirtualByte)

  /** Satoshi amounts over the full Int64 range. */
  def satoshis: Gen[Satoshis] =
    NumberGenerator.int64s.map(Satoshis(_))

  /** Bitcoin amounts derived from [[satoshis]]. */
  def bitcoins: Gen[Bitcoins] =
    satoshis.map(Bitcoins(_))

  /** Either a satoshi amount or a bitcoin amount. */
  def currencyUnit: Gen[CurrencyUnit] = Gen.oneOf(satoshis, bitcoins)

  /** Non-negative satoshi amounts. */
  def positiveSatoshis: Gen[Satoshis] =
    satoshis.suchThat(_ >= CurrencyUnits.zero)

  /**
   * Generates a positive satoshi value that is 'realistic'. This current 'realistic' range
   * is from 0 to 1,000,000 bitcoin
   */
  def positiveRealistic: Gen[Satoshis] =
    Gen.choose(0, Bitcoins(1000000).satoshis.toLong).map(n => Satoshis(Int64(n)))
}
object CurrencyUnitGenerator extends CurrencyUnitGenerator
trait LnCurrencyUnitGenerator {

  /** Generates a Lightning amount denominated in milli-bitcoins, over its full valid range. */
  def milliBitcoin: Gen[MilliBitcoins] =
    Gen
      .choose(MilliBitcoins.min.toLong, MilliBitcoins.max.toLong)
      .map(MilliBitcoins(_))

  /** Generates a Lightning amount denominated in micro-bitcoins, over its full valid range. */
  def microBitcoin: Gen[MicroBitcoins] =
    Gen
      .choose(MicroBitcoins.min.toLong, MicroBitcoins.max.toLong)
      .map(MicroBitcoins(_))

  /** Generates a Lightning amount denominated in nano-bitcoins, over its full valid range. */
  def nanoBitcoin: Gen[NanoBitcoins] =
    Gen
      .choose(NanoBitcoins.min.toLong, NanoBitcoins.max.toLong)
      .map(NanoBitcoins(_))

  /** Generates a Lightning amount denominated in pico-bitcoins, over its full valid range. */
  def picoBitcoin: Gen[PicoBitcoins] =
    Gen
      .choose(PicoBitcoins.min.toLong, PicoBitcoins.max.toLong)
      .map(PicoBitcoins(_))

  /** Picks one of the four Lightning currency-unit generators. */
  def lnCurrencyUnit: Gen[LnCurrencyUnit] =
    Gen.oneOf(milliBitcoin, microBitcoin, nanoBitcoin, picoBitcoin)

  /** Generates a strictly negative Lightning amount (filters the full range). */
  def negativeLnCurrencyUnit: Gen[LnCurrencyUnit] =
    lnCurrencyUnit.suchThat(amount => amount < LnCurrencyUnits.zero)
}
object LnCurrencyUnitGenerator extends LnCurrencyUnitGenerator
| bitcoin-s/bitcoin-s-core | testkit/src/main/scala/org/bitcoins/testkit/core/gen/CurrencyUnitGenerator.scala | Scala | mit | 2,660 |
package com.appliedscala.generator.services
import better.files
import better.files.FileMonitor
import com.appliedscala.generator.errors._
import org.slf4j.LoggerFactory
import java.nio.file.Path
import zio._
import zio.stream.ZStream
import com.appliedscala.generator.model.FileChangeEvent
import zio.blocking._
import com.appliedscala.generator.model.FileChangeAction
class MonitorService {
  private val logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Builds a ZStream that emits a [[FileChangeEvent]] for every create/modify/delete
   * observed (recursively) under each directory in `dirFiles`.
   *
   * The stream is backed by `ZStream.effectAsyncM`: one better-files `FileMonitor`
   * is started per directory on the blocking executor, and each monitor callback
   * pushes a single-event Chunk into the stream. A monitor exception ends the
   * stream with a [[FileMonitorError]] (via `ZIO.fail(Some(...))`), and any
   * failure during registration itself is also surfaced as [[FileMonitorError]].
   *
   * NOTE(review): the monitors are never explicitly stopped here — presumably
   * they live for the lifetime of the process; confirm with callers.
   */
  def registerFileWatcher(dirFiles: Seq[Path]): ZStream[Blocking, FileMonitorError, FileChangeEvent] = {
    ZStream.effectAsyncM[Blocking, FileMonitorError, FileChangeEvent] { pushMessage =>
      // Registration must run on the blocking executor: FileMonitor.start
      // takes an ExecutionContext and performs blocking file-system watching.
      blockingExecutor
        .flatMap { executor =>
          Task {
            logger.info("Registering a file watcher")
            // Callback invoked by the monitor thread for each FS event;
            // emits exactly one event (wrapped in a Chunk) into the stream.
            def fileChanged(file: files.File, action: FileChangeAction): Unit = {
              pushMessage(ZIO.succeed(Chunk(FileChangeEvent(file.path, action, System.currentTimeMillis()))))
            }
            // Callback for monitor failures; Some(error) terminates the
            // stream with that error (None would mean graceful end-of-stream).
            def exceptionOccurred(th: Throwable): Unit = {
              pushMessage(ZIO.fail(Some(FileMonitorError(th))))
            }
            // One recursive monitor per watched directory root.
            dirFiles.foreach { dirFile =>
              val monitor = new CustomFileMonitor(dirFile, fileChanged, exceptionOccurred)
              monitor.start()(executor.asEC)
            }
            logger.info(s"Waiting for changes...")
          }
        }
        .catchAll { th =>
          // Wrap any registration-time Throwable into the stream's error type.
          ZIO.fail(FileMonitorError(th))
        }
    }
  }

  /**
   * Recursive better-files monitor that forwards each FS event kind to the
   * supplied callbacks (the `count` argument from better-files is ignored).
   */
  class CustomFileMonitor(contentDirFile: Path, fileChanged: (files.File, FileChangeAction) => Unit,
      exceptionOccurred: Throwable => Unit)
      extends FileMonitor(contentDirFile, recursive = true) {
    override def onCreate(file: files.File, count: Int): Unit = fileChanged(file, FileChangeAction.Created)
    override def onModify(file: files.File, count: Int): Unit = fileChanged(file, FileChangeAction.Updated)
    override def onDelete(file: files.File, count: Int): Unit = fileChanged(file, FileChangeAction.Deleted)
    override def onException(th: Throwable): Unit = exceptionOccurred(th)
  }
}
| denisftw/s2gen | src/main/scala/com/appliedscala/generator/services/MonitorService.scala | Scala | mit | 2,083 |
package msgpack4z
import scalaz.-\/
// GENERATED CODE: DO NOT EDIT.
object CaseCodec {
def codec1[A1, Z](construct: A1 => Z, extract: Z => Option[A1])(implicit A1: MsgpackCodec[A1]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(1)
A1.pack(packer, extract(z).get)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 1) {
A1.unpack(unpacker).map(construct)
}else{
-\/(new UnexpectedArraySize(1, size))
}
}
)
  /** Overload alias for [[codec1]] so call sites can write `CaseCodec.codec(...)` uniformly for any arity. */
  def codec[A1, Z](construct: A1 => Z, extract: Z => Option[A1])(implicit A1: MsgpackCodec[A1]): MsgpackCodec[Z] =
    codec1(construct, extract)(A1)
  /**
   * Codec for a 2-field product `Z`, encoded as a msgpack array of length 2.
   * Pattern shared by all generated codecN (N >= 2): pack writes the array
   * header, each field in order, then `arrayEnd()`; unpack checks the header
   * size, unpacks each field, applies `construct` through the disjunction
   * applicative, calls `arrayEnd()`, and returns the accumulated result —
   * or fails with `UnexpectedArraySize` on a size mismatch.
   */
  def codec2[A1, A2, Z](construct: (A1, A2) => Z, extract: Z => Option[(A1, A2)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2]): MsgpackCodec[Z] =
    MsgpackCodec.codec(
      (packer, z) => {
        packer.packArrayHeader(2)
        val x = extract(z).get
        A1.pack(packer, x._1); A2.pack(packer, x._2)
        packer.arrayEnd()
      }
      ,
      unpacker => {
        val size = unpacker.unpackArrayHeader()
        if(size == 2) {
          val result = zeroapply.DisjunctionApply.apply2(
            A1.unpack(unpacker), A2.unpack(unpacker)
          )(construct)
          unpacker.arrayEnd()
          result
        }else{
          -\/(new UnexpectedArraySize(2, size))
        }
      }
    )

  /** Overload alias for [[codec2]]. */
  def codec[A1, A2, Z](construct: (A1, A2) => Z, extract: Z => Option[(A1, A2)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2]): MsgpackCodec[Z] =
    codec2(construct, extract)(A1, A2)
def codec3[A1, A2, A3, Z](construct: (A1, A2, A3) => Z, extract: Z => Option[(A1, A2, A3)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(3)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 3) {
val result = zeroapply.DisjunctionApply.apply3(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(3, size))
}
}
)
def codec[A1, A2, A3, Z](construct: (A1, A2, A3) => Z, extract: Z => Option[(A1, A2, A3)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3]): MsgpackCodec[Z] =
codec3(construct, extract)(A1, A2, A3)
def codec4[A1, A2, A3, A4, Z](construct: (A1, A2, A3, A4) => Z, extract: Z => Option[(A1, A2, A3, A4)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(4)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 4) {
val result = zeroapply.DisjunctionApply.apply4(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(4, size))
}
}
)
def codec[A1, A2, A3, A4, Z](construct: (A1, A2, A3, A4) => Z, extract: Z => Option[(A1, A2, A3, A4)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4]): MsgpackCodec[Z] =
codec4(construct, extract)(A1, A2, A3, A4)
def codec5[A1, A2, A3, A4, A5, Z](construct: (A1, A2, A3, A4, A5) => Z, extract: Z => Option[(A1, A2, A3, A4, A5)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(5)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 5) {
val result = zeroapply.DisjunctionApply.apply5(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(5, size))
}
}
)
def codec[A1, A2, A3, A4, A5, Z](construct: (A1, A2, A3, A4, A5) => Z, extract: Z => Option[(A1, A2, A3, A4, A5)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5]): MsgpackCodec[Z] =
codec5(construct, extract)(A1, A2, A3, A4, A5)
def codec6[A1, A2, A3, A4, A5, A6, Z](construct: (A1, A2, A3, A4, A5, A6) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(6)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 6) {
val result = zeroapply.DisjunctionApply.apply6(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(6, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, Z](construct: (A1, A2, A3, A4, A5, A6) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6]): MsgpackCodec[Z] =
codec6(construct, extract)(A1, A2, A3, A4, A5, A6)
def codec7[A1, A2, A3, A4, A5, A6, A7, Z](construct: (A1, A2, A3, A4, A5, A6, A7) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(7)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 7) {
val result = zeroapply.DisjunctionApply.apply7(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(7, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, Z](construct: (A1, A2, A3, A4, A5, A6, A7) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7]): MsgpackCodec[Z] =
codec7(construct, extract)(A1, A2, A3, A4, A5, A6, A7)
def codec8[A1, A2, A3, A4, A5, A6, A7, A8, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(8)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 8) {
val result = zeroapply.DisjunctionApply.apply8(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(8, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8]): MsgpackCodec[Z] =
codec8(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8)
def codec9[A1, A2, A3, A4, A5, A6, A7, A8, A9, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(9)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 9) {
val result = zeroapply.DisjunctionApply.apply9(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(9, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9]): MsgpackCodec[Z] =
codec9(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9)
def codec10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(10)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 10) {
val result = zeroapply.DisjunctionApply.apply10(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(10, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10]): MsgpackCodec[Z] =
codec10(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)
def codec11[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(11)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 11) {
val result = zeroapply.DisjunctionApply.apply11(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(11, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11]): MsgpackCodec[Z] =
codec11(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11)
def codec12[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(12)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 12) {
val result = zeroapply.DisjunctionApply.apply12(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(12, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12]): MsgpackCodec[Z] =
codec12(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12)
def codec13[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(13)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 13) {
val result = zeroapply.DisjunctionApply.apply13(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(13, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13]): MsgpackCodec[Z] =
codec13(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13)
def codec14[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(14)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 14) {
val result = zeroapply.DisjunctionApply.apply14(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(14, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14]): MsgpackCodec[Z] =
codec14(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14)
def codec15[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(15)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 15) {
val result = zeroapply.DisjunctionApply.apply15(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(15, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15]): MsgpackCodec[Z] =
codec15(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15)
def codec16[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(16)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 16) {
val result = zeroapply.DisjunctionApply.apply16(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(16, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16]): MsgpackCodec[Z] =
codec16(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16)
def codec17[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17]): MsgpackCodec[Z] =
MsgpackCodec.codec(
(packer, z) => {
packer.packArrayHeader(17)
val x = extract(z).get
A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16); A17.pack(packer, x._17)
packer.arrayEnd()
}
,
unpacker => {
val size = unpacker.unpackArrayHeader()
if(size == 17) {
val result = zeroapply.DisjunctionApply.apply17(
A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker), A17.unpack(unpacker)
)(construct)
unpacker.arrayEnd()
result
}else{
-\/(new UnexpectedArraySize(17, size))
}
}
)
def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17]): MsgpackCodec[Z] =
codec17(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17)
  /** Builds a codec for an 18-field product type `Z` from per-field codecs (generated code).
    * Pack: array header of size 18, then each tuple component from `extract(z)` in order.
    * NOTE(review): `extract(z).get` throws if `extract` returns `None` — assumed total for packed values.
    * Unpack: requires exactly 18 elements; unpacks in order (order-sensitive reads from the
    * shared unpacker) and applies `construct`; otherwise a left disjunction with
    * [[UnexpectedArraySize]].
    */
  def codec18[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18]): MsgpackCodec[Z] =
    MsgpackCodec.codec(
      (packer, z) => {
        packer.packArrayHeader(18)
        val x = extract(z).get
        A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16); A17.pack(packer, x._17); A18.pack(packer, x._18)
        packer.arrayEnd()
      }
      ,
      unpacker => {
        val size = unpacker.unpackArrayHeader()
        if(size == 18) {
          val result = zeroapply.DisjunctionApply.apply18(
            A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker), A17.unpack(unpacker), A18.unpack(unpacker)
          )(construct)
          unpacker.arrayEnd()
          result
        }else{
          -\/(new UnexpectedArraySize(18, size))
        }
      }
    )
  /** Arity-based overload forwarding to [[codec18]]. */
  def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18]): MsgpackCodec[Z] =
    codec18(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18)
  /** Builds a codec for a 19-field product type `Z` from per-field codecs (generated code).
    * Pack: array header of size 19, then each tuple component from `extract(z)` in order.
    * NOTE(review): `extract(z).get` throws if `extract` returns `None` — assumed total for packed values.
    * Unpack: requires exactly 19 elements; unpacks in order (order-sensitive reads from the
    * shared unpacker) and applies `construct`; otherwise a left disjunction with
    * [[UnexpectedArraySize]].
    */
  def codec19[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19]): MsgpackCodec[Z] =
    MsgpackCodec.codec(
      (packer, z) => {
        packer.packArrayHeader(19)
        val x = extract(z).get
        A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16); A17.pack(packer, x._17); A18.pack(packer, x._18); A19.pack(packer, x._19)
        packer.arrayEnd()
      }
      ,
      unpacker => {
        val size = unpacker.unpackArrayHeader()
        if(size == 19) {
          val result = zeroapply.DisjunctionApply.apply19(
            A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker), A17.unpack(unpacker), A18.unpack(unpacker), A19.unpack(unpacker)
          )(construct)
          unpacker.arrayEnd()
          result
        }else{
          -\/(new UnexpectedArraySize(19, size))
        }
      }
    )
  /** Arity-based overload forwarding to [[codec19]]. */
  def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19]): MsgpackCodec[Z] =
    codec19(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19)
  /** Builds a codec for a 20-field product type `Z` from per-field codecs (generated code).
    * Pack: array header of size 20, then each tuple component from `extract(z)` in order.
    * NOTE(review): `extract(z).get` throws if `extract` returns `None` — assumed total for packed values.
    * Unpack: requires exactly 20 elements; unpacks in order (order-sensitive reads from the
    * shared unpacker) and applies `construct`; otherwise a left disjunction with
    * [[UnexpectedArraySize]].
    */
  def codec20[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19], A20: MsgpackCodec[A20]): MsgpackCodec[Z] =
    MsgpackCodec.codec(
      (packer, z) => {
        packer.packArrayHeader(20)
        val x = extract(z).get
        A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16); A17.pack(packer, x._17); A18.pack(packer, x._18); A19.pack(packer, x._19); A20.pack(packer, x._20)
        packer.arrayEnd()
      }
      ,
      unpacker => {
        val size = unpacker.unpackArrayHeader()
        if(size == 20) {
          val result = zeroapply.DisjunctionApply.apply20(
            A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker), A17.unpack(unpacker), A18.unpack(unpacker), A19.unpack(unpacker), A20.unpack(unpacker)
          )(construct)
          unpacker.arrayEnd()
          result
        }else{
          -\/(new UnexpectedArraySize(20, size))
        }
      }
    )
  /** Arity-based overload forwarding to [[codec20]]. */
  def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19], A20: MsgpackCodec[A20]): MsgpackCodec[Z] =
    codec20(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20)
  /** Builds a codec for a 21-field product type `Z` from per-field codecs (generated code).
    * Pack: array header of size 21, then each tuple component from `extract(z)` in order.
    * NOTE(review): `extract(z).get` throws if `extract` returns `None` — assumed total for packed values.
    * Unpack: requires exactly 21 elements; unpacks in order (order-sensitive reads from the
    * shared unpacker) and applies `construct`; otherwise a left disjunction with
    * [[UnexpectedArraySize]].
    */
  def codec21[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19], A20: MsgpackCodec[A20], A21: MsgpackCodec[A21]): MsgpackCodec[Z] =
    MsgpackCodec.codec(
      (packer, z) => {
        packer.packArrayHeader(21)
        val x = extract(z).get
        A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16); A17.pack(packer, x._17); A18.pack(packer, x._18); A19.pack(packer, x._19); A20.pack(packer, x._20); A21.pack(packer, x._21)
        packer.arrayEnd()
      }
      ,
      unpacker => {
        val size = unpacker.unpackArrayHeader()
        if(size == 21) {
          val result = zeroapply.DisjunctionApply.apply21(
            A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker), A17.unpack(unpacker), A18.unpack(unpacker), A19.unpack(unpacker), A20.unpack(unpacker), A21.unpack(unpacker)
          )(construct)
          unpacker.arrayEnd()
          result
        }else{
          -\/(new UnexpectedArraySize(21, size))
        }
      }
    )
  /** Arity-based overload forwarding to [[codec21]]. */
  def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19], A20: MsgpackCodec[A20], A21: MsgpackCodec[A21]): MsgpackCodec[Z] =
    codec21(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21)
  /** Builds a codec for a 22-field product type `Z` from per-field codecs (generated code).
    * Pack: array header of size 22, then each tuple component from `extract(z)` in order.
    * NOTE(review): `extract(z).get` throws if `extract` returns `None` — assumed total for packed values.
    * Unpack: requires exactly 22 elements; unpacks in order (order-sensitive reads from the
    * shared unpacker) and applies `construct`; otherwise a left disjunction with
    * [[UnexpectedArraySize]].
    */
  def codec22[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19], A20: MsgpackCodec[A20], A21: MsgpackCodec[A21], A22: MsgpackCodec[A22]): MsgpackCodec[Z] =
    MsgpackCodec.codec(
      (packer, z) => {
        packer.packArrayHeader(22)
        val x = extract(z).get
        A1.pack(packer, x._1); A2.pack(packer, x._2); A3.pack(packer, x._3); A4.pack(packer, x._4); A5.pack(packer, x._5); A6.pack(packer, x._6); A7.pack(packer, x._7); A8.pack(packer, x._8); A9.pack(packer, x._9); A10.pack(packer, x._10); A11.pack(packer, x._11); A12.pack(packer, x._12); A13.pack(packer, x._13); A14.pack(packer, x._14); A15.pack(packer, x._15); A16.pack(packer, x._16); A17.pack(packer, x._17); A18.pack(packer, x._18); A19.pack(packer, x._19); A20.pack(packer, x._20); A21.pack(packer, x._21); A22.pack(packer, x._22)
        packer.arrayEnd()
      }
      ,
      unpacker => {
        val size = unpacker.unpackArrayHeader()
        if(size == 22) {
          val result = zeroapply.DisjunctionApply.apply22(
            A1.unpack(unpacker), A2.unpack(unpacker), A3.unpack(unpacker), A4.unpack(unpacker), A5.unpack(unpacker), A6.unpack(unpacker), A7.unpack(unpacker), A8.unpack(unpacker), A9.unpack(unpacker), A10.unpack(unpacker), A11.unpack(unpacker), A12.unpack(unpacker), A13.unpack(unpacker), A14.unpack(unpacker), A15.unpack(unpacker), A16.unpack(unpacker), A17.unpack(unpacker), A18.unpack(unpacker), A19.unpack(unpacker), A20.unpack(unpacker), A21.unpack(unpacker), A22.unpack(unpacker)
          )(construct)
          unpacker.arrayEnd()
          result
        }else{
          -\/(new UnexpectedArraySize(22, size))
        }
      }
    )
  /** Arity-based overload forwarding to [[codec22]]. */
  def codec[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22, Z](construct: (A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22) => Z, extract: Z => Option[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22)])(implicit A1: MsgpackCodec[A1], A2: MsgpackCodec[A2], A3: MsgpackCodec[A3], A4: MsgpackCodec[A4], A5: MsgpackCodec[A5], A6: MsgpackCodec[A6], A7: MsgpackCodec[A7], A8: MsgpackCodec[A8], A9: MsgpackCodec[A9], A10: MsgpackCodec[A10], A11: MsgpackCodec[A11], A12: MsgpackCodec[A12], A13: MsgpackCodec[A13], A14: MsgpackCodec[A14], A15: MsgpackCodec[A15], A16: MsgpackCodec[A16], A17: MsgpackCodec[A17], A18: MsgpackCodec[A18], A19: MsgpackCodec[A19], A20: MsgpackCodec[A20], A21: MsgpackCodec[A21], A22: MsgpackCodec[A22]): MsgpackCodec[Z] =
    codec22(construct, extract)(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22)
}
| msgpack4z/msgpack4z-core | src/main/scala/msgpack4z/CaseCodec.scala | Scala | mit | 45,654 |
package x7c1.linen.repository.channel.preset
import org.junit.Assert.assertEquals
import org.junit.Test
import org.junit.runner.RunWith
import org.robolectric.annotation.Config
import org.robolectric.{RobolectricTestRunner, RuntimeEnvironment}
import org.scalatest.junit.JUnitSuiteLike
import x7c1.linen.database.control.DatabaseHelper
import x7c1.linen.repository.account.setup.ClientAccountSetup
import x7c1.linen.repository.channel.subscribe.ChannelSubscriber
import x7c1.linen.repository.preset.PresetFactory
@Config(manifest=Config.NONE)
@RunWith(classOf[RobolectricTestRunner])
class PresetChannelsAccessorTest extends JUnitSuiteLike {

  /** Preset channels should be visible through the accessor and start unsubscribed. */
  @Test
  def testSetup1() = {
    val app = RuntimeEnvironment.application
    val db = new DatabaseHelper(app)
    new PresetFactory(db).setupJapanesePresets()

    val Right(account) = ClientAccountSetup(db).findOrCreate()
    val Right(channels) = AllPresetChannelsAccessor.create(
      clientAccountId = account.accountId,
      helper = db
    )

    assertEquals(3, channels.length)
    assertEquals("Tech", channels.findAt(0).get.name)
    assertEquals("Game", channels.findAt(1).get.name)
    assertEquals(false, channels.findAt(0).get.isSubscribed)
  }

  /** Subscribing to one channel must not affect the subscription state of others. */
  @Test
  def testSubscribeChannel() = {
    val app = RuntimeEnvironment.application
    val db = new DatabaseHelper(app)
    new PresetFactory(db).setupJapanesePresets()

    val Right(account) = ClientAccountSetup(db).findOrCreate()

    // Builds a fresh accessor on every call so subscription state is re-read from the DB.
    def channelAt(index: Int) = {
      val Right(accessor) = AllPresetChannelsAccessor.create(
        clientAccountId = account.accountId,
        helper = db
      )
      val Some(found) = accessor.findAt(index)
      found
    }

    assertEquals(false, channelAt(0).isSubscribed)
    assertEquals(false, channelAt(1).isSubscribed)

    new ChannelSubscriber(account = account, db).subscribe(channelAt(0).channelId)

    assertEquals(true, channelAt(0).isSubscribed)
    assertEquals(false, channelAt(1).isSubscribed)
  }
}
| x7c1/Linen | linen-repository/src/test/scala/x7c1/linen/repository/channel/preset/PresetChannelsAccessorTest.scala | Scala | mit | 2,101 |
/*
* Copyright 2013-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.parse.rst
import laika.tree.Elements._
import laika.util.Builders._
/** API for creating interpreted text roles, the extension mechanism for inline elements of reStructuredText.
* The API did not aim to mimic the API of the original Python reference implementation.
* Instead the goal was to create an API that is idiomatic Scala, fully typesafe and as concise as possible.
* Yet it should be flexible enough to semantically support the options of the Python text roles, so that
* ideally most existing Python text roles could theoretically get ported to Laika.
*
* Entry point for creating a new role is the `TextRole` object. It allows to specify the following
* aspects that define a text role:
*
* - The name with which it can be referred to by both, a span of interpreted text and a role
* directive to further customize it.
*
* - The default value, that should get passed to the role function in case it is used
* directly in interpreted text without customization through a role directive.
*
* - The role directive that specifies how the role can be customized. The options
* for role directives are almost identical to regular directives, the only difference
* being that role directives do not support arguments, only fields and body elements.
*
* - The actual role function. It gets invoked for each occurrence of interpreted text
* that refers to this role, either directly by name or to the name of a role directive
* that customized this role. The first argument is either the default value
* or the result of the role directive, the second is the actual text of the interpreted
* text span. The return value of the role function is the actual `Span` instance
* that the original interpreted text should be replaced with.
*
* A role directive may consist of any combination of fields and body elements:
*
* {{{
* .. role:: ticket(link)
* :base-url: http://www.company.com/tickets/
* }}}
*
* In the example above `ticket` is the name of the customized role, `link` the name
* of the base role and `base-url` the value that overrides the default defined in the
* base role. For the specification details on role directives see
* [[http://docutils.sourceforge.net/docs/ref/rst/directives.html#custom-interpreted-text-roles]].
*
* Before such a role directive can be used, an implementation has to be provided
* for the base role with the name `link`. For more details on implementing directives
* see [[laika.parse.rst.Directives]].
*
* The implementation of the `link` text role could look like this:
*
* {{{
* val rst = ReStructuredText withTextRoles (
* TextRole("link", "http://www.company.com/main/")(field("base-url")) {
* (base, text) => Link(List(Text(text)), base + text)
* }
* )
*
* Transform from rst to HTML fromFile "hello.rst" toFile "hello.html"
* }}}
*
* We specify the name of the role to be `link`, and the default value the URL provided as the
* second argument. The second parameter list specifies the role directive implementation,
* in this case only consisting of a call to `field("base-url")` which specifies a required
* field of type `String` (since no conversion function was supplied). The type of the result
* of the directive has to match the type of the default value.
* Finally the role function is defined that accepts two arguments. The first is the base
* url, either the default in case the base role is used directly, or the value specified
* with the `base-url` field in a customized role. The second is the actual text from the
* interpreted text span. Finally the directive gets registered with the `ReStructuredText`
* parser.
*
* If you need to define more fields or body content they can be added with the `~` combinator
* just like with normal directives. Likewise you can specify validators and converters for
* fields and body values like documented in [[laika.parse.rst.Directives]].
*
* Our example role can then be used in the following ways:
*
* Using the base role directly:
*
* {{{
* For details read our :link:`documentation`.
* }}}
*
* This would result in the following HTML:
*
* {{{
* For details read our <a href="http://www.company.com/main/documentation">documentation</a>.
* }}}
*
* Using the customized role called `ticket`:
*
* {{{
* For details see ticket :ticket:`344`.
* }}}
*
* This would result in the following HTML:
*
* {{{
 * For details see ticket <a href="http://www.company.com/tickets/344">344</a>.
* }}}
*
* @author Jens Halm
*/
object TextRoles {
  /** API to implement by the actual role directive parser.
   * The methods of this trait correspond to the methods of the `Parts` object,
   * only differing in return type.
   */
  trait RoleDirectiveParser {
    /** Parses a required named field, applying `convert` to its raw string value. */
    def field [T](name: String, convert: String => Either[String,T]): Result[T]
    /** Parses an optional named field, applying `convert` when the field is present. */
    def optField [T](name: String, convert: String => Either[String,T]): Result[Option[T]]
    /** Parses the directive body as standard block-level content. */
    def blockContent: Result[Seq[Block]]
    /** Parses the directive body as span-level (inline) content. */
    def spanContent: Result[Seq[Span]]
    /** Passes the raw string of the directive body to `f` for conversion and validation. */
    def content [T](f: String => Either[String,T]): Result[T]
  }
  /** Represents a single part (field or body) of a directive.
   */
  abstract class RoleDirectivePart[+A] extends (RoleDirectiveParser => Result[A]) { self =>
    /** Creates a new part that applies `f` to the result of this part. */
    def map [B](f: A => B): RoleDirectivePart[B] = new RoleDirectivePart[B] {
      def apply (p: RoleDirectiveParser) = self(p) map f
    }
  }
  /** Type class required for using the generic `Builders` API with directives.
   */
  implicit object CanBuildRoleDirectivePart extends CanBuild[RoleDirectivePart] {
    /** Combines two parts into a single part producing both results.
     * Note that both parts are applied to the parser before either result
     * is extracted with `get`.
     */
    def apply [A,B](ma: RoleDirectivePart[A], mb: RoleDirectivePart[B]): RoleDirectivePart[A~B] = new RoleDirectivePart[A~B] {
      def apply (p: RoleDirectiveParser) = {
        val a = ma(p)
        val b = mb(p)
        new Result(new ~(a.get,b.get))
      }
    }
    /** Applies `f` to the result of the given part. */
    def map [A,B](m: RoleDirectivePart[A], f: A => B): RoleDirectivePart[B] = m map f
  }
  /** The public user API for specifying the required and optional parts of a directive
   * (fields or body) together with optional converter/validator functions.
   */
  object Parts {
    /** Wraps a parser function into a `RoleDirectivePart`. */
    private def part [T](f: RoleDirectiveParser => Result[T]): RoleDirectivePart[T] = new RoleDirectivePart[T] {
      def apply (p: RoleDirectiveParser) = f(p)
    }
    /** Specifies a required named field.
     *
     * @param name the name of the field as used in the directive markup (without the colons)
     * @param convert the function to use for converting and validating the parsed value
     * @return a directive part that can be combined with further parts with the `~` operator
     */
    def field [T](name: String,
                  convert: String => Either[String,T] = { s:String => Right(s) }): RoleDirectivePart[T] =
      part(_.field(name, convert))
    /** Specifies an optional named field.
     *
     * @param name the name of the field as used in the directive markup (without the colons)
     * @param convert the function to use for converting and validating the parsed value
     * if it is present
     * @return a directive part that can be combined with further parts with the `~` operator
     */
    def optField [T](name: String,
                     convert: String => Either[String,T] = { s:String => Right(s) }): RoleDirectivePart[Option[T]] =
      part(_.optField(name, convert))
    /** Specifies standard block-level content as the body of the directive.
     *
     * @return a directive part that can be combined with further parts with the `~` operator
     */
    def blockContent: RoleDirectivePart[Seq[Block]] = part(_.blockContent)
    /** Specifies standard span-level (inline) content as the body of the directive.
     *
     * @return a directive part that can be combined with further parts with the `~` operator
     */
    def spanContent: RoleDirectivePart[Seq[Span]] = part(_.spanContent)
    /** Specifies that the body of the directive markup should get passed to the conversion function as a raw string.
     *
     * @param f the function to use for converting and validating the parsed value
     * @return a directive part that can be combined with further parts with the `~` operator
     */
    def content [T](f: String => Either[String,T]): RoleDirectivePart[T] = part(_.content(f))
  }
  /** Represents a single text role implementation.
   */
  class TextRole private (val name: String, val default: String => Span, val part: BlockParsers with InlineParsers => RoleDirectivePart[String => Span])
  /** API entry point for setting up a new text role.
   */
  object TextRole {
    /** Creates a new text role that can be referred to by interpreted text with the specified name.
     * The `DirectivePart` can be created by using the methods of the `Parts`
     * object and specifies the functionality for users who customize a text role with a role directive.
     * The `roleF` function is the final function that will be invoked with either the default value
     * or the result of the role directive as the first argument (depending on whether the user used
     * the default role or a customized one). The actual text of the interpreted text will be passed
     * as the second argument. The return value of the role function is the actual `Span` instance
     * that the original interpreted text should be replaced with.
     *
     * @param name the name the text role can be used with in interpreted text
     * @param default the default value to pass to the role function in case the interpreted text
     * is not referring to a role directive
     * @param part the implementation of the role directive for customizing the text role
     * that can be created by using the combinators of the `Parts` object
     * @param roleF the final role function that gets passed the result of the directive (or default
     * value) and the actual text of the interpreted text span
     * @return a new text role that can be registered with the reStructuredText parser
     */
    def apply [T] (name: String, default: T)(part: RoleDirectivePart[T])(roleF: (T, String) => Span): TextRole =
      new TextRole(name.toLowerCase, str => roleF(default, str), _ => part map (res => (str: String) => roleF(res, str)))
    /** Creates a new text role that can be referred to by interpreted text with the specified name.
     * The `DirectivePart` can be created by using the methods of the `Parts`
     * object and specifies the functionality for users who customize a text role with a role directive.
     * The `roleF` function is the final function that will be invoked with either the default value
     * or the result of the role directive as the first argument (depending on whether the user used
     * the default role or a customized one). The actual text of the interpreted text will be passed
     * as the second argument. The return value of the role function is the actual `Span` instance
     * that the original interpreted text should be replaced with.
     *
     * In contrast to the `apply` function, this function allows to
     * depend on the standard block and span parsers. This is necessary if
     * the directive does both, require a custom parser for arguments or body
     * and allow for nested directives in those parsers.
     *
     * @param name the name the text role can be used with in interpreted text
     * @param default the default value to pass to the role function in case the interpreted text
     * is not referring to a role directive
     * @param part a function returning the implementation of the role directive for customizing the text role
     * that can be created by using the combinators of the `Parts` object
     * @param roleF the final role function that gets passed the result of the directive (or default
     * value) and the actual text of the interpreted text span
     * @return a new text role that can be registered with the reStructuredText parser
     */
    def recursive [T] (name: String, default: T)(part: BlockParsers with InlineParsers => RoleDirectivePart[T])(roleF: (T, String) => Span): TextRole =
      new TextRole(name.toLowerCase, str => roleF(default, str), parsers => part(parsers) map (res => (str: String) => roleF(res, str)))
  }
}
| amuramatsu/Laika | core/src/main/scala/laika/parse/rst/TextRoles.scala | Scala | apache-2.0 | 12,986 |
/**
* Copyright 2014-2015 Martin Cooper
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.martincooper.datatable.DataTableSpecs
import com.github.martincooper.datatable.{ DataColumn, DataTable, DataTableException, GenericColumn }
import org.scalatest.{ FlatSpec, Matchers }
/** Behavioral specs for DataTable construction and validation rules. */
class DataTableSpec extends FlatSpec with Matchers {

  "A new DataTable" can "be created with a name and no columns" in {
    val table = DataTable("TestTable")

    table.isSuccess should be(true)
    table.get.name should be("TestTable")
    table.get.columns.length should be(0)
  }

  it can "be created with a name and default columns" in {
    val intCol = new DataColumn[Int]("ColOne", (0 to 19).map(identity))
    val strCol = new DataColumn[String]("ColTwo", (0 to 19).map(i => s"Value : $i"))

    val attempt = DataTable("TestTable", Seq(intCol, strCol))
    attempt.isSuccess should be(true)

    val table = attempt.get
    table.name should be("TestTable")
    table.columns.length should be(2)

    table.columns(0).data(4) shouldBe a[Integer]
    table.columns(0).data(4) should be(4)
    table.columns(1).data(4) shouldBe a[String]
    table.columns(1).data(4) should be("Value : 4")
  }

  it should "return correct row count" in {
    val intCol = new DataColumn[Int]("ColOne", (0 to 19).map(identity))
    val strCol = new DataColumn[String]("ColTwo", (0 to 19).map(i => s"Value : $i"))

    val attempt = DataTable("TestTable", Seq(intCol, strCol))

    attempt.isSuccess should be(true)
    attempt.get.rowCount should be(20)
  }

  it should "return correct row count when it has no columns" in {
    val attempt = DataTable("TestTable", Seq.empty[GenericColumn])

    attempt.isSuccess should be(true)
    attempt.get.rowCount should be(0)
  }

  it should "prevent different column lengths" in {
    // Column lengths deliberately differ: 11 rows vs 21 rows.
    val intCol = new DataColumn[Int]("ColOne", (0 to 10).map(identity))
    val strCol = new DataColumn[String]("ColTwo", (0 to 20).map(i => s"Value : $i"))

    val attempt = DataTable("TestTable", Seq(intCol, strCol))

    attempt.isFailure should be(true)
    attempt.failed.get should be(DataTableException("Columns have uneven row count."))
  }

  it should "prevent duplicate column names" in {
    // Both columns share the name "ColOne".
    val intCol = new DataColumn[Int]("ColOne", (0 to 10).map(identity))
    val strCol = new DataColumn[String]("ColOne", (0 to 10).map(i => s"Value : $i"))

    val attempt = DataTable("TestTable", Seq(intCol, strCol))

    attempt.isFailure should be(true)
    attempt.failed.get should be(DataTableException("Columns contain duplicate names."))
  }
}
| martincooper/scala-datatable | src/test/scala/com/github/martincooper/datatable/DataTableSpecs/DataTableSpec.scala | Scala | apache-2.0 | 3,203 |
/*
Copyright 2015 Mate1 inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created by Marc-André Lamothe on 11/23/15.
*/
package com.mate1.kafka.avro
/**
 * Supported Avro wire encodings. Each value's numeric id is the tag written in
 * the magic bytes that precede the serialized message data, so the ids (0 for
 * binary, 1 for JSON) are part of the wire format and must not change.
 */
object AvroEncoding extends Enumeration {
  val Binary: Value = Value(0)
  val JSON: Value = Value(1)
}
| mate1/kafka-avro-tools | src/main/scala/com/mate1/kafka/avro/AvroEncoding.scala | Scala | apache-2.0 | 891 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.{And, Attribute, AttributeSet, Expression, ExpressionSet, PredicateHelper}
import org.apache.spark.sql.catalyst.plans.{Inner, InnerLike, JoinType}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
/**
* Cost-based join reorder.
* We may have several join reorder algorithms in the future. This class is the entry of these
* algorithms, and chooses which one to use.
*/
object CostBasedJoinReorder extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * Rule entry point. Runs only when both CBO (`spark.sql.cbo.enabled`) and join
   * reordering are enabled in the conf; otherwise the plan is returned untouched.
   * Reordered subtrees are temporarily wrapped in [[OrderedJoin]] nodes and
   * converted back to plain [[Join]]s at the end of this method.
   */
  def apply(plan: LogicalPlan): LogicalPlan = {
    if (!conf.cboEnabled || !conf.joinReorderEnabled) {
      plan
    } else {
      val result = plan transformDown {
        // Start reordering with a joinable item, which is an InnerLike join with conditions.
        // Avoid reordering if a join hint is present.
        case j @ Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE) =>
          reorder(j, j.output)
        // Also accept a column-pruning Project sitting directly on top of such a join.
        case p @ Project(projectList, Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE))
          if projectList.forall(_.isInstanceOf[Attribute]) =>
          reorder(p, p.output)
      }
      // After reordering is finished, convert OrderedJoin back to Join.
      result transform {
        case OrderedJoin(left, right, jt, cond) => Join(left, right, jt, cond, JoinHint.NONE)
      }
    }
  }
  /**
   * Reorders a consecutive inner-join subtree rooted at `plan`, preserving
   * `output` as the final output attributes. Falls back to the original plan
   * when the DP search is not applicable (too few/many items, no join
   * conditions, or missing row-count statistics).
   */
  private def reorder(plan: LogicalPlan, output: Seq[Attribute]): LogicalPlan = {
    val (items, conditions) = extractInnerJoins(plan)
    val result =
      // Do reordering if the number of items is appropriate and join conditions exist.
      // We also need to check if costs of all items can be evaluated.
      if (items.size > 2 && items.size <= conf.joinReorderDPThreshold && conditions.nonEmpty &&
        items.forall(_.stats.rowCount.isDefined)) {
        JoinReorderDP.search(conf, items, conditions, output)
      } else {
        plan
      }
    // Set consecutive join nodes ordered.
    replaceWithOrderedJoin(result)
  }
  /**
   * Extracts items of consecutive inner joins and join conditions.
   * This method works for bushy trees and left/right deep trees.
   */
  private def extractInnerJoins(plan: LogicalPlan): (Seq[LogicalPlan], ExpressionSet) = {
    plan match {
      case Join(left, right, _: InnerLike, Some(cond), JoinHint.NONE) =>
        val (leftPlans, leftConditions) = extractInnerJoins(left)
        val (rightPlans, rightConditions) = extractInnerJoins(right)
        // Conditions are split into conjuncts so the DP search can place each
        // predicate on the first join where both sides' references are available.
        (leftPlans ++ rightPlans, leftConditions ++ rightConditions ++
          splitConjunctivePredicates(cond))
      case Project(projectList, j @ Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE))
        if projectList.forall(_.isInstanceOf[Attribute]) =>
        extractInnerJoins(j)
      case _ =>
        // Anything else is a leaf "item" of the join graph.
        (Seq(plan), ExpressionSet())
    }
  }
  /**
   * Recursively marks consecutive inner joins as [[OrderedJoin]] so the outer
   * `transformDown` in [[apply]] does not attempt to reorder the same subtree again.
   */
  private def replaceWithOrderedJoin(plan: LogicalPlan): LogicalPlan = plan match {
    case j @ Join(left, right, jt: InnerLike, Some(cond), JoinHint.NONE) =>
      val replacedLeft = replaceWithOrderedJoin(left)
      val replacedRight = replaceWithOrderedJoin(right)
      OrderedJoin(replacedLeft, replacedRight, jt, Some(cond))
    case p @ Project(projectList, j @ Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE)) =>
      p.copy(child = replaceWithOrderedJoin(j))
    case _ =>
      plan
  }
}
/**
 * A marker node mimicking a [[Join]] whose subtree has already been ordered by
 * the DP search. The `transformDown` cases in [[CostBasedJoinReorder.apply]]
 * match only on [[Join]], so wrapping a reordered subtree in this node prevents
 * it from being reordered a second time; it is converted back to a regular
 * [[Join]] once the whole plan has been processed.
 */
case class OrderedJoin(
    left: LogicalPlan,
    right: LogicalPlan,
    joinType: JoinType,
    condition: Option[Expression]) extends BinaryNode {
  override def output: Seq[Attribute] = left.output ++ right.output
}
/**
* Reorder the joins using a dynamic programming algorithm. This implementation is based on the
* paper: Access Path Selection in a Relational Database Management System.
* https://dl.acm.org/doi/10.1145/582095.582099
*
* First we put all items (basic joined nodes) into level 0, then we build all two-way joins
* at level 1 from plans at level 0 (single items), then build all 3-way joins from plans
* at previous levels (two-way joins and single items), then 4-way joins ... etc, until we
* build all n-way joins and pick the best plan among them.
*
* When building m-way joins, we only keep the best plan (with the lowest cost) for the same set
* of m items. E.g., for 3-way joins, we keep only the best plan for items {A, B, C} among
* plans (A J B) J C, (A J C) J B and (B J C) J A.
* We also prune cartesian product candidates when building a new plan if there exists no join
* condition involving references from both left and right. This pruning strategy significantly
* reduces the search space.
* E.g., given A J B J C J D with join conditions A.k1 = B.k1 and B.k2 = C.k2 and C.k3 = D.k3,
* plans maintained for each level are as follows:
* level 0: p({A}), p({B}), p({C}), p({D})
* level 1: p({A, B}), p({B, C}), p({C, D})
* level 2: p({A, B, C}), p({B, C, D})
* level 3: p({A, B, C, D})
* where p({A, B, C, D}) is the final output plan.
*
* For cost evaluation, since physical costs for operators are not available currently, we use
* cardinalities and sizes to compute costs.
*/
object JoinReorderDP extends PredicateHelper with Logging {
  /**
   * Runs the bottom-up dynamic-programming search over the join items and
   * returns the cheapest plan found for joining all of them, with the final
   * output restored to exactly `output` (same attributes, same order).
   */
  def search(
      conf: SQLConf,
      items: Seq[LogicalPlan],
      conditions: ExpressionSet,
      output: Seq[Attribute]): LogicalPlan = {
    val startTime = System.nanoTime()
    // Level i maintains all found plans for i + 1 items.
    // Create the initial plans: each plan is a single item with zero cost.
    val itemIndex = items.zipWithIndex
    val foundPlans = mutable.Buffer[JoinPlanMap]({
      // SPARK-32687: Change to use `LinkedHashMap` to make sure that items are
      // inserted and iterated in the same order.
      val joinPlanMap = new JoinPlanMap
      itemIndex.foreach {
        case (item, id) =>
          joinPlanMap.put(Set(id), JoinPlan(Set(id), item, ExpressionSet(), Cost(0, 0)))
      }
      joinPlanMap
    })
    // Build filters from the join graph to be used by the search algorithm.
    val filters = JoinReorderDPFilters.buildJoinGraphInfo(conf, items, conditions, itemIndex)
    // Build plans for next levels until the last level has only one plan. This plan contains
    // all items that can be joined, so there's no need to continue.
    val topOutputSet = AttributeSet(output)
    while (foundPlans.size < items.length) {
      // Build plans for the next level.
      foundPlans += searchLevel(foundPlans.toSeq, conf, conditions, topOutputSet, filters)
    }
    val durationInMs = (System.nanoTime() - startTime) / (1000 * 1000)
    logDebug(s"Join reordering finished. Duration: $durationInMs ms, number of items: " +
      s"${items.length}, number of plans in memo: ${foundPlans.map(_.size).sum}")
    // The last level must have one and only one plan, because all items are joinable.
    assert(foundPlans.size == items.length && foundPlans.last.size == 1)
    foundPlans.last.head._2.plan match {
      case p @ Project(projectList, j: Join) if projectList != output =>
        assert(topOutputSet == p.outputSet)
        // Keep the same order of final output attributes.
        p.copy(projectList = output)
      case finalPlan if !sameOutput(finalPlan, output) =>
        Project(output, finalPlan)
      case finalPlan =>
        finalPlan
    }
  }
  /** True if `plan.output` matches `expectedOutput` element-wise (semantic equality). */
  private def sameOutput(plan: LogicalPlan, expectedOutput: Seq[Attribute]): Boolean = {
    val thisOutput = plan.output
    thisOutput.length == expectedOutput.length && thisOutput.zip(expectedOutput).forall {
      case (a1, a2) => a1.semanticEquals(a2)
    }
  }
  /** Find all possible plans at the next level, based on existing levels. */
  private def searchLevel(
      existingLevels: Seq[JoinPlanMap],
      conf: SQLConf,
      conditions: ExpressionSet,
      topOutput: AttributeSet,
      filters: Option[JoinGraphInfo]): JoinPlanMap = {
    val nextLevel = new JoinPlanMap
    var k = 0
    val lev = existingLevels.length - 1
    // Build plans for the next level from plans at level k (one side of the join) and level
    // lev - k (the other side of the join).
    // For the lower level k, we only need to search from 0 to lev - k, because when building
    // a join from A and B, both A J B and B J A are handled.
    while (k <= lev - k) {
      val oneSideCandidates = existingLevels(k).values.toSeq
      for (i <- oneSideCandidates.indices) {
        val oneSidePlan = oneSideCandidates(i)
        val otherSideCandidates = if (k == lev - k) {
          // Both sides of a join are at the same level, no need to repeat for previous ones.
          oneSideCandidates.drop(i)
        } else {
          existingLevels(lev - k).values.toSeq
        }
        otherSideCandidates.foreach { otherSidePlan =>
          buildJoin(oneSidePlan, otherSidePlan, conf, conditions, topOutput, filters) match {
            case Some(newJoinPlan) =>
              // Check if it's the first plan for the item set, or it's a better plan than
              // the existing one due to lower cost.
              val existingPlan = nextLevel.get(newJoinPlan.itemIds)
              if (existingPlan.isEmpty || newJoinPlan.betterThan(existingPlan.get, conf)) {
                nextLevel.update(newJoinPlan.itemIds, newJoinPlan)
              }
            case None =>
              // Pruned (overlapping item sets, star-join filter, or would be a
              // cartesian product); intentionally nothing to record.
          }
        }
      }
      k += 1
    }
    nextLevel
  }
  /**
   * Builds a new JoinPlan if the following conditions hold:
   * - the sets of items contained in left and right sides do not overlap.
   * - there exists at least one join condition involving references from both sides.
   * - if star-join filter is enabled, allow the following combinations:
   * 1) (oneJoinPlan U otherJoinPlan) is a subset of star-join
   * 2) star-join is a subset of (oneJoinPlan U otherJoinPlan)
   * 3) (oneJoinPlan U otherJoinPlan) is a subset of non star-join
   *
   * @param oneJoinPlan One side JoinPlan for building a new JoinPlan.
   * @param otherJoinPlan The other side JoinPlan for building a new join node.
   * @param conf SQLConf for statistics computation.
   * @param conditions The overall set of join conditions.
   * @param topOutput The output attributes of the final plan.
   * @param filters Join graph info to be used as filters by the search algorithm.
   * @return Builds and returns a new JoinPlan if both conditions hold. Otherwise, returns None.
   */
  private def buildJoin(
      oneJoinPlan: JoinPlan,
      otherJoinPlan: JoinPlan,
      conf: SQLConf,
      conditions: ExpressionSet,
      topOutput: AttributeSet,
      filters: Option[JoinGraphInfo]): Option[JoinPlan] = {
    if (oneJoinPlan.itemIds.intersect(otherJoinPlan.itemIds).nonEmpty) {
      // Should not join two overlapping item sets.
      return None
    }
    if (filters.isDefined) {
      // Apply star-join filter, which ensures that tables in a star schema relationship
      // are planned together. The star-filter will eliminate joins among star and non-star
      // tables until the star joins are built. The following combinations are allowed:
      // 1. (oneJoinPlan U otherJoinPlan) is a subset of star-join
      // 2. star-join is a subset of (oneJoinPlan U otherJoinPlan)
      // 3. (oneJoinPlan U otherJoinPlan) is a subset of non star-join
      val isValidJoinCombination =
        JoinReorderDPFilters.starJoinFilter(oneJoinPlan.itemIds, otherJoinPlan.itemIds,
          filters.get)
      if (!isValidJoinCombination) return None
    }
    val onePlan = oneJoinPlan.plan
    val otherPlan = otherJoinPlan.plan
    // A usable join condition must reference both sides: not evaluable on either
    // side alone, but fully covered by the union of both sides' outputs.
    val joinConds = conditions
      .filterNot(l => canEvaluate(l, onePlan))
      .filterNot(r => canEvaluate(r, otherPlan))
      .filter(e => e.references.subsetOf(onePlan.outputSet ++ otherPlan.outputSet))
    if (joinConds.isEmpty) {
      // Cartesian product is very expensive, so we exclude them from candidate plans.
      // This also significantly reduces the search space.
      return None
    }
    // Put the deeper side on the left, tend to build a left-deep tree.
    val (left, right) = if (oneJoinPlan.itemIds.size >= otherJoinPlan.itemIds.size) {
      (onePlan, otherPlan)
    } else {
      (otherPlan, onePlan)
    }
    val newJoin = Join(left, right, Inner, joinConds.reduceOption(And), JoinHint.NONE)
    val collectedJoinConds = joinConds ++ oneJoinPlan.joinConds ++ otherJoinPlan.joinConds
    val remainingConds = conditions -- collectedJoinConds
    val neededAttr = AttributeSet(remainingConds.flatMap(_.references)) ++ topOutput
    val neededFromNewJoin = newJoin.output.filter(neededAttr.contains)
    // Prune columns that neither the remaining conditions nor the final output need.
    val newPlan =
      if ((newJoin.outputSet -- neededFromNewJoin).nonEmpty) {
        Project(neededFromNewJoin, newJoin)
      } else {
        newJoin
      }
    val itemIds = oneJoinPlan.itemIds.union(otherJoinPlan.itemIds)
    // Now the root node of onePlan/otherPlan becomes an intermediate join (if it's a non-leaf
    // item), so the cost of the new join should also include its own cost.
    val newPlanCost = oneJoinPlan.planCost + oneJoinPlan.rootCost(conf) +
      otherJoinPlan.planCost + otherJoinPlan.rootCost(conf)
    Some(JoinPlan(itemIds, newPlan, collectedJoinConds, newPlanCost))
  }
  /** Map[set of item ids, join plan for these items] */
  type JoinPlanMap = mutable.LinkedHashMap[Set[Int], JoinPlan]
  /**
   * Partial join order in a specific level.
   *
   * @param itemIds Set of item ids participating in this partial plan.
   * @param plan The plan tree with the lowest cost for these items found so far.
   * @param joinConds Join conditions included in the plan.
   * @param planCost The cost of this plan tree is the sum of costs of all intermediate joins.
   */
  case class JoinPlan(
      itemIds: Set[Int],
      plan: LogicalPlan,
      joinConds: ExpressionSet,
      planCost: Cost) {
    /** Get the cost of the root node of this plan tree. */
    def rootCost(conf: SQLConf): Cost = {
      if (itemIds.size > 1) {
        val rootStats = plan.stats
        Cost(rootStats.rowCount.get, rootStats.sizeInBytes)
      } else {
        // If the plan is a leaf item, it has zero cost.
        Cost(0, 0)
      }
    }
    /**
     * Compares two plans by a weighted sum of cardinality and size, where the
     * weight comes from `spark.sql.cbo.joinReorder.card.weight`; lower is better.
     */
    def betterThan(other: JoinPlan, conf: SQLConf): Boolean = {
      val thisCost = BigDecimal(this.planCost.card) * conf.joinReorderCardWeight +
        BigDecimal(this.planCost.size) * (1 - conf.joinReorderCardWeight)
      val otherCost = BigDecimal(other.planCost.card) * conf.joinReorderCardWeight +
        BigDecimal(other.planCost.size) * (1 - conf.joinReorderCardWeight)
      thisCost < otherCost
    }
  }
}
/**
 * Cost model entry for a plan: a pair of estimated statistics.
 *
 * @param card Estimated cardinality (number of rows).
 * @param size Estimated size in bytes.
 */
case class Cost(card: BigInt, size: BigInt) {
  /** Component-wise sum of two costs. */
  def +(other: Cost): Cost = Cost(card + other.card, size + other.size)
}
/**
* Implements optional filters to reduce the search space for join enumeration.
*
* 1) Star-join filters: Plan star-joins together since they are assumed
* to have an optimal execution based on their RI relationship.
* 2) Cartesian products: Defer their planning later in the graph to avoid
* large intermediate results (expanding joins, in general).
* 3) Composite inners: Don't generate "bushy tree" plans to avoid materializing
* intermediate results.
*
* Filters (2) and (3) are not implemented.
*/
object JoinReorderDPFilters extends PredicateHelper {
  /**
   * Builds join graph information to be used by the filtering strategies.
   * Currently, it builds the sets of star/non-star joins.
   * It can be extended with the sets of connected/unconnected joins, which
   * can be used to filter Cartesian products.
   *
   * Returns None when the star filter is disabled, or when all items fall on
   * one side (all star or all non-star), in which case filtering cannot prune
   * anything.
   */
  def buildJoinGraphInfo(
      conf: SQLConf,
      items: Seq[LogicalPlan],
      conditions: ExpressionSet,
      itemIndex: Seq[(LogicalPlan, Int)]): Option[JoinGraphInfo] = {
    if (conf.joinReorderDPStarFilter) {
      // Compute the tables in a star-schema relationship.
      val starJoin = StarSchemaDetection.findStarJoins(items, conditions.toSeq)
      val nonStarJoin = items.filterNot(starJoin.contains(_))
      if (starJoin.nonEmpty && nonStarJoin.nonEmpty) {
        // Map the plans back to their item ids so the filter can work on id sets.
        val itemMap = itemIndex.toMap
        Some(JoinGraphInfo(starJoin.map(itemMap).toSet, nonStarJoin.map(itemMap).toSet))
      } else {
        // Nothing interesting to return.
        None
      }
    } else {
      // Star schema filter is not enabled.
      None
    }
  }
  /**
   * Applies the star-join filter that eliminates join combinations among star
   * and non-star tables until the star join is built.
   *
   * Given the oneSideJoinPlan/otherSideJoinPlan, which represent all the plan
   * permutations generated by the DP join enumeration, and the star/non-star plans,
   * the following plan combinations are allowed:
   * 1. (oneSideJoinPlan U otherSideJoinPlan) is a subset of star-join
   * 2. star-join is a subset of (oneSideJoinPlan U otherSideJoinPlan)
   * 3. (oneSideJoinPlan U otherSideJoinPlan) is a subset of non star-join
   *
   * It assumes the sets are disjoint.
   *
   * Example query graph:
   *
   * t1 d1 - t2 - t3
   * \\ /
   * f1
   * |
   * d2
   *
   * star: {d1, f1, d2}
   * non-star: {t2, t1, t3}
   *
   * level 0: (f1 ), (d2 ), (t3 ), (d1 ), (t1 ), (t2 )
   * level 1: {t3 t2 }, {f1 d2 }, {f1 d1 }
   * level 2: {d2 f1 d1 }
   * level 3: {t1 d1 f1 d2 }, {t2 d1 f1 d2 }
   * level 4: {d1 t2 f1 t1 d2 }, {d1 t3 t2 f1 d2 }
   * level 5: {d1 t3 t2 f1 t1 d2 }
   *
   * @param oneSideJoinPlan One side of the join represented as a set of plan ids.
   * @param otherSideJoinPlan The other side of the join represented as a set of plan ids.
   * @param filters Star and non-star plans represented as sets of plan ids
   * @return true when the combination is allowed to be joined at this level.
   */
  def starJoinFilter(
      oneSideJoinPlan: Set[Int],
      otherSideJoinPlan: Set[Int],
      filters: JoinGraphInfo) : Boolean = {
    val starJoins = filters.starJoins
    val nonStarJoins = filters.nonStarJoins
    val join = oneSideJoinPlan.union(otherSideJoinPlan)
    // Disjoint sets
    oneSideJoinPlan.intersect(otherSideJoinPlan).isEmpty &&
      // Either star or non-star is empty
      (starJoins.isEmpty || nonStarJoins.isEmpty ||
        // Join is a subset of the star-join
        join.subsetOf(starJoins) ||
        // Star-join is a subset of join
        starJoins.subsetOf(join) ||
        // Join is a subset of non-star
        join.subsetOf(nonStarJoins))
  }
}
/**
 * Helper class that keeps information about the join graph as sets of item/plan ids.
 * It currently stores the star/non-star plans. It can be
 * extended with the set of connected/unconnected plans.
 *
 * @param starJoins    Item ids of plans participating in a star-schema join.
 * @param nonStarJoins Item ids of the remaining (non-star) plans.
 */
case class JoinGraphInfo (starJoins: Set[Int], nonStarJoins: Set[Int])
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/CostBasedJoinReorder.scala | Scala | apache-2.0 | 19,707 |
package io.shaka.http
import java.io.FileInputStream
import java.security.{KeyStore ⇒ JKeyStore}
import java.security.cert.X509Certificate
import javax.net.ssl._
/**
 * Helpers for configuring client-side TLS: builds an [[SSLSocketFactory]] and a
 * [[HostnameVerifier]] from a declarative [[Https.HttpsConfig]].
 *
 * SECURITY NOTE: [[Https.TrustAnyServer]] disables both certificate validation
 * and hostname verification (see [[Https.TrustAllSslCertificates]]); it must
 * only be used for testing against servers you control.
 */
object Https {
  private val defaultProtocol = "TLS"

  /** How server certificates are validated. */
  trait TrustStoreConfig
  /** Trust only servers whose certificates chain to the given JKS trust store. */
  case class TrustServersByTrustStore(path: String, password: String) extends TrustStoreConfig
  /** Trust every server, skipping certificate and hostname checks. Testing only. */
  case object TrustAnyServer extends TrustStoreConfig

  /** Whether a client certificate (key store) is presented to the server. */
  trait KeyStoreConfig
  /** Present the client identity stored in the given JKS key store. */
  case class UseKeyStore(path: String, password: String) extends KeyStoreConfig
  /** No client certificate. */
  case object DoNotUseKeyStore extends KeyStoreConfig

  /**
   * Full TLS configuration.
   *
   * @param trustStoreConfig server validation policy
   * @param keyStoreConfig   optional client identity (defaults to none)
   * @param protocol         SSLContext protocol name (defaults to "TLS")
   */
  case class HttpsConfig(trustStoreConfig: TrustStoreConfig, keyStoreConfig: KeyStoreConfig = DoNotUseKeyStore, protocol: String = defaultProtocol)

  /** Builds an [[SSLSocketFactory]] honouring the given configuration. */
  def sslFactory(httpsConfig: HttpsConfig): SSLSocketFactory = {
    val sslContext = SSLContext.getInstance(httpsConfig.protocol)
    sslContext.init(keyManagers(httpsConfig.keyStoreConfig), trustManagers(httpsConfig.trustStoreConfig), new java.security.SecureRandom)
    sslContext.getSocketFactory
  }

  /** Hostname verifier matching the trust policy: strict default, or accept-all. */
  def hostNameVerifier(trustStoreConfig: TrustStoreConfig): HostnameVerifier = trustStoreConfig match {
    case TrustServersByTrustStore(_, _) ⇒ HttpsURLConnection.getDefaultHostnameVerifier
    case TrustAnyServer ⇒ TrustAllSslCertificates.allHostsValid
  }

  // Loads the trust store from disk; the stream is closed even if loading fails
  // (the original version leaked the FileInputStream).
  private def trustManagers(trustStoreConfig: TrustStoreConfig): Array[TrustManager] = trustStoreConfig match {
    case TrustServersByTrustStore(path, password) ⇒
      val inputStream = new FileInputStream(path)
      try {
        val trustStore: JKeyStore = JKeyStore.getInstance(JKeyStore.getDefaultType)
        trustStore.load(inputStream, password.toCharArray)
        val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm)
        trustManagerFactory.init(trustStore)
        trustManagerFactory.getTrustManagers
      } finally inputStream.close()
    case TrustAnyServer ⇒ TrustAllSslCertificates.trustAllCerts
  }

  // Loads the client key store from disk; null means "use the JSSE default"
  // (no client certificate). Stream closed via try/finally (was leaked before).
  private def keyManagers(keyStoreConfig: KeyStoreConfig): Array[KeyManager] = keyStoreConfig match {
    case UseKeyStore(path, password) ⇒
      val inputStream = new FileInputStream(path)
      try {
        val keyStore: JKeyStore = JKeyStore.getInstance(JKeyStore.getDefaultType)
        keyStore.load(inputStream, password.toCharArray)
        val keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm)
        keyManagerFactory.init(keyStore, password.toCharArray)
        keyManagerFactory.getKeyManagers
      } finally inputStream.close()
    case DoNotUseKeyStore ⇒ null
  }

  /**
   * Trust-all TLS plumbing.
   *
   * WARNING: initializing this object has a JVM-GLOBAL side effect — the last
   * two statements install the trust-all socket factory and accept-all hostname
   * verifier as the defaults for ALL HttpsURLConnections in the process.
   */
  object TrustAllSslCertificates {
    val trustAllCerts = Array[TrustManager](new X509TrustManager {
      def getAcceptedIssuers: Array[X509Certificate] = { null }
      def checkClientTrusted(certs: Array[X509Certificate], authType: String) {}
      def checkServerTrusted(certs: Array[X509Certificate], authType: String) {}
    })
    val sc = SSLContext.getInstance(defaultProtocol)
    sc.init(null, trustAllCerts, new java.security.SecureRandom)
    val allHostsValid = new HostnameVerifier {
      def verify(hostname: String, session: SSLSession) = { true }
    }
    HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory)
    HttpsURLConnection.setDefaultHostnameVerifier(allHostsValid)
  }
} | jcaraballo/naive-http | src/main/scala/io/shaka/http/Https.scala | Scala | apache-2.0 | 3,179 |
package com.arcusys.valamis.uri.service
import com.arcusys.valamis.model.SkipTake
import com.arcusys.valamis.uri.model.TincanURIType.TincanURIType
import com.arcusys.valamis.uri.model.{TincanURI, TincanURIType}
import com.arcusys.valamis.uri.storage.TincanURIStorage
import java.util.UUID
import com.arcusys.learn.liferay.services.{CompanyHelper, CompanyLocalServiceHelper, ServiceContextHelper}
import com.arcusys.learn.liferay.util.PortalUtilHelper
import scala.util.Try
/**
 * Default implementation of `TincanURIService`, backed by a pluggable
 * [[TincanURIStorage]] and Liferay portal helpers for URL construction.
 */
abstract class TincanURIServiceImpl extends TincanURIService {
  // Storage backend supplied by the concrete subclass.
  def uriStorage: TincanURIStorage
  /** Looks up a URI record by its full URI string. */
  override def getByURI(uri: String): Option[TincanURI] = {
    uriStorage.get(uri)
  }
  /**
   * Returns the existing record for `id`/`uriType`, or creates one.
   * For non-Package types a non-UUID `id` is replaced by a fresh random UUID
   * when building the new record.
   * NOTE(review): the lookup uses the raw `id` while creation uses `objId`;
   * for a non-UUID id these differ, so a created record may not be found by a
   * later call with the same id — confirm this is intended.
   */
  override def getOrCreate(prefix: String, id: String, uriType: TincanURIType, content: Option[String]): TincanURI = {
    lazy val objId = uriType match {
      case TincanURIType.Package => id
      case _ => Try(UUID.fromString(id)).getOrElse(UUID.randomUUID).toString
    }
    uriStorage.getById(id, uriType) match {
      case Some(uri) => uri
      case None =>
        val uri = createUri(prefix, objId, uriType)
        uriStorage.create(uri, objId, uriType, content.getOrElse(""))
    }
  }
  /** Creates a record with a freshly generated random UUID under the local portal URL. */
  override def createRandom(uriType: TincanURIType, content: Option[String]): TincanURI = {
    val id = UUID.randomUUID.toString
    val prefix = getLocalURL()
    // `id` is always a valid UUID here, so the Try simply re-parses it.
    val uuid = Try(UUID.fromString(id)).getOrElse(UUID.randomUUID).toString
    val uri = createUri(prefix, uuid, uriType)
    uriStorage.create(uri, uuid, uriType, content.getOrElse(""))
  }
  /** Looks up a record by its object id and type. */
  override def getById(id: String, uriType: TincanURIType): Option[TincanURI] = {
    uriStorage.getById(id, uriType)
  }
  /** Lists records matching `filter`, optionally paginated by `skipTake`. */
  override def getById(skipTake: Option[SkipTake], filter: String): Seq[TincanURI] = {
    uriStorage.getAll(skipTake, filter)
  }
  /** Builds the canonical URI string: `<prefix><type>/<type>_<id>`. */
  def createUri(prefix: String, id: String, uriType: TincanURIType): String = {
    s"$prefix$uriType/${uriType}_$id"
  }
  /**
   * Builds the portal-local base URL (e.g. `http://host:port/<suffix>`) for the
   * given company, defaulting to the current company from the thread context.
   */
  override def getLocalURL(suffix: String = "delegate/uri/", companyId: Option[Long] = None): String = {
    val company = CompanyLocalServiceHelper.getCompany(companyId.getOrElse(CompanyHelper.getCompanyId))
    val isSecure = if (ServiceContextHelper.getServiceContext != null)
      ServiceContextHelper.getServiceContext.getRequest.isSecure
    else false //TODO: get somehow isSecure when ServiceContextThreadLocal.getServiceContext == null
    val rootUrl = PortalUtilHelper.getPortalURL(company.getVirtualHostname, PortalUtilHelper.getPortalPort(isSecure), isSecure) // http://localhost:8080
    rootUrl + "/" + suffix
  }
} | arcusys/JSCORM | valamis-core/src/main/scala/com/arcusys/valamis/uri/service/TincanURIServiceImpl.scala | Scala | gpl-3.0 | 2,534 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.locapriv.domain
import com.google.common.geometry.S1Angle
import fr.cnrs.liris.util.geo.{Distance, LatLng, Point}
import scala.util.Random
/**
* Applies Laplacian noise in order to guarantee Geo-Indistinguishability.
*
* Miguel E. Andrés, Nicolás E. Bordenabe, Konstantinos Chatzikokolakis and
* Catuscia Palamidessi. 2013. Geo-indistinguishability: differential privacy for
* location-based systems. In Proceedings of CCS'13.
*/
object Laplace {
  /**
   * Return a protected version of a mobility trace.
   *
   * A single [[Random]] seeded once is shared across all events, so the output
   * is fully deterministic for a given (trace, epsilon, seed).
   *
   * @param trace Trace to protect.
   * @param epsilon Privacy budget.
   * @param seed Seed.
   */
  def transform(trace: Iterable[Event], epsilon: Double, seed: Long = Random.nextLong): Iterable[Event] = {
    val rnd = new Random(seed)
    trace.map(event => event.withPoint(noise(event.point, epsilon, rnd)))
    //TODO: validate this.
    //trace.map(event => event.withLatLng(noise(event.latLng, epsilon, rnd)))
  }
  /**
   * Return a geo-indistinguishable version of a single point.
   *
   * Draws a direction uniformly in [0, 360) degrees and a distance from the
   * radial distribution of the planar Laplacian (via [[inverseCumulativeGamma]]),
   * then translates the point accordingly.
   *
   * @param point Point to protect.
   * @param epsilon Privacy budget.
   * @param rnd Random number generator.
   */
  def noise(point: Point, epsilon: Double, rnd: Random): Point = {
    val azimuth = math.toDegrees(rnd.nextDouble() * 2 * math.Pi)
    val z = rnd.nextDouble()
    val distance = inverseCumulativeGamma(z, epsilon)
    point.translate(S1Angle.degrees(azimuth), distance)
  }
  /**
   * Return a geo-indistinguishable version of a single point.
   *
   * Same sampling scheme as the [[Point]] overload, operating on a [[LatLng]].
   *
   * @param point Point to protect.
   * @param epsilon Privacy budget.
   * @param rnd Random number generator.
   */
  def noise(point: LatLng, epsilon: Double, rnd: Random): LatLng = {
    val azimuth = math.toDegrees(rnd.nextDouble() * 2 * math.Pi)
    val z = rnd.nextDouble()
    val distance = inverseCumulativeGamma(z, epsilon)
    point.translate(S1Angle.degrees(azimuth), distance)
  }
  /**
   * Inverse CDF of the radial component of the planar Laplace distribution,
   * r = -(W_{-1}((z - 1) / e) + 1) / epsilon, using the W_{-1} branch of the
   * Lambert W function, as in the geo-indistinguishability paper cited above.
   *
   * @param z A uniform draw in [0, 1).
   * @param epsilon Privacy budget (larger epsilon => smaller radii).
   */
  def inverseCumulativeGamma(z: Double, epsilon: Double): Distance = {
    val x = (z - 1) / math.E
    val r = -(LambertW.lambertWm1(x) + 1) / epsilon
    Distance.meters(r)
  }
}
/**
 * Real-valued Lambert W function (the inverse of w => w * exp(w)), ported from
 * the GNU Scientific Library (GSL). Started with code donated by K. Briggs;
 * added error estimates, GSL foo, and minor tweaks. Some Lambert-ology from
 * [Corless, Gonnet, Hare, and Jeffrey, "On Lambert's W Function".]
 *
 * @author G. Jungman
 */
private object LambertW {

  /**
   * Outcome of an evaluation.
   *
   * @param value   Approximation of W(x).
   * @param err     Absolute error estimate for `value`.
   * @param outcome Status code mirroring GSL's error codes.
   */
  case class Result(value: Double, err: Double, outcome: Outcome.Value)

  /** Status codes mirroring the GSL error codes of the original C code. */
  object Outcome {
    sealed trait Value
    case object GSL_SUCCESS extends Value // converged normally
    case object GSL_EMAXITER extends Value // iteration limit reached
    case object GSL_EDOM extends Value // argument below the branch point -1/e
  }

  private val M_E = 2.71828182845904523536028747135266250
  private val GSL_DBL_EPSILON = 2.2204460492503131e-16

  // Coefficients of the series expansion in sqrt(q) around the branch point
  // q = x + 1/e = 0; consumed by seriesEval.
  private val c = Array(
    -1.0,
    2.331643981597124203363536062168,
    -1.812187885639363490240191647568,
    1.936631114492359755363277457668,
    -2.353551201881614516821543561516,
    3.066858901050631912893148922704,
    -4.175335600258177138854984177460,
    5.858023729874774148815053846119,
    -8.401032217523977370984161688514,
    12.250753501314460424,
    -18.100697012472442755,
    27.029044799010561650)

  /**
   * Principal branch W0(x), defined for x >= -1/e.
   *
   * @throws RuntimeException if the Halley iteration fails to converge.
   */
  def lambertW0(x: Double): Double = {
    val res = lambertW0E(x)
    // Structural equality (==); the original used reference `eq` here, which
    // works for these case objects but was inconsistent with lambertWm1.
    if (res.outcome == Outcome.GSL_EMAXITER) {
      throw new RuntimeException("Too many iterations")
    }
    res.value
  }

  /**
   * Secondary real branch W-1(x), defined for -1/e <= x < 0
   * (delegates to W0 for x > 0, matching GSL).
   *
   * @throws RuntimeException if the Halley iteration fails to converge.
   */
  def lambertWm1(x: Double): Double = {
    val res = lambertWm1E(x)
    if (res.outcome == Outcome.GSL_EMAXITER) {
      throw new RuntimeException("Too many iterations")
    }
    res.value
  }

  /** W0 with an error estimate and status code. */
  def lambertW0E(x: Double): Result = {
    val one_over_E = 1.0 / M_E
    val q = x + one_over_E // distance to the branch point x = -1/e
    if (x == 0.0) {
      Result(value = 0.0, err = 0.0, Outcome.GSL_SUCCESS)
    } else if (q < 0.0) {
      // Below the branch point: W is undefined; pin to -1 with an error bound.
      Result(value = -1.0, err = Math.sqrt(-q), Outcome.GSL_EDOM)
    } else if (q == 0.0) {
      Result(value = -1.0, err = GSL_DBL_EPSILON, Outcome.GSL_SUCCESS)
    } else if (q < 1.0e-03) {
      // Close to the branch point: use the series in sqrt(q).
      val r = Math.sqrt(q)
      val value = seriesEval(r)
      Result(value = value, err = 2.0 * GSL_DBL_EPSILON * Math.abs(value), Outcome.GSL_SUCCESS)
    } else {
      val MAX_ITERS = 10
      // Initial guess, then polish with Halley's method.
      var w = .0
      if (x < 1.0) {
        val p = Math.sqrt(2.0 * M_E * q)
        w = -1.0 + p * (1.0 + p * (-1.0 / 3.0 + p * 11.0 / 72.0))
      } else {
        w = Math.log(x)
        if (x > 3.0) {
          w -= Math.log(w)
        }
      }
      halleyIteration(x, w, MAX_ITERS)
    }
  }

  /** W-1 with an error estimate and status code. */
  def lambertWm1E(x: Double): Result = {
    if (x > 0.0) {
      // The W-1 branch only exists for negative x; fall back to W0 (as in GSL).
      lambertW0E(x)
    } else if (x == 0.0) {
      Result(value = 0.0, err = 0.0, Outcome.GSL_SUCCESS)
    } else {
      val MAX_ITERS = 32
      val one_over_E = 1.0 / M_E
      val q = x + one_over_E
      var w = .0
      if (q < 0.0) {
        // Below the branch point: undefined.
        return Result(value = -1.0, err = Math.sqrt(-q), Outcome.GSL_EDOM)
      }
      if (x < -1.0e-6) {
        // Near the branch point: series initial guess (negative root branch),
        // good enough to return directly when q is very small.
        val r = -Math.sqrt(q)
        w = seriesEval(r)
        if (q < 3.0e-3) {
          return Result(value = w, err = 5.0 * GSL_DBL_EPSILON * Math.abs(w), Outcome.GSL_SUCCESS)
        }
      } else {
        // Asymptotic guess near zero: W-1(x) ~ ln(-x) - ln(-ln(-x)).
        val L_1 = Math.log(-x)
        val L_2 = Math.log(-L_1)
        w = L_1 - L_2 + L_2 / L_1
      }
      halleyIteration(x, w, MAX_ITERS)
    }
  }

  /** Halley iteration (equation 5.12, Corless et al), refining `w_initial` toward W(x). */
  private def halleyIteration(x: Double, w_initial: Double, max_iters: Int): Result = {
    var w = w_initial
    var i = 0
    while (i < max_iters) {
      val e = Math.exp(w)
      val p = w + 1.0
      var t = w * e - x // residual of w * e^w = x
      if (w > 0) {
        t = (t / p) / e // Newton step (numerically safer for w > 0)
      } else {
        t /= e * p - 0.5 * (p + 1.0) * t / p // Halley step
      }
      w -= t
      val tol = 10 * GSL_DBL_EPSILON * Math.max(Math.abs(w), 1.0 / (Math.abs(p) * e))
      if (Math.abs(t) < tol) {
        return Result(value = w, err = 2.0 * tol, Outcome.GSL_SUCCESS)
      }
      i += 1
    }
    // Not converged: report the last iterate with a pessimistic error bound.
    Result(value = w, err = Math.abs(w), Outcome.GSL_EMAXITER)
  }

  /**
   * series which appears for q near zero; only the argument is different for
   * the different branches
   */
  private def seriesEval(r: Double): Double = {
    val t_8 = c(8) + r * (c(9) + r * (c(10) + r * c(11)))
    val t_5 = c(5) + r * (c(6) + r * (c(7) + r * t_8))
    val t_1 = c(1) + r * (c(2) + r * (c(3) + r * (c(4) + r * t_5)))
    c(0) + r * t_1
  }
}
| privamov/accio | accio/java/fr/cnrs/liris/locapriv/domain/Laplace.scala | Scala | gpl-3.0 | 7,145 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Changes for SnappyData data platform.
*
* Portions Copyright (c) 2017 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql.hive
import java.io.{BufferedWriter, File, FileWriter}
import java.sql.Timestamp
import java.util.Date
import scala.collection.mutable.ArrayBuffer
import scala.tools.nsc.Properties
import org.apache.hadoop.fs.Path
import org.scalatest.{BeforeAndAfterEach, Matchers}
import org.scalatest.concurrent.Timeouts
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{QueryTest, Row, SparkSession}
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.hive.test.{TestHive, TestHiveContext}
import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer
import org.apache.spark.sql.types.{DecimalType, StructType}
import org.apache.spark.util.{ResetSystemProperties, Utils}
/**
* This suite tests spark-submit with applications using HiveContext.
*/
// Every test in this suite launches a real `spark-submit` child process (see
// runSparkSubmit below), so the applications under test run in a separate JVM.
class HiveSparkSubmitSuite
  extends SparkFunSuite
  with Matchers
  with BeforeAndAfterEach
  with ResetSystemProperties
  with Timeouts {

  // TODO: rewrite these or mark them as slow tests to be run sparingly

  // Flag this JVM as a test JVM before each test; Spark consults this property internally.
  override def beforeEach() {
    super.beforeEach()
    System.setProperty("spark.testing", "true")
  }

  test("temporary Hive UDF: define a UDF and use it") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
    val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
    val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
    val args = Seq(
      "--class", TemporaryHiveUDFTest.getClass.getName.stripSuffix("$"),
      "--name", "TemporaryHiveUDFTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      "--jars", jarsString,
      unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
    runSparkSubmit(args)
  }

  test("permanent Hive UDF: define a UDF and use it") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
    val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
    val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
    val args = Seq(
      "--class", PermanentHiveUDFTest1.getClass.getName.stripSuffix("$"),
      "--name", "PermanentHiveUDFTest1",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      "--jars", jarsString,
      unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
    runSparkSubmit(args)
  }

  test("permanent Hive UDF: use a already defined permanent function") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
    val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
    val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
    val args = Seq(
      "--class", PermanentHiveUDFTest2.getClass.getName.stripSuffix("$"),
      "--name", "PermanentHiveUDFTest2",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      "--jars", jarsString,
      unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
    runSparkSubmit(args)
  }

  test("SPARK-8368: includes jars passed in through --jars") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
    val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
    val jar3 = TestHive.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath
    val jar4 = TestHive.getHiveFile("hive-hcatalog-core-0.13.1.jar").getCanonicalPath
    val jarsString = Seq(jar1, jar2, jar3, jar4).map(j => j.toString).mkString(",")
    val args = Seq(
      "--class", SparkSubmitClassLoaderTest.getClass.getName.stripSuffix("$"),
      "--name", "SparkSubmitClassLoaderTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      "--jars", jarsString,
      unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
    runSparkSubmit(args)
  }

  test("SPARK-8020: set sql conf in spark conf") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SparkSQLConfTest.getClass.getName.stripSuffix("$"),
      "--name", "SparkSQLConfTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", "spark.sql.hive.metastore.version=0.12",
      "--conf", "spark.sql.hive.metastore.jars=maven",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("SPARK-8489: MissingRequirementError during reflection") {
    // This test uses a pre-built jar to test SPARK-8489. In a nutshell, this test creates
    // a HiveContext and uses it to create a data frame from an RDD using reflection.
    // Before the fix in SPARK-8470, this results in a MissingRequirementError because
    // the HiveContext code mistakenly overrides the class loader that contains user classes.
    // For more detail, see sql/hive/src/test/resources/regression-test-SPARK-8489/*scala.
    val version = Properties.versionNumberString match {
      case v if v.startsWith("2.10") || v.startsWith("2.11") => v.substring(0, 4)
      case x => throw new Exception(s"Unsupported Scala Version: $x")
    }
    // The pre-built jar is versioned per Scala binary version (e.g. test-2.11.jar).
    val jarDir = getTestResourcePath("regression-test-SPARK-8489")
    val testJar = s"$jarDir/test-$version.jar"
    val args = Seq(
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      "--class", "Main",
      testJar)
    runSparkSubmit(args)
  }

  test("SPARK-9757 Persist Parquet relation with decimal column") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SPARK_9757.getClass.getName.stripSuffix("$"),
      "--name", "SparkSQLConfTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("SPARK-11009 fix wrong result of Window function in cluster mode") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SPARK_11009.getClass.getName.stripSuffix("$"),
      "--name", "SparkSQLConfTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("SPARK-14244 fix window partition size attribute binding failure") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SPARK_14244.getClass.getName.stripSuffix("$"),
      "--name", "SparkSQLConfTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("set spark.sql.warehouse.dir") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SetWarehouseLocationTest.getClass.getName.stripSuffix("$"),
      "--name", "SetSparkWarehouseLocationTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("set hive.metastore.warehouse.dir") {
    // In this test, we set hive.metastore.warehouse.dir in hive-site.xml but
    // not set spark.sql.warehouse.dir. So, the warehouse dir should be
    // the value of hive.metastore.warehouse.dir. Also, the value of
    // spark.sql.warehouse.dir should be set to the value of hive.metastore.warehouse.dir.
    val hiveWarehouseLocation = Utils.createTempDir()
    hiveWarehouseLocation.delete()
    val hiveSiteXmlContent =
      s"""
         |<configuration>
         |  <property>
         |    <name>hive.metastore.warehouse.dir</name>
         |    <value>$hiveWarehouseLocation</value>
         |  </property>
         |</configuration>
       """.stripMargin

    // Write a hive-site.xml containing a setting of hive.metastore.warehouse.dir.
    val hiveSiteDir = Utils.createTempDir()
    val file = new File(hiveSiteDir.getCanonicalPath, "hive-site.xml")
    val bw = new BufferedWriter(new FileWriter(file))
    bw.write(hiveSiteXmlContent)
    bw.close()

    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SetWarehouseLocationTest.getClass.getName.stripSuffix("$"),
      "--name", "SetHiveWarehouseLocationTest",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", s"spark.sql.test.expectedWarehouseDir=$hiveWarehouseLocation",
      // Put the directory holding hive-site.xml on the driver classpath so Hive picks it up.
      "--conf", s"spark.driver.extraClassPath=${hiveSiteDir.getCanonicalPath}",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("SPARK-16901: set javax.jdo.option.ConnectionURL") {
    // In this test, we set javax.jdo.option.ConnectionURL and set metastore version to
    // 0.13. This test will make sure that javax.jdo.option.ConnectionURL will not be
    // overridden by hive's default settings when we create a HiveConf object inside
    // HiveClientImpl. Please see SPARK-16901 for more details.
    val metastoreLocation = Utils.createTempDir()
    metastoreLocation.delete()
    val metastoreURL =
      s"jdbc:derby:memory:;databaseName=${metastoreLocation.getAbsolutePath};create=true"
    val hiveSiteXmlContent =
      s"""
         |<configuration>
         |  <property>
         |    <name>javax.jdo.option.ConnectionURL</name>
         |    <value>$metastoreURL</value>
         |  </property>
         |</configuration>
       """.stripMargin

    // Write a hive-site.xml containing a setting of hive.metastore.warehouse.dir.
    val hiveSiteDir = Utils.createTempDir()
    val file = new File(hiveSiteDir.getCanonicalPath, "hive-site.xml")
    val bw = new BufferedWriter(new FileWriter(file))
    bw.write(hiveSiteXmlContent)
    bw.close()

    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SetMetastoreURLTest.getClass.getName.stripSuffix("$"),
      "--name", "SetMetastoreURLTest",
      "--master", "local[1]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", s"spark.sql.test.expectedMetastoreURL=$metastoreURL",
      "--conf", s"spark.driver.extraClassPath=${hiveSiteDir.getCanonicalPath}",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  test("SPARK-18360: default table path of tables in default database should depend on the " +
    "location of default database") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SPARK_18360.getClass.getName.stripSuffix("$"),
      "--name", "SPARK-18360",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--driver-java-options", "-Dderby.system.durability=test",
      unusedJar.toString)
    runSparkSubmit(args)
  }

  // NOTE: This is an expensive operation in terms of time (10 seconds+). Use sparingly.
  // This is copied from org.apache.spark.deploy.SparkSubmitSuite
  private def runSparkSubmit(args: Seq[String]): Unit = {
    val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
    val history = ArrayBuffer.empty[String]
    val commands = Seq("./bin/spark-submit") ++ args
    val commandLine = commands.mkString("'", "' '", "'")

    val builder = new ProcessBuilder(commands: _*).directory(new File(sparkHome))
    val env = builder.environment()
    env.put("SPARK_TESTING", "1")
    env.put("SPARK_HOME", sparkHome)

    // Captures one line of child-process output: echo it to stdout and keep it
    // in `history` so it can be attached to a failure message.
    def captureOutput(source: String)(line: String): Unit = {
      // This test suite has some weird behaviors when executed on Jenkins:
      //
      // 1. Sometimes it gets extremely slow out of unknown reason on Jenkins. Here we add a
      //    timestamp to provide more diagnosis information.
      // 2. Log lines are not correctly redirected to unit-tests.log as expected, so here we print
      //    them out for debugging purposes.
      val logLine = s"${new Timestamp(new Date().getTime)} - $source> $line"
      // scalastyle:off println
      println(logLine)
      // scalastyle:on println
      history += logLine
    }

    val process = builder.start()
    new ProcessOutputCapturer(process.getInputStream, captureOutput("stdout")).start()
    new ProcessOutputCapturer(process.getErrorStream, captureOutput("stderr")).start()

    try {
      // Fail the test if the child process does not finish within 5 minutes.
      val exitCode = failAfter(300.seconds) { process.waitFor() }
      if (exitCode != 0) {
        // include logs in output. Note that logging is async and may not have completed
        // at the time this exception is raised
        Thread.sleep(1000)
        val historyLog = history.mkString("\\n")
        fail {
          s"""spark-submit returned with exit code $exitCode.
             |Command line: $commandLine
             |
             |$historyLog
           """.stripMargin
        }
      }
    } catch {
      case to: TestFailedDueToTimeoutException =>
        val historyLog = history.mkString("\\n")
        fail(s"Timeout of $commandLine" +
          s" See the log4j logs for more detail." +
          s"\\n$historyLog", to)
      case t: Throwable => throw t
    } finally {
      // Ensure we still kill the process in case it timed out
      process.destroy()
    }
  }
}
// Driver application for the SPARK-16901 regression test: verifies that a
// javax.jdo.option.ConnectionURL supplied through Spark configuration is the
// URL the Hive metastore client actually ends up using.
object SetMetastoreURLTest extends Logging {
  def main(args: Array[String]): Unit = {
    Utils.configTestLog4j("INFO")

    // Build a Hive-enabled session directly from the submitted configuration.
    val session = SparkSession.builder()
      .config(new SparkConf(loadDefaults = true))
      .config("spark.ui.enabled", "false")
      .config("spark.sql.hive.metastore.version", "0.13.1")
      // The issue described in SPARK-16901 only appear when
      // spark.sql.hive.metastore.jars is not set to builtin.
      .config("spark.sql.hive.metastore.jars", "maven")
      .enableHiveSupport()
      .getOrCreate()

    val expectedUrl = session.conf.get("spark.sql.test.expectedMetastoreURL")
    logInfo(s"spark.sql.test.expectedMetastoreURL is $expectedUrl")

    if (expectedUrl == null) {
      throw new Exception(
        s"spark.sql.test.expectedMetastoreURL should be set.")
    }

    // HiveExternalCatalog is used when Hive support is enabled.
    val actualUrl = session.sharedState.externalCatalog
      .asInstanceOf[HiveExternalCatalog]
      .client
      .getConf("javax.jdo.option.ConnectionURL", "this_is_a_wrong_URL")
    logInfo(s"javax.jdo.option.ConnectionURL is $actualUrl")

    if (actualUrl != expectedUrl) {
      throw new Exception(
        s"Expected value of javax.jdo.option.ConnectionURL is $expectedUrl. But, " +
          s"the actual value is $actualUrl")
    }
  }
}
// Driver application that checks which warehouse directory wins
// (spark.sql.warehouse.dir vs. hive.metastore.warehouse.dir) and that default
// managed-table locations are derived from that warehouse directory.
object SetWarehouseLocationTest extends Logging {
  def main(args: Array[String]): Unit = {
    Utils.configTestLog4j("INFO")
    val sparkConf = new SparkConf(loadDefaults = true).set("spark.ui.enabled", "false")
    val providedExpectedWarehouseLocation =
      sparkConf.getOption("spark.sql.test.expectedWarehouseDir")

    val (sparkSession, expectedWarehouseLocation) = providedExpectedWarehouseLocation match {
      case Some(warehouseDir) =>
        // If spark.sql.test.expectedWarehouseDir is set, the warehouse dir is set
        // through spark-summit. So, neither spark.sql.warehouse.dir nor
        // hive.metastore.warehouse.dir is set at here.
        (new TestHiveContext(new SparkContext(sparkConf)).sparkSession, warehouseDir)
      case None =>
        val warehouseLocation = Utils.createTempDir()
        warehouseLocation.delete()
        val hiveWarehouseLocation = Utils.createTempDir()
        hiveWarehouseLocation.delete()
        // If spark.sql.test.expectedWarehouseDir is not set, we will set
        // spark.sql.warehouse.dir and hive.metastore.warehouse.dir.
        // We are expecting that the value of spark.sql.warehouse.dir will override the
        // value of hive.metastore.warehouse.dir.
        val session = new TestHiveContext(new SparkContext(sparkConf
          .set("spark.sql.warehouse.dir", warehouseLocation.toString)
          .set("hive.metastore.warehouse.dir", hiveWarehouseLocation.toString)))
          .sparkSession
        (session, warehouseLocation.toString)
    }

    if (sparkSession.conf.get("spark.sql.warehouse.dir") != expectedWarehouseLocation) {
      throw new Exception(
        "spark.sql.warehouse.dir is not set to the expected warehouse location " +
        s"$expectedWarehouseLocation.")
    }

    val catalog = sparkSession.sessionState.catalog

    // Start from a clean slate in case previous runs left objects behind.
    sparkSession.sql("drop table if exists testLocation")
    sparkSession.sql("drop database if exists testLocationDB cascade")

    {
      // A managed table in the default database is expected directly under the warehouse dir.
      sparkSession.sql("create table testLocation (a int)")
      val tableMetadata =
        catalog.getTableMetadata(TableIdentifier("testLocation", Some("default")))
      val expectedLocation =
        "file:" + expectedWarehouseLocation.toString + "/testlocation"
      val actualLocation = tableMetadata.location
      if (actualLocation != expectedLocation) {
        throw new Exception(
          s"Expected table location is $expectedLocation. But, it is actually $actualLocation")
      }
      sparkSession.sql("drop table testLocation")
    }

    {
      // A managed table in a non-default database is expected under <warehouse>/<db>.db/.
      sparkSession.sql("create database testLocationDB")
      sparkSession.sql("use testLocationDB")
      sparkSession.sql("create table testLocation (a int)")
      val tableMetadata =
        catalog.getTableMetadata(TableIdentifier("testLocation", Some("testLocationDB")))
      val expectedLocation =
        "file:" + expectedWarehouseLocation.toString + "/testlocationdb.db/testlocation"
      val actualLocation = tableMetadata.location
      if (actualLocation != expectedLocation) {
        throw new Exception(
          s"Expected table location is $expectedLocation. But, it is actually $actualLocation")
      }
      sparkSession.sql("drop table testLocation")
      sparkSession.sql("use default")
      sparkSession.sql("drop database testLocationDB")
    }
  }
}
// This application is used to test defining a new Hive UDF (with an associated jar)
// and use this UDF. We need to run this test in separate JVM to make sure we
// can load the jar defined with the function.
object TemporaryHiveUDFTest extends Logging {
  def main(args: Array[String]) {
    Utils.configTestLog4j("INFO")
    val sparkContext = new SparkContext(new SparkConf().set("spark.ui.enabled", "false"))
    val ctx = new TestHiveContext(sparkContext)

    // Register a temporary Hive UDF backed by a jar shipped with the test resources.
    logInfo("Registering a temporary Hive UDF provided in a jar.")
    val udfJar = ctx.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath
    ctx.sql(
      s"""
         |CREATE TEMPORARY FUNCTION example_max
         |AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
         |USING JAR '$udfJar'
      """.stripMargin)

    // Build a small two-column table to run the UDF against.
    val source = ctx.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
    source.createOrReplaceTempView("sourceTable")

    // Actually use the loaded UDF.
    logInfo("Using the UDF.")
    val result = ctx.sql("SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
    logInfo("Running a simple query on the table.")
    val count = result.orderBy("key", "val").count()
    if (count != 10) {
      throw new Exception(s"Result table should have 10 rows instead of $count rows")
    }

    ctx.sql("DROP temporary FUNCTION example_max")
    logInfo("Test finishes.")
    sparkContext.stop()
  }
}
// This application is used to test defining a new Hive UDF (with an associated jar)
// and use this UDF. We need to run this test in separate JVM to make sure we
// can load the jar defined with the function.
object PermanentHiveUDFTest1 extends Logging {
  def main(args: Array[String]) {
    Utils.configTestLog4j("INFO")
    val sparkContext = new SparkContext(new SparkConf().set("spark.ui.enabled", "false"))
    val ctx = new TestHiveContext(sparkContext)

    // Register a permanent Hive UDF backed by a jar shipped with the test resources.
    logInfo("Registering a permanent Hive UDF provided in a jar.")
    val udfJar = ctx.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath
    ctx.sql(
      s"""
         |CREATE FUNCTION example_max
         |AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
         |USING JAR '$udfJar'
      """.stripMargin)

    // Build a small two-column table to run the UDF against.
    val source = ctx.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
    source.createOrReplaceTempView("sourceTable")

    // Actually use the loaded UDF.
    logInfo("Using the UDF.")
    val result = ctx.sql("SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
    logInfo("Running a simple query on the table.")
    val count = result.orderBy("key", "val").count()
    if (count != 10) {
      throw new Exception(s"Result table should have 10 rows instead of $count rows")
    }

    ctx.sql("DROP FUNCTION example_max")
    logInfo("Test finishes.")
    sparkContext.stop()
  }
}
// This application is used to test that a pre-defined permanent function with a jar
// resources can be used. We need to run this test in separate JVM to make sure we
// can load the jar defined with the function.
object PermanentHiveUDFTest2 extends Logging {
  def main(args: Array[String]) {
    Utils.configTestLog4j("INFO")
    val conf = new SparkConf()
    conf.set("spark.ui.enabled", "false")
    val sc = new SparkContext(conf)
    val hiveContext = new TestHiveContext(sc)

    // Load a Hive UDF from the jar.
    // Unlike PermanentHiveUDFTest1, the function is registered through the catalog
    // API (CatalogFunction) rather than a CREATE FUNCTION statement.
    logInfo("Write the metadata of a permanent Hive UDF into metastore.")
    val jar = hiveContext.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath
    val function = CatalogFunction(
      FunctionIdentifier("example_max"),
      "org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax",
      FunctionResource(JarResource, jar) :: Nil)
    hiveContext.sessionState.catalog.createFunction(function, ignoreIfExists = false)

    val source =
      hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
    source.createOrReplaceTempView("sourceTable")
    // Actually use the loaded UDF.
    logInfo("Using the UDF.")
    val result = hiveContext.sql(
      "SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
    logInfo("Running a simple query on the table.")
    val count = result.orderBy("key", "val").count()
    if (count != 10) {
      throw new Exception(s"Result table should have 10 rows instead of $count rows")
    }
    hiveContext.sql("DROP FUNCTION example_max")
    logInfo("Test finishes.")
    sc.stop()
  }
}
// This object is used for testing SPARK-8368: https://issues.apache.org/jira/browse/SPARK-8368.
// We test if we can load user jars in both driver and executors when HiveContext is used.
object SparkSubmitClassLoaderTest extends Logging {
  def main(args: Array[String]) {
    Utils.configTestLog4j("INFO")
    val conf = new SparkConf()
    val hiveWarehouseLocation = Utils.createTempDir()
    conf.set("spark.ui.enabled", "false")
    conf.set("spark.sql.warehouse.dir", hiveWarehouseLocation.toString)
    val sc = new SparkContext(conf)
    val hiveContext = new TestHiveContext(sc)
    val df = hiveContext.createDataFrame((1 to 100).map(i => (i, i))).toDF("i", "j")
    logInfo("Testing load classes at the driver side.")
    // First, we load classes at driver side.
    // args(0) and args(1) are class names that only exist in the --jars jars.
    try {
      Utils.classForName(args(0))
      Utils.classForName(args(1))
    } catch {
      case t: Throwable =>
        throw new Exception("Could not load user class from jar:\\n", t)
    }
    // Second, we load classes at the executor side.
    // Each partition task records a failure message (or nothing) instead of throwing,
    // so a single collect() gathers all executor-side load failures.
    logInfo("Testing load classes at the executor side.")
    val result = df.rdd.mapPartitions { x =>
      var exception: String = null
      try {
        Utils.classForName(args(0))
        Utils.classForName(args(1))
      } catch {
        case t: Throwable =>
          exception = t + "\\n" + Utils.exceptionString(t)
          exception = exception.replaceAll("\\n", "\\n\\t")
      }
      Option(exception).toSeq.iterator
    }.collect()
    if (result.nonEmpty) {
      throw new Exception("Could not load user class from jar:\\n" + result(0))
    }

    // Load a Hive UDF from the jar.
    logInfo("Registering temporary Hive UDF provided in a jar.")
    hiveContext.sql(
      """
        |CREATE TEMPORARY FUNCTION example_max
        |AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
      """.stripMargin)
    val source =
      hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
    source.createOrReplaceTempView("sourceTable")
    // Load a Hive SerDe from the jar.
    logInfo("Creating a Hive table with a SerDe provided in a jar.")
    hiveContext.sql(
      """
        |CREATE TABLE t1(key int, val string)
        |ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
      """.stripMargin)
    // Actually use the loaded UDF and SerDe.
    logInfo("Writing data into the table.")
    hiveContext.sql(
      "INSERT INTO TABLE t1 SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
    logInfo("Running a simple query on the table.")
    val count = hiveContext.table("t1").orderBy("key", "val").count()
    if (count != 10) {
      throw new Exception(s"table t1 should have 10 rows instead of $count rows")
    }
    logInfo("Test finishes.")
    sc.stop()
  }
}
// This object is used for testing SPARK-8020: https://issues.apache.org/jira/browse/SPARK-8020.
// We test if we can correctly set spark sql configurations when HiveContext is used.
object SparkSQLConfTest extends Logging {
  def main(args: Array[String]) {
    Utils.configTestLog4j("INFO")
    // We override the SparkConf to add spark.sql.hive.metastore.version and
    // spark.sql.hive.metastore.jars to the beginning of the conf entry array.
    // So, if metadataHive get initialized after we set spark.sql.hive.metastore.version but
    // before spark.sql.hive.metastore.jars get set, we will see the following exception:
    // Exception in thread "main" java.lang.IllegalArgumentException: Builtin jars can only
    // be used when hive execution version == hive metastore version.
    // Execution: 0.13.1 != Metastore: 0.12. Specify a valid path to the correct hive jars
    // using $HIVE_METASTORE_JARS or change spark.sql.hive.metastore.version to 0.13.1.
    val conf = new SparkConf() {
      // Anonymous subclass: rewrites getAll so the two metastore settings always
      // appear first, regardless of what was submitted.
      override def getAll: Array[(String, String)] = {
        def isMetastoreSetting(conf: String): Boolean = {
          conf == "spark.sql.hive.metastore.version" || conf == "spark.sql.hive.metastore.jars"
        }
        // If there is any metastore settings, remove them.
        val filteredSettings = super.getAll.filterNot(e => isMetastoreSetting(e._1))

        // Always add these two metastore settings at the beginning.
        ("spark.sql.hive.metastore.version" -> "0.12") +:
        ("spark.sql.hive.metastore.jars" -> "maven") +:
        filteredSettings
      }

      // For this simple test, we do not really clone this object.
      override def clone: SparkConf = this
    }
    conf.set("spark.ui.enabled", "false")
    val sc = new SparkContext(conf)
    val hiveContext = new TestHiveContext(sc)
    // Run a simple command to make sure all lazy vals in hiveContext get instantiated.
    hiveContext.tables().collect()
    sc.stop()
  }
}
object SPARK_9757 extends QueryTest {
  import org.apache.spark.sql.functions._

  // Required by QueryTest; assigned once the Hive test session is created.
  protected var spark: SparkSession = _

  def main(args: Array[String]): Unit = {
    Utils.configTestLog4j("INFO")

    val hiveWarehouseLocation = Utils.createTempDir()
    val sparkContext = new SparkContext(
      new SparkConf()
        .set("spark.sql.hive.metastore.version", "0.13.1")
        .set("spark.sql.hive.metastore.jars", "maven")
        .set("spark.ui.enabled", "false")
        .set("spark.sql.warehouse.dir", hiveWarehouseLocation.toString))

    val hiveContext = new TestHiveContext(sparkContext)
    spark = hiveContext.sparkSession
    import hiveContext.implicits._

    val dir = Utils.createTempDir()
    dir.delete()

    try {
      {
        // Persist a table with a top-level decimal column and read it back.
        val df =
          hiveContext
            .range(10)
            .select(('id + 0.1) cast DecimalType(10, 3) as 'dec)
        df.write.option("path", dir.getCanonicalPath).mode("overwrite").saveAsTable("t")
        checkAnswer(hiveContext.table("t"), df)
      }

      {
        // Persist a table with a decimal nested inside a struct and read it back.
        val df =
          hiveContext
            .range(10)
            .select(callUDF("struct", ('id + 0.2) cast DecimalType(10, 3)) as 'dec_struct)
        df.write.option("path", dir.getCanonicalPath).mode("overwrite").saveAsTable("t")
        checkAnswer(hiveContext.table("t"), df)
      }
    } finally {
      dir.delete()
      hiveContext.sql("DROP TABLE t")
      sparkContext.stop()
    }
  }
}
// Driver application for the SPARK-11009 regression test: a window function
// evaluated in cluster mode must not produce spurious rows.
object SPARK_11009 extends QueryTest {
  import org.apache.spark.sql.functions._

  protected var spark: SparkSession = _

  def main(args: Array[String]): Unit = {
    Utils.configTestLog4j("INFO")
    val conf = new SparkConf()
      .set("spark.ui.enabled", "false")
      .set("spark.sql.shuffle.partitions", "100")
    val sparkContext = new SparkContext(conf)
    val hiveContext = new TestHiveContext(sparkContext)
    spark = hiveContext.sparkSession

    try {
      val base = spark.range(1 << 20)
      val projected = base.select((base("id") % 1000).alias("A"), (base("id") / 1000).alias("B"))
      val windowSpec = Window.partitionBy(projected("A")).orderBy(projected("B"))
      // row_number() starts at 1, so "rn < 0" can never match; any surviving row
      // means the window computation produced a wrong result.
      val shouldBeEmpty = projected
        .select(projected("A"), projected("B"), row_number().over(windowSpec).alias("rn"))
        .filter("rn < 0")
      if (shouldBeEmpty.rdd.count() != 0) {
        throw new Exception("df3 should have 0 output row.")
      }
    } finally {
      sparkContext.stop()
    }
  }
}
// Driver application for the SPARK-14244 regression test: binding the window
// partition-size attribute must not fail when evaluating cume_dist.
object SPARK_14244 extends QueryTest {
  import org.apache.spark.sql.expressions.Window
  import org.apache.spark.sql.functions._

  protected var spark: SparkSession = _

  def main(args: Array[String]): Unit = {
    Utils.configTestLog4j("INFO")
    val conf = new SparkConf()
      .set("spark.ui.enabled", "false")
      .set("spark.sql.shuffle.partitions", "100")
    val sparkContext = new SparkContext(conf)
    val hiveContext = new TestHiveContext(sparkContext)
    spark = hiveContext.sparkSession
    import hiveContext.implicits._

    try {
      // Over two rows ordered by id, cume_dist must yield exactly 0.5 and 1.0.
      val byId = Window.orderBy('id)
      val distances = spark.range(2).select(cume_dist().over(byId).as('cdist)).orderBy('cdist)
      checkAnswer(distances, Seq(Row(0.5D), Row(1.0D)))
    } finally {
      sparkContext.stop()
    }
  }
}
// Driver application for SPARK-18360: verifies that Spark SQL derives default
// table locations from the default database's location, while raw Hive derives
// them from hive.metastore.warehouse.dir.
object SPARK_18360 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .config("spark.ui.enabled", "false")
      .enableHiveSupport().getOrCreate()

    val defaultDbLocation = spark.catalog.getDatabase("default").locationUri
    assert(new Path(defaultDbLocation) == new Path(spark.sharedState.warehousePath))

    val hiveClient = spark.sharedState.externalCatalog.asInstanceOf[HiveExternalCatalog].client

    try {
      val tableMeta = CatalogTable(
        identifier = TableIdentifier("test_tbl", Some("default")),
        tableType = CatalogTableType.MANAGED,
        storage = CatalogStorageFormat.empty,
        schema = new StructType().add("i", "int"),
        provider = Some(DDLUtils.HIVE_PROVIDER))

      // Point Hive's warehouse dir somewhere other than the default database location.
      val newWarehousePath = Utils.createTempDir().getAbsolutePath
      hiveClient.runSqlHive(s"SET hive.metastore.warehouse.dir=$newWarehousePath")
      hiveClient.createTable(tableMeta, ignoreIfExists = false)
      val rawTable = hiveClient.getTable("default", "test_tbl")
      // Hive will use the value of `hive.metastore.warehouse.dir` to generate default table
      // location for tables in default database.
      assert(rawTable.storage.locationUri.get.contains(newWarehousePath))
      hiveClient.dropTable("default", "test_tbl", ignoreIfNotExists = false, purge = false)

      spark.sharedState.externalCatalog.createTable(tableMeta, ignoreIfExists = false)
      val readBack = spark.sharedState.externalCatalog.getTable("default", "test_tbl")
      // Spark SQL will use the location of default database to generate default table
      // location for tables in default database.
      assert(readBack.storage.locationUri.get.contains(defaultDbLocation))
    } finally {
      // Drop the created table and restore the Hive warehouse setting for later tests.
      hiveClient.dropTable("default", "test_tbl", ignoreIfNotExists = true, purge = false)
      hiveClient.runSqlHive(s"SET hive.metastore.warehouse.dir=$defaultDbLocation")
    }
  }
}
| big-pegasus/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala | Scala | apache-2.0 | 35,698 |
package org.jetbrains.plugins.scala.lang.transformation
package calls
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScPrefixExpr
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaCode._
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* @author Pavel Fatin
*/
/**
 * Rewrites a unary prefix expression (e.g. `!x`) into the equivalent explicit
 * method call on the operand (e.g. `x.unary_!`), per the Scala convention that
 * a prefix operator `op` desugars to a call of the method named `unary_op`.
 */
class ExpandUnaryCall extends AbstractTransformer {
  def transformation(implicit project: ProjectContext): PartialFunction[PsiElement, Unit] = {
    case e @ ScPrefixExpr(RenamedReference(s, t), r) if t == "unary_" + s =>
      // Bug fix: the template previously contained a stray '}' ("$r.$t}"),
      // which generated syntactically invalid replacement code.
      e.replace(code"$r.$t")
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/transformation/calls/ExpandUnaryCall.scala | Scala | apache-2.0 | 584 |
package tech.olczak.mt
import scala.App
import scalaz._
import scalaz.effect.IO
import scalaz.effect.IO._
import tech.olczak.lambda.{App => Apply,_}
import scalaz.Scalaz._
// Evaluates a small lambda-calculus AST inside a scalaz RWST (Reader/Writer/State)
// monad-transformer stack over IO: the reader carries the variable environment,
// the writer accumulates log messages, and the state counts evaluation steps.
object RwstExample extends App {
  type StateType = Int
  type Logs = List[String]
  type Eval[A] = RWST[IO, Env, Logs, StateType, A]

  // Runs an Eval action with the given environment and initial step counter,
  // yielding (accumulated logs, result, final step count) inside IO.
  def runEval[A](env: Env, st: StateType, op: Eval[A]): IO[(Logs, A, StateType)] = {
    op.run(env, st)
  }

  def eval(exp: Exp): Eval[LambdaValue] = {
    // Summon the transformer/reader/listen instances for the Eval stack; the
    // λ[...] kind-projector lambda fixes RWST's Env/Logs/State parameters.
    implicit val E0 = implicitly[MonadTrans[λ[(α[_], β) => RWST[α, Env, Logs, StateType, β]]]]
    import E0._
    implicit val E1 = implicitly[MonadReader[Eval, Env]]
    implicit val E2 = implicitly[MonadListen[Eval, Logs]]
    import E1._
    import E2._
    exp match {
      case Lit(i) =>
        // Literal: count the step and log the literal via lifted IO.
        for {
          _ <- tick[Eval]
          _ <- liftM(putStrLn(s"Lit $i"))
        } yield IntVal(i).asInstanceOf[LambdaValue]
      case Var(name) =>
        // Variable lookup in the reader environment.
        // NOTE(review): env(name) throws if the variable is unbound.
        for {
          _ <- tick[Eval]
          _ <- tell(List(s"name $name"))
          env <- ask
        } yield env(name) //todo handle error
      case Add(e1, e2) =>
        // Addition: the inner match is non-exhaustive, so non-IntVal operands
        // raise a MatchError (acknowledged by the todo below).
        for {
          _ <- tick[Eval]
          val1 <- eval(e1) //todo handle error
          val2 <- eval(e2)
          result = (val1, val2) match {
            case (IntVal(v1), IntVal(v2)) => IntVal(v1 + v2).asInstanceOf[LambdaValue]
          }
        } yield result
      case Abs(name, body) =>
        // Abstraction: capture the current environment in a closure value.
        for {
          _ <- tick[Eval]
          env <- ask
        } yield FunVal(env, name, body).asInstanceOf[LambdaValue]
      case Apply(e1, e2) =>
        // Application: evaluate the body in the closure's environment extended
        // with the argument binding (via `local`).
        for {
          _ <- tick[Eval]
          val1 <- eval(e1)
          val2 <- eval(e2)
          result <- val1 match {
            case FunVal(env2, name, body) => local(_ => env2 + (name -> val2))(eval(body))
          }
        } yield result
    }
  }

  // Increments the evaluation-step counter held in the state component.
  def tick[F[_]](implicit M: MonadState[F, Int]): F[Unit] =
    for {
      st <- M.get
      _ <- M.put(st + 1)
    } yield ()

  case class Failure(msg: String)

  // Sample program: 12 + ((λx. x) (4 + 2)); runs the evaluator and prints
  // (logs, result, step count).
  val exp = Add(Lit(12), Apply(Abs("x", Var("x")), Add(Lit(4), Lit(2))))
  val result = runEval(Map.empty, 0, eval(exp))
  println(result.unsafePerformIO())
}
| lolczak/mt-examples | src/main/scala/tech/olczak/mt/RwstExample.scala | Scala | apache-2.0 | 2,149 |
package models.daos
import models.Mantra
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{ Try, Success, Failure }
import java.text.SimpleDateFormat
import java.util.Calendar
/**
 * Mantra Data Access Object implementation using Slick to persist to/from default database.
 * Provides access to persistence layer for all Mantra objects.
 *
 * Deletion is soft: rows are flagged isArchived = 1 and renamed, never removed.
 *
 * @author Leanne Northrop
 * @since 1.0.0
 */
class MantraDAOImpl extends MantraDAO with DAOSlick {
  import driver.api._

  /**
   * Finds all non-archived mantra.
   *
   * @return The found mantra or empty list if no mantra could be found.
   */
  def findAll() = {
    db.run(mantrasTable.filter(_.isArchived === 0).result).map(_.map { row =>
      Mantra(Some(row.id), row.name, row.description, row.imgUrl, row.year, row.month, row.day)
    })
  }

  /**
   * Finds a mantra by its ID.
   *
   * @param mantraID The ID of the mantra to find.
   * @return A future holding the found mantra; the future fails with
   *         java.util.NoSuchElementException when no non-archived row matches.
   */
  def findById(mantraID: Long): Future[Mantra] = {
    db.run(mantrasTable.filter(_.id === mantraID).filter(_.isArchived === 0).result).map { result =>
      val list = result.map { row =>
        Mantra(Some(row.id), row.name, row.description, row.imgUrl, row.year, row.month, row.day)
      }
      // Fail the future explicitly when the id is unknown (or archived).
      if (list.nonEmpty) list.head
      else throw new java.util.NoSuchElementException(s"Mantra with id ${mantraID} not found.")
    }
  }

  /**
   * Saves a mantra: inserts when it has no id yet, updates otherwise.
   *
   * @param mantra The mantra to save.
   * @return The saved mantra, carrying the generated id after an insert.
   */
  def save(mantra: models.Mantra): Future[Mantra] = {
    // -1 marks "not yet persisted" for insertOrUpdate below.
    val id = mantra.id.getOrElse(-1L)
    val dbMantra = MantraRow(id, mantra.name, mantra.description, mantra.imgUrl, mantra.year, mantra.month, mantra.day, 0)
    val actions = (for {
      result <- (mantrasTable returning mantrasTable.map(_.id)).insertOrUpdate(dbMantra)
    } yield result).transactionally
    db.run(actions).map {
      case Some(newId) => mantra.copy(id = Some(newId)) // Some is returned for insert
      case None => mantra // None is returned for update
    }
  }

  /**
   * 'Deletes' a mantra by archiving it: the row is kept, flagged with
   * isArchived = 1, and renamed to record the archival timestamp.
   *
   * @param mantra The mantra to delete
   * @return true if archived, false otherwise
   */
  def delete(mantra: Mantra): Future[Boolean] = {
    val today = Calendar.getInstance.getTime
    // Fixed pattern: was "dd-MM-YYYY kk:hh:ss", which used week-year (YYYY,
    // wrong around New Year), printed the hour twice (kk and hh) and omitted
    // the minutes entirely.
    val timestampFormat = new SimpleDateFormat("dd-MM-yyyy HH:mm:ss")
    val id = mantra.id.getOrElse(-1L)
    val dbMantra = MantraRow(id, s"Archived '${mantra.name}' at ${timestampFormat.format(today)}", mantra.description, mantra.imgUrl, mantra.year, mantra.month, mantra.day, 1)
    db.run(mantrasTable.insertOrUpdate(dbMantra)).map { _ =>
      true
    } recover {
      // Best-effort contract: any persistence failure is reported as false.
      case _: Throwable => false
    }
  }
} | leannenorthrop/play-mantra-accumulations | app/models/daos/MantraDAOImpl.scala | Scala | apache-2.0 | 2,868 |
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import scalariform.formatter.preferences._
// Shared Scalariform formatting preferences applied across the sbt build.
object Formatting {
  // Starts from the current ScalariformKeys.preferences value and overrides
  // a fixed set of options (alignment, indentation, arrow rewriting).
  val formattingSettings = Seq(ScalariformKeys.preferences := ScalariformKeys.preferences.value
    .setPreference(AlignParameters, false)
    .setPreference(AlignSingleLineCaseStatements, true)
    .setPreference(AlignSingleLineCaseStatements.MaxArrowIndent, 90)
    .setPreference(DanglingCloseParenthesis, Preserve)
    .setPreference(DoubleIndentConstructorArguments, true)
    .setPreference(RewriteArrowSymbols, true)
    .setPreference(UseUnicodeArrows, false))
}
| xebia/cakemix | project/Formatting.scala | Scala | mit | 590 |
package scalaxy.union
import scala.language.experimental.macros
import scala.language.implicitConversions
import scala.annotation.implicitNotFound
/**
 * (A <|< B) means that either A <:< B, or if B is an union, there is one member C of B for which A <:< C.
 */
@implicitNotFound(msg = "Cannot prove that ${A} <|< ${B}.")
trait <|<[A, B]

object <|< {
  // Evidence is materialized at compile time by the macro, which succeeds
  // when A conforms to B itself or to at least one member of the union B.
  implicit def <|<[A, B]: A <|< B = macro scalaxy.union.internal.<|<[A, B, A <|< B]
}
| nativelibs4java/Scalaxy | Union/src/main/scala/scalaxy/union/MatchesMemberOfUnion.scala | Scala | bsd-3-clause | 440 |
package be.objectify.deadbolt.scala.test.controllers
import be.objectify.deadbolt.scala.test.dao.{SubjectDao, TestSubjectDao}
import play.api.inject._
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.mvc.Results
import play.api.test.PlaySpecification
import play.api.{Application, Mode}
/**
 * Base class for unit specs. Supplies a Guice-built application in test mode
 * whose SubjectDao binding is overridden with the in-memory TestSubjectDao.
 */
abstract class AbstractUnitSpec extends PlaySpecification with Results {
  /** Builds a fresh test application with the DAO binding swapped out. */
  def testApp: Application = {
    val builder = new GuiceApplicationBuilder()
      .in(Mode.Test)
      .bindings(bind[SubjectDao].to[TestSubjectDao])
    builder.build()
  }
}
| schaloner/deadbolt-2-scala | test-app/test/be/objectify/deadbolt/scala/test/controllers/AbstractUnitSpec.scala | Scala | apache-2.0 | 526 |
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.actor.dsl
import scala.concurrent.Await
import akka.actor.ActorLogging
import scala.collection.immutable.TreeSet
import scala.concurrent.duration._
import akka.actor.Cancellable
import akka.actor.Actor
import scala.collection.mutable.Queue
import akka.actor.ActorSystem
import akka.actor.ActorRef
import akka.util.Timeout
import akka.actor.Status
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicInteger
import akka.pattern.ask
import akka.actor.ActorDSL
import akka.actor.Props
/**
 * INTERNAL API
 *
 * Protocol messages exchanged with the InboxActor defined in the Inbox trait.
 */
private[akka] object Inbox {

  // A pending request from a client thread; carries the deadline after which
  // it is failed with a TimeoutException, and the client to reply to.
  private sealed trait Query {
    def deadline: Deadline
    def withClient(c: ActorRef): Query
    def client: ActorRef
  }
  // Request for the next message, whatever it is.
  private final case class Get(deadline: Deadline, client: ActorRef = null) extends Query {
    def withClient(c: ActorRef) = copy(client = c)
  }
  // Request for the next message matching the predicate.
  private final case class Select(deadline: Deadline, predicate: PartialFunction[Any, Any], client: ActorRef = null) extends Query {
    def withClient(c: ActorRef) = copy(client = c)
  }
  // Instructs the inbox actor to death-watch the given target.
  private final case class StartWatch(target: ActorRef)
  // Self-sent timer tick used to expire overdue queries.
  private case object Kick
}
trait Inbox { this: ActorDSL.type ⇒

  import Inbox._

  protected trait InboxExtension { this: Extension ⇒
    // Maximum number of messages buffered by an inbox actor.
    val DSLInboxQueueSize = config.getInt("inbox-size")

    val inboxNr = new AtomicInteger
    val inboxProps = Props(classOf[InboxActor], ActorDSL, DSLInboxQueueSize)

    def newReceiver: ActorRef = mkChild(inboxProps, "inbox-" + inboxNr.incrementAndGet)
  }

  // Orders queries by their deadline so the earliest deadline is first in
  // the clientsByTimeout TreeSet.
  private implicit val deadlineOrder: Ordering[Query] = new Ordering[Query] {
    def compare(left: Query, right: Query): Int = left.deadline.time compare right.deadline.time
  }

  private class InboxActor(size: Int) extends Actor with ActorLogging {
    // Queries waiting for a matching message, in arrival order.
    var clients = Queue.empty[Query]
    // Buffered messages waiting for a matching query (bounded by `size`).
    val messages = Queue.empty[Any]
    // Same queries as `clients`, ordered by deadline for expiry handling.
    var clientsByTimeout = TreeSet.empty[Query]
    var printedWarning = false

    def enqueueQuery(q: Query) {
      val query = q withClient sender()
      clients enqueue query
      clientsByTimeout += query
    }

    def enqueueMessage(msg: Any) {
      if (messages.size < size) messages enqueue msg
      else {
        // Only warn once to avoid log flooding when the inbox overflows.
        if (!printedWarning) {
          log.warning("dropping message: either your program is buggy or you might want to increase akka.actor.dsl.inbox-size, current value is " + size)
          printedWarning = true
        }
      }
    }

    // Scratch slot read by clientPredicate while matching queries against
    // the message currently being dispatched.
    var currentMsg: Any = _
    val clientPredicate: (Query) ⇒ Boolean = {
      case _: Get ⇒ true
      case Select(_, p, _) ⇒ p isDefinedAt currentMsg
      case _ ⇒ false
    }

    // Scratch slot read by messagePredicate while matching buffered
    // messages against the Select currently being served.
    var currentSelect: Select = _
    val messagePredicate: (Any ⇒ Boolean) = (msg) ⇒ currentSelect.predicate.isDefinedAt(msg)

    // The single scheduled Kick (if any) together with the deadline it was
    // scheduled for; maintained by the andThen block below.
    var currentDeadline: Option[(Deadline, Cancellable)] = None

    def receive = ({
      case g: Get ⇒
        if (messages.isEmpty) enqueueQuery(g)
        else sender() ! messages.dequeue()
      case s @ Select(_, predicate, _) ⇒
        if (messages.isEmpty) enqueueQuery(s)
        else {
          currentSelect = s
          messages.dequeueFirst(messagePredicate) match {
            case Some(msg) ⇒ sender() ! msg
            case None ⇒ enqueueQuery(s)
          }
          currentSelect = null
        }
      case StartWatch(target) ⇒ context watch target
      case Kick ⇒
        // Expire every query whose deadline has passed, replying with a
        // failure, then drop them from both bookkeeping structures.
        val now = Deadline.now
        val pred = (q: Query) ⇒ q.deadline.time < now.time
        val overdue = clientsByTimeout.iterator.takeWhile(pred)
        while (overdue.hasNext) {
          val toKick = overdue.next()
          toKick.client ! Status.Failure(new TimeoutException("deadline passed"))
        }
        clients = clients.filterNot(pred)
        clientsByTimeout = clientsByTimeout.from(Get(now))
      case msg ⇒
        // Any other message is either handed to the first matching waiting
        // query or buffered for later.
        if (clients.isEmpty) enqueueMessage(msg)
        else {
          currentMsg = msg
          clients.dequeueFirst(clientPredicate) match {
            case Some(q) ⇒ { clientsByTimeout -= q; q.client ! msg }
            case None ⇒ enqueueMessage(msg)
          }
          currentMsg = null
        }
    }: Receive) andThen { _ ⇒
      // After every message: (re)schedule or cancel the Kick timer so that
      // it fires exactly at the earliest pending deadline.
      if (clients.isEmpty) {
        if (currentDeadline.isDefined) {
          currentDeadline.get._2.cancel()
          currentDeadline = None
        }
      } else {
        val next = clientsByTimeout.head.deadline
        import context.dispatcher
        if (currentDeadline.isEmpty) {
          currentDeadline = Some((next, context.system.scheduler.scheduleOnce(next.timeLeft, self, Kick)))
        } else {
          // must not rely on the Scheduler to not fire early (for robustness)
          currentDeadline.get._2.cancel()
          currentDeadline = Some((next, context.system.scheduler.scheduleOnce(next.timeLeft, self, Kick)))
        }
      }
    }
  }

  /*
   * make sure that AskTimeout does not accidentally mess up message reception
   * by adding this extra time to the real timeout
   */
  private val extraTime = 1.minute

  /**
   * Create a new actor which will internally queue up messages it gets so that
   * they can be interrogated with the `akka.actor.dsl.Inbox!.Inbox!.receive`
   * and `akka.actor.dsl.Inbox!.Inbox!.select` methods. It will be created as
   * a system actor in the ActorSystem which is implicitly (or explicitly)
   * supplied.
   */
  def inbox()(implicit system: ActorSystem): Inbox = new Inbox(system)

  class Inbox(system: ActorSystem) extends akka.actor.Inbox {
    // The backing actor that buffers messages and serves queries.
    val receiver: ActorRef = Extension(system).newReceiver

    // Java API
    def getRef: ActorRef = receiver

    def send(target: ActorRef, msg: AnyRef): Unit = target.tell(msg, receiver)

    private val defaultTimeout: FiniteDuration = Extension(system).DSLDefaultTimeout

    /**
     * Receive a single message from the internal `receiver` actor. The supplied
     * timeout is used for cleanup purposes and its precision is subject to the
     * resolution of the system’s scheduler (usually 100ms, but configurable).
     *
     * <b>Warning:</b> This method blocks the current thread until a message is
     * received, thus it can introduce dead-locks (directly as well as
     * indirectly by causing starvation of the thread pool). <b>Do not use
     * this method within an actor!</b>
     */
    def receive(timeout: FiniteDuration = defaultTimeout): Any = {
      implicit val t = Timeout(timeout + extraTime)
      Await.result(receiver ? Get(Deadline.now + timeout), Duration.Inf)
    }

    /**
     * Receive a single message for which the given partial function is defined
     * and return the transformed result, using the internal `receiver` actor.
     * The supplied timeout is used for cleanup purposes and its precision is
     * subject to the resolution of the system’s scheduler (usually 100ms, but
     * configurable).
     *
     * <b>Warning:</b> This method blocks the current thread until a message is
     * received, thus it can introduce dead-locks (directly as well as
     * indirectly by causing starvation of the thread pool). <b>Do not use
     * this method within an actor!</b>
     */
    def select[T](timeout: FiniteDuration = defaultTimeout)(predicate: PartialFunction[Any, T]): T = {
      implicit val t = Timeout(timeout + extraTime)
      predicate(Await.result(receiver ? Select(Deadline.now + timeout, predicate), Duration.Inf))
    }

    /**
     * Make the inbox’s actor watch the target actor such that reception of the
     * Terminated message can then be awaited.
     */
    def watch(target: ActorRef): Unit = receiver ! StartWatch(target)

    /**
     * Overridden finalizer which will try to stop the actor once this Inbox
     * is no longer referenced.
     */
    override def finalize() {
      system.stop(receiver)
    }
  }

  implicit def senderFromInbox(implicit inbox: Inbox): ActorRef = inbox.receiver
}
| rorygraves/perf_tester | corpus/akka/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala | Scala | apache-2.0 | 7,900 |
package exercises.ch03
object Ex25 {
  // Counts the leaves of a tree: a Leaf contributes 1, a Branch contributes
  // the sum of its two subtrees.
  // NOTE(review): "Functional Programming in Scala" exercise 3.25 asks for
  // the TOTAL number of nodes (branches and leaves), i.e.
  // `1 + size(l) + size(r)` in the Branch case — confirm whether counting
  // only leaves is intended here.
  def size[A](t: Tree[A]): Int = t match {
    case Leaf(_) => 1
    case Branch(l, r) => size(l) + size(r)
  }

  // Demo: prints 3 for this tree (three leaves).
  def main(args: Array[String]): Unit ={
    println(size(Branch(Branch(Leaf('A'), Leaf('B')), Leaf('C'))))
  }
}
| VladMinzatu/fpinscala-exercises | src/main/scala/exercises/ch03/Ex25.scala | Scala | mit | 266 |
package gangstead
import akka.pattern.{ ask, pipe }
import akka.actor.ActorSystem
import akka.actor.Props
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global //instead of system.dispatcher from the sample code
import scala.concurrent.duration._ //For the timeout duratino "5 seconds"
import scala.concurrent.Future //For the `mapTo` function
import scala.language.postfixOps //removes warning on postfix ops like "5 seconds"
// Aggregated reply built from the three worker actors' responses.
final case class Result(x: Int, s: String, d: Double)
// Marker message sent to each worker actor to request its value.
case object Request
// Demo entry point: asks three actors for values, combines the replies into
// a Result, and pipes it to a fourth actor.
object main extends App {
  //Create the actors
  val system = ActorSystem("akka-actor-demo")
  val actorA = system.actorOf(Props[actorA], "actorA")
  val actorB = system.actorOf(Props[actorB], "actorB")
  val actorC = system.actorOf(Props[actorC], "actorC")
  val actorD = system.actorOf(Props[actorD], "actorD")

  implicit val timeout = Timeout(5 seconds) // needed for `?` below
  // NOTE: because each ask is created inside the for-comprehension, the three
  // requests run sequentially, not in parallel.
  val f: Future[Result] =
    for {
      x <- ask(actorA, Request).mapTo[Int] // call pattern directly
      s <- (actorB ask Request).mapTo[String] // call by implicit conversion
      d <- (actorC ? Request).mapTo[Double] // call by symbolic name
    } yield Result(x, s, d)

  // Forward the eventual Result (or failure) to actorD when it completes.
  f pipeTo actorD // .. or ..
  //pipe(f) to actorD
  println("End of main execution")
}
| gangstead/akka-actor-demo | src/main/scala/gangstead/main.scala | Scala | mit | 1,263 |
package synereo.client.handlers
import diode._
import diode.data._
import shared.models.MessagePost
import synereo.client.rootmodels.MessagesRootModel
import diode.util.{Retry, RetryPolicy}
import shared.dtos._
import synereo.client.logger
import synereo.client.services.{CoreApi, SYNEREOCircuit}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}
import diode.AnyAction._
import org.widok.moment.Moment
import synereo.client.utils.{AppUtils, ConnectionsUtils, ContentUtils}
// Actions
//scalastyle:off
// Pot action driving the session-ping refresh cycle; retried up to 3 times
// on failure via the retryPolicy.
case class RefreshMessages(potResult: Pot[MessagesRootModel] = Empty, retryPolicy: RetryPolicy = Retry(3))
  extends PotActionRetriable[MessagesRootModel, RefreshMessages] {
  override def next(value: Pot[MessagesRootModel], newRetryPolicy: RetryPolicy): RefreshMessages = RefreshMessages(value, newRetryPolicy)
}

//case class StoreCnxnAndLabels(slctzId: String, sessionUriName: String)
// Resets the messages model back to an empty Pot.
case class ClearMessages()
// Diode action handler maintaining the messages model: keeps a long-poll
// session-ping loop alive and merges newly received posts into the model.
class MessagesHandler[M](modelRW: ModelRW[M, Pot[MessagesRootModel]]) extends ActionHandler(modelRW) {
  override def handle: PartialFunction[Any, ActionResult[M]] = {
    /**
     * This is the prime action which maintains the ping cycle.
     * It extends the PotActionRetriable trait which makes it retry the action
     * if a failure occurs in the session ping
     * However it is observed that the retry action is triggered for any error in the
     * dom even if it is not bound to the MessagesRootModel.
     * This is not a problem as of now, however if you are seeing too many session ping in
     * your network tab then probably this action is the culprit. This may be a bug in
     * Diode library. Need to investigate.
     */
    case action: RefreshMessages =>
      val updateF = action.effectWithRetry {
        CoreApi.sessionPing(SYNEREOCircuit.zoom(_.sessionRootModel.sessionUri).value)
      } { messagesResponse =>
        // Immediately re-dispatch to keep the ping loop running.
        SYNEREOCircuit.dispatch(RefreshMessages())
        val currentVal = if (value.nonEmpty) value.get.messagesModelList else Nil
        // Parse each non-empty response into a MessagePost (silently dropping
        // unparsable entries via Try(...).toOption) and append to the model.
        val msg = currentVal ++ ContentUtils
          .processRes(messagesResponse)
          .filterNot(_.pageOfPosts.isEmpty)
          .flatMap(content => Try(upickle.default.read[MessagePost](content.pageOfPosts(0))).toOption)
          .map(ConnectionsUtils.getSenderReceivers)
        // Newest messages first.
        MessagesRootModel(msg.sortWith((x, y) => Moment(x.created).isAfter(Moment(y.created))))
      }
      action.handleWith(this, updateF)(PotActionRetriable.handler())
    case ClearMessages() =>
      updated(Pot.empty)
  }
} | LivelyGig/ProductWebUI | sclient/src/main/scala/synereo/client/Handlers/MessagesHandler.scala | Scala | apache-2.0 | 2,571 |
package chandu0101.scalajs.react.components
package materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import scala.scalajs.js
import scala.scalajs.js.`|`
/**
 * This file is generated - submit issues instead of PR against it
 */
case class MuiCardMedia(
  key: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined,
  /* Whether a click on this card component expands the card. Can be set on any child of the Card component.*/
  actAsExpander: js.UndefOr[Boolean] = js.undefined,
  /* Whether this card component is expandable. Can be set on any child of the Card component.*/
  expandable: js.UndefOr[Boolean] = js.undefined,
  mediaStyle: js.UndefOr[CssProperties] = js.undefined,
  overlay: js.UndefOr[ReactNode] = js.undefined,
  overlayContainerStyle: js.UndefOr[CssProperties] = js.undefined,
  overlayContentStyle: js.UndefOr[CssProperties] = js.undefined,
  overlayStyle: js.UndefOr[CssProperties] = js.undefined,
  /* Override the inline-styles of the root element.*/
  style: js.UndefOr[CssProperties] = js.undefined){

  /** Builds the React element for Mui.CardMedia, forwarding props and children. */
  def apply(children: ReactNode*) = {
    val props = JSMacro[MuiCardMedia](this)
    val factory = React.asInstanceOf[js.Dynamic].createFactory(Mui.CardMedia)
    // React's createFactory accepts no children, one child, or an array.
    children match {
      case Seq() => factory(props).asInstanceOf[ReactComponentU_]
      case Seq(only) => factory(props, only).asInstanceOf[ReactComponentU_]
      case many => factory(props, many.toJsArray).asInstanceOf[ReactComponentU_]
    }
  }
}
| elacin/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiCardMedia.scala | Scala | apache-2.0 | 1,628 |
package julienrf.json.derived
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.featurespec.AnyFeatureSpec
import org.scalatestplus.scalacheck.Checkers
import play.api.libs.json._
// Property-based tests for field-name adaptation in derived JSON codecs:
// camelCase by default, snake_case via a custom NameAdapter. Each scenario
// generates (value, expected JSON) pairs and checks the round-trip law.
class NameAdapterSuite extends AnyFeatureSpec with Checkers {

  Feature("use camelCase as the default casing for field names") {
    Scenario("product type") {
      case class Foo(sC: String, iC: Int)
      implicit val fooArbitrary: Arbitrary[(Foo, JsValue)] =
        Arbitrary(for (s <- Gen.alphaStr; i <- arbitrary[Int]) yield (Foo(s, i), Json.obj("sC" -> s, "iC" -> i)))
      implicit val fooFormat: OFormat[Foo] = oformat()
      jsonIdentityLaw[Foo]
    }
  }

  Feature("customize the casing for field names") {
    Scenario("product type") {
      case class Foo(sC: String, iC: Int)
      implicit val fooArbitrary: Arbitrary[(Foo, JsValue)] =
        Arbitrary(for (s <- Gen.alphaStr; i <- arbitrary[Int]) yield (Foo(s, i), Json.obj("s_c" -> s, "i_c" -> i)))
      implicit val fooFormat: OFormat[Foo] = oformat(snakeAdapter())
      jsonIdentityLaw[Foo]
    }
    Scenario("sum types") {
      sealed trait Foo
      case class Bar(xC: Int) extends Foo
      case class Baz(sC: String) extends Foo
      case object Bah extends Foo
      implicit lazy val fooFormat: OFormat[Foo] = flat.oformat((__ \\ "type").format[String], snakeAdapter())
      implicit val fooArbitrary: Arbitrary[(Foo, JsValue)] =
        Arbitrary(
          Gen.oneOf(
            arbitrary[Int].map(i => (Bar(i), Json.obj("x_c" -> i, "type" -> "Bar"))),
            Gen.alphaStr.map(s => (Baz(s), Json.obj("s_c" -> s, "type" -> "Baz"))),
            Gen.const((Bah, Json.obj("type" -> "Bah"))
            )
          ))
      jsonIdentityLaw[Foo]
    }
    Scenario("sum types with options") {
      sealed trait Foo
      case class Bar(xC: Int) extends Foo
      case class Baz(sC: String) extends Foo
      case object Bah extends Foo
      case class Bat(oC: Option[String]) extends Foo
      implicit val fooFormat: OFormat[Foo] = flat.oformat((__ \\ "type").format[String], snakeAdapter())
      implicit val fooArbitrary: Arbitrary[(Foo, JsValue)] =
        Arbitrary(
          Gen.oneOf(
            arbitrary[Int].map(i => (Bar(i), Json.obj("x_c" -> i, "type" -> "Bar"))),
            Gen.alphaStr.map(s => (Baz(s), Json.obj("s_c" -> s, "type" -> "Baz"))),
            Gen.const((Bah, Json.obj("type" -> "Bah"))),
            // None fields are omitted from the JSON object entirely.
            arbitrary[Option[String]].map(s => (Bat(s), Json.obj("type" -> "Bat") ++ s.fold(Json.obj())(x => Json.obj("o_c" -> x))))
          ))
      jsonIdentityLaw[Foo]
    }
    Scenario("recursive type") {
      sealed trait Tree
      case class Leaf(lS: String) extends Tree
      case class Node(lhsSnake: Tree, rhsSnake: Tree) extends Tree

      // Reference serializer: builds the JSON the derived codec must produce.
      def writeTree(tree: Tree): JsValue = tree match {
        case n: Node =>
          Json.obj(
            "type" -> "Node",
            "lhs_snake" -> writeTree(n.lhsSnake),
            "rhs_snake" -> writeTree(n.rhsSnake)
          )
        case l: Leaf => Json.obj(
          "type" -> "Leaf",
          "l_s" -> l.lS
        )
      }

      // Generates trees with depth bounded at 3 to keep the test fast.
      implicit val arbitraryTree: Arbitrary[Tree] = {
        def atDepth(depth: Int): Gen[Tree] =
          if (depth < 3) {
            Gen.oneOf(
              arbitrary[String].map(Leaf),
              for {
                lhs <- atDepth(depth + 1)
                rhs <- atDepth(depth + 1)
              } yield Node(lhs, rhs)
            )
          } else arbitrary[String].map(Leaf)
        Arbitrary(atDepth(0))
      }
      implicit val arbitraryTreeWithJsValue: Arbitrary[(Tree, JsValue)] = {
        Arbitrary(for (t <- arbitrary[Tree]) yield (t, writeTree(t)))
      }
      {
        // snakeAdapter(1): the adapter must be applied at most once per field
        // name even though the codec is recursive.
        lazy val treeReads: Reads[Tree] = flat.reads[Tree]((__ \\ "type").read[String], snakeAdapter(1))
        lazy val treeWrites: OWrites[Tree] = flat.owrites((__ \\ "type").write[String], snakeAdapter(1))
        implicit lazy val treeFormat: OFormat[Tree] = OFormat.apply[Tree](treeReads, treeWrites)
        jsonIdentityLaw[Tree]
      }
    }
  }

  // A snake_case NameAdapter that additionally fails the test if it is
  // invoked more than `max` times for the same field name (guards against
  // repeated adaptation in recursive derivations).
  def snakeAdapter(max:Int = 2) = new NameAdapter {
    var nameMap = Map[String, Int]()
    def increment(v1: String) = this.synchronized {
      nameMap.get(v1).fold(nameMap += v1 -> 1)(i => nameMap += v1 -> (i + 1))
      if (nameMap(v1) > max) throw new RuntimeException(s"Snake conversion applied more than $max times to field: $v1")
    }
    override def apply(v1: String): String = {
      increment(v1)
      NameAdapter.snakeCase(v1)
    }
  }

  // Round-trip law: reading the expected JSON yields the value, and writing
  // that value back yields exactly the expected JSON.
  def jsonIdentityLaw[A](implicit reads: Reads[A], owrites: OWrites[A], arbA: Arbitrary[(A, JsValue)]): Unit =
    check((a: (A, JsValue)) => {
      reads.reads(a._2).fold(_ => false, r => r == a._1 && owrites.writes(r) == a._2)
    })
}
| julienrf/play-json-derived-codecs | library/src/test/scala/julienrf/json/derived/NameAdapterSuite.scala | Scala | mit | 4,804 |
/*
DeadDataPanel.scala
Created on 2011-08-21
This file is part of amiga-tools. Please see README and LICENSE for
more information and licensing details.
*/
package org.dmpp.fatma
import javax.swing._
import javax.swing.event._
import javax.swing.table._
import java.awt.{FlowLayout, Dimension, Color}
import java.awt.event._
import scala.collection.mutable.HashMap
import org.dmpp.os.devices._
/**
 * Swing panel for viewing and editing the "dead data" of the key currently
 * selected in the key map editor. It shows two tables:
 *  - a descriptor table (one row per modifier combination) with the dead
 *    type and its value/index, and
 *  - a translation table showing the output characters of the Deadable
 *    descriptor selected in the first table.
 * All mutations are wrapped in undoable KeyboardEditorCommand objects and
 * routed through the editor so undo/redo stays consistent.
 */
class DeadDataPanel(editor: KeyMapEditor) extends JPanel(new FlowLayout(FlowLayout.LEFT)) {
  import CharConverter._
  import ModifierTitles._
  import KeyMapConstants._

  // Translation values of the currently selected Deadable descriptor,
  // or null when the selected descriptor has no translation table.
  var translationValues: Array[Int] = null

  // Convenience accessors into the editor's current key map state.
  private def keymapObject = editor.keymapObject
  private def selectedKey = editor.selectedKey
  private def flags = keymapObject.flagsFor(selectedKey)
  private def deadDataDescriptors = keymapObject.deadDataDescriptorsFor(selectedKey)
  private def numDeadDataRows = keymapObject.numDeadDataTableRows
  private def numDeadDataColumns = keymapObject.numDeadDataTableColumns

  // Model for the per-modifier descriptor table (left-hand table).
  object DeadDescriptorTableModel extends AbstractTableModel {
    val ColumnTitles = Array("Modifiers", "Dead Type", "Values", "View")

    def getRowCount = deadDataDescriptors.length
    def getColumnCount = ColumnTitles.length
    override def getColumnName(column: Int) = ColumnTitles(column)
    override def isCellEditable(row: Int, column: Int) = {
      // Dead type is always editable; the value column only when the
      // descriptor is not Deadable (Deadable values are edited in the
      // translation table instead).
      column == 1 || column == 2 && deadDataDescriptors(row).deadType != Deadable
    }
    def getValueAt(row: Int, column: Int) = {
      try {
        if (column == 0) {
          if (flags != null) titlesFor(flags)(row) else "?"
        } else if (column == 1) {
          if (deadDataDescriptors(row) == null) "" else deadDataDescriptors(row).deadType
        } else if (column == 2) {
          if (deadDataDescriptors(row) == null) ""
          else {
            deadDataDescriptors(row) match {
              case DeadDataNone(value) => "%02x".format(value)
              case DeadDataDeadSingle(index) => "%d".format(index)
              case DeadDataDeadDouble(index) => "%d".format(index)
              case _ => "N/A"
            }
          }
        } else if (column == 3) {
          if (deadDataDescriptors(row) == null) ""
          else {
            deadDataDescriptors(row) match {
              case DeadDataNone(value) => convertChar(value.asInstanceOf[Char])
              case DeadDataDeadable(_) => "(see table)"
              case DeadDataDeadSingle(_) => "(index)"
              case DeadDataDeadDouble(_) => "(index)"
              case _ => "N/A"
            }
          }
        } else "???"
      } catch {
        case e: Throwable => e.printStackTrace
          "<ERROR>"
      }
    }
    override def setValueAt(value: Object, row: Int, col: Int) {
      // saving original state in the closure
      val originalKey = selectedKey
      val originalDescriptor = deadDataDescriptors(row)
      val originalIndex = row
      val originalNumRows = keymapObject.numDeadDataTableRows
      val originalNumColumns = keymapObject.numDeadDataTableColumns

      if (col == 1 && value != deadDataDescriptors(row).deadType) {
        // After we changed the descriptor type, we need to check
        // the max indexes in the translation table and resize if
        // necessary
        // An undo has to restore all deadable descriptors
        val command = new KeyboardEditorCommand {
          // Snapshot of every dead key's descriptors, taken at command
          // creation so undo can restore the complete pre-change state.
          val originalDeadableData = new HashMap[Int, Array[DeadDataDescriptor]]
          for (keyCode <- 0 until NumKeys) {
            if (isDefined(keyCode) && keymapObject.isDead(keyCode)) {
              val descriptors = keymapObject.allDeadDataDescriptorsFor(keyCode)
              originalDeadableData(keyCode) = descriptors
            }
          }
          def execute {
            // Build a fresh descriptor of the requested type with default
            // contents ('?' placeholders / index 1).
            val newDescriptor = value match {
              case DeadNone => DeadDataNone('?'.asInstanceOf[Int])
              case DeadSingle => DeadDataDeadSingle(1)
              case DeadDouble => DeadDataDeadDouble(1)
              case Deadable =>
                val translationValues = new Array[Int](keymapObject.deadDataTableSize)
                for (i <- 0 until translationValues.length)
                  translationValues(i) = '?'.asInstanceOf[Int]
                DeadDataDeadable(translationValues)
              case _ =>
                throw new UnsupportedOperationException("unsupported type: " + value)
            }
            keymapObject.setDeadDataDescriptor(originalKey, originalIndex,
                                               newDescriptor)
            keymapObject.resizeTranslationTables
          }
          def undo {
            try {
              keymapObject.setAllDeadDataDescriptors(originalDeadableData)
            } catch {
              case e: Throwable => e.printStackTrace
            }
          }
        }
        try {
          editor.executeCommand(command)
        } catch {
          case e: Throwable => e.printStackTrace
        }
      } else if (col == 2) {
        // Value column: parse the hex input and rebuild a descriptor of the
        // same type around the new value/index.
        val command = new KeyboardEditorCommand {
          def execute {
            val newValue = Integer.parseInt(value.toString, 16)
            val newDescriptor = originalDescriptor match {
              case DeadDataNone(_) => DeadDataNone(newValue)
              case DeadDataDeadSingle(_) => DeadDataDeadSingle(newValue)
              case DeadDataDeadDouble(_) => DeadDataDeadDouble(newValue)
              case _ => null
            }
            keymapObject.setDeadDataDescriptor(originalKey, originalIndex,
                                               newDescriptor)
            keymapObject.resizeTranslationTables
          }
          def undo {
            keymapObject.setDeadDataDescriptor(originalKey, originalIndex,
                                               originalDescriptor)
            keymapObject.resizeTranslationTables
          }
        }
        try {
          editor.executeCommand(command)
        } catch {
          case e: Throwable => e.printStackTrace
        }
      }
    }
  }

  // Model for the translation table (right-hand table); each row shows
  // numDeadDataColumns values, as hex (column 0) and as characters (column 1).
  object DeadTranslationTableModel extends AbstractTableModel {
    val ColumnTitles = Array("Values", "View")
    def getRowCount = if (translationValues != null) numDeadDataRows else 0
    def getColumnCount = 2
    override def getColumnName(column: Int) = ColumnTitles(column)
    def getValueAt(row: Int, column: Int) = {
      try {
        if (translationValues == null) "-"
        else {
          val startIndex = row * numDeadDataColumns
          val buffer = new StringBuilder
          for (i <- 0 until numDeadDataColumns) {
            val value = translationValues(startIndex + i)
            column match {
              case 0 => buffer.append("%02x".format(value))
              case _ => buffer.append(convertChar(value.asInstanceOf[Char]))
            }
          }
          buffer.toString
        }
      } catch {
        case e: Throwable =>
          e.printStackTrace
          throw e
      }
    }
    override def isCellEditable(row: Int, column: Int) = column == 0
    override def setValueAt(value: Object, row: Int, col: Int) {
      val originalKey = selectedKey
      val originalIndex = descriptorTable.getSelectedRow
      // NOTE(review): this indexes the descriptor list with the
      // translation-table row rather than the selected descriptor row
      // (originalIndex) — confirm this is intended.
      val originalDescriptor = deadDataDescriptors(row).asInstanceOf[DeadDataDeadable]
      val command = new KeyboardEditorCommand {
        def execute {
          // Parse the edited cell as a run of 2-digit hex values and write
          // them into a copy of the descriptor's translation table,
          // starting at this row's offset.
          val strvalue = value.toString
          val newValues = new Array[Int](keymapObject.deadDataTableSize)
          for (i <- 0 until newValues.length)
            newValues(i) = originalDescriptor.translationValues(i)
          var strIndex = 0
          var resultIndex = row * keymapObject.numDeadDataTableColumns
          while (strIndex < strvalue.length && resultIndex < newValues.length) {
            val hexstr = "" + strvalue(strIndex) + strvalue(strIndex + 1)
            val hexnum = Integer.parseInt(hexstr, 16)
            newValues(resultIndex) = hexnum
            strIndex += 2
            resultIndex += 1
          }
          printf("Set value: %s key: $%02x, index: %d\n", value, originalKey, originalIndex)
          keymapObject.setDeadDataDescriptor(originalKey, originalIndex,
                                             DeadDataDeadable(newValues))
          // A trick to make the values visible immediately
          translationValues = newValues
          fireTableCellUpdated(row, col)
        }
        def undo {
          keymapObject.setDeadDataDescriptor(originalKey, originalIndex,
                                             originalDescriptor)
        }
      }
      try {
        // No full UI refresh here: the table cell was already updated above.
        editor.executeCommandNoUpdateUI(command)
        editor.updateKeyboardView
      } catch {
        case e: Throwable => e.printStackTrace
      }
    }
  }

  // Editor widget for the "Dead Type" column of the descriptor table.
  val deadTypeCombobox = new JComboBox(Array[Object](DeadNone, DeadSingle,
                                                     DeadDouble, Deadable))
  val descriptorTable = new JTable(DeadDescriptorTableModel) {
    showVerticalLines = true
    showHorizontalLines = true
    gridColor = Color.LIGHT_GRAY
    setSelectionMode(ListSelectionModel.SINGLE_SELECTION)
    getColumnModel.getColumn(1).setCellEditor(
      new DefaultCellEditor(deadTypeCombobox))
  }
  val scrollPane = new JScrollPane(descriptorTable)
  scrollPane.setPreferredSize(new Dimension(400, 160))
  setBorder(BorderFactory.createTitledBorder(
    BorderFactory.createEtchedBorder, "Dead Data"))
  add(scrollPane)

  // When a descriptor row is selected, expose its translation values (if it
  // is Deadable) to the translation table and refresh that table.
  descriptorTable.getSelectionModel.addListSelectionListener(
    new ListSelectionListener {
      def valueChanged(e: ListSelectionEvent) {
        if (!e.getValueIsAdjusting &&
            descriptorTable.getSelectedRow >= 0) {
          val index = descriptorTable.getSelectedRow
          val descriptor = deadDataDescriptors(index)
          translationValues = descriptor match {
            case DeadDataDeadable(values) => values
            case _ => null
          }
          DeadTranslationTableModel.fireTableDataChanged
        }
      }
    })

  val translationTable = new JTable(DeadTranslationTableModel) {
    showVerticalLines = true
    showHorizontalLines = true
    gridColor = Color.LIGHT_GRAY
    setSelectionMode(ListSelectionModel.SINGLE_SELECTION)
  }
  val scrollPane2 = new JScrollPane(translationTable)
  scrollPane2.setPreferredSize(new Dimension(300, 160))
  add(scrollPane2)

  // Refreshes both tables after the selected key (or its data) changed.
  def update {
    translationValues = null
    DeadDescriptorTableModel.fireTableDataChanged
    DeadTranslationTableModel.fireTableDataChanged
  }
}
| weiju/fatma | src/main/scala/org/dmpp/fatma/DeadDataPanel.scala | Scala | gpl-3.0 | 10,460 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job.local
import org.apache.samza.application.ApplicationUtil
import org.apache.samza.application.descriptors.ApplicationDescriptorUtil
import org.apache.samza.config.JobConfig._
import org.apache.samza.config.ShellCommandConfig._
import org.apache.samza.config.{Config, JobConfig, TaskConfigJava}
import org.apache.samza.container.{SamzaContainer, SamzaContainerListener, TaskName}
import org.apache.samza.context.{ExternalContext, JobContextImpl}
import org.apache.samza.coordinator.JobModelManager
import org.apache.samza.coordinator.stream.CoordinatorStreamManager
import org.apache.samza.job.{StreamJob, StreamJobFactory}
import org.apache.samza.metrics.{JmxServer, MetricsRegistryMap, MetricsReporter}
import org.apache.samza.runtime.ProcessorContext
import org.apache.samza.storage.ChangelogStreamManager
import org.apache.samza.task.{TaskFactory, TaskFactoryUtil}
import org.apache.samza.util.Logging
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
 * Creates a new Thread job with the given config.
 *
 * The resulting [[ThreadJob]] runs the single Samza container in a thread inside
 * the submitting JVM, so this factory is only meant for debugging (see the log
 * line at the top of getJob).
 */
class ThreadJobFactory extends StreamJobFactory with Logging {
  def getJob(config: Config): StreamJob = {
    info("Creating a ThreadJob, which is only meant for debugging.")
    val metricsRegistry = new MetricsRegistryMap()
    // Bootstrap the full job config from the coordinator stream before building the job model.
    val coordinatorStreamManager = new CoordinatorStreamManager(config, metricsRegistry)
    coordinatorStreamManager.register(getClass.getSimpleName)
    coordinatorStreamManager.start
    coordinatorStreamManager.bootstrap
    val changelogStreamManager = new ChangelogStreamManager(coordinatorStreamManager)

    val coordinator = JobModelManager(coordinatorStreamManager.getConfig, changelogStreamManager.readPartitionMapping(), metricsRegistry)
    val jobModel = coordinator.jobModel

    // Persist the task -> changelog partition assignment so it stays stable across restarts.
    val taskPartitionMappings: mutable.Map[TaskName, Integer] = mutable.Map[TaskName, Integer]()
    for (containerModel <- jobModel.getContainers.values) {
      for (taskModel <- containerModel.getTasks.values) {
        taskPartitionMappings.put(taskModel.getTaskName, taskModel.getChangelogPartition.getPartitionId)
      }
    }
    changelogStreamManager.writePartitionMapping(taskPartitionMappings)

    // Create necessary checkpoint and changelog streams before the container starts.
    val checkpointManager = new TaskConfigJava(jobModel.getConfig).getCheckpointManager(metricsRegistry)
    if (checkpointManager != null) {
      checkpointManager.createResources()
      checkpointManager.stop()
    }
    ChangelogStreamManager.createChangelogStreams(jobModel.getConfig, jobModel.maxChangeLogStreamPartitions)

    // Single-container job: the only container always has id "0".
    val containerId = "0"

    var jmxServer: JmxServer = null
    if (new JobConfig(config).getJMXEnabled) {
      jmxServer = new JmxServer();
    }

    val appDesc = ApplicationDescriptorUtil.getAppDescriptor(ApplicationUtil.fromConfig(config), config)
    val taskFactory: TaskFactory[_] = TaskFactoryUtil.getTaskFactory(appDesc)

    // Give developers a nice friendly warning if they've specified task.opts and are using a threaded job.
    config.getTaskOpts match {
      case Some(taskOpts) => warn("%s was specified in config, but is not being used because job is being executed with ThreadJob. " +
        "You probably want to run %s=%s." format(TASK_JVM_OPTS, STREAM_JOB_FACTORY_CLASS, classOf[ProcessJobFactory].getName))
      case _ => None
    }

    // Bridge SamzaContainer lifecycle callbacks to the user's ProcessorLifecycleListener.
    val containerListener = {
      val processorLifecycleListener = appDesc.getProcessorLifecycleListenerFactory().createInstance(new ProcessorContext() {}, config)
      new SamzaContainerListener {
        override def afterFailure(t: Throwable): Unit = {
          processorLifecycleListener.afterFailure(t)
          // Rethrow so the failure propagates out of the container thread.
          throw t
        }

        override def afterStart(): Unit = {
          processorLifecycleListener.afterStart()
        }

        override def afterStop(): Unit = {
          processorLifecycleListener.afterStop()
        }

        override def beforeStart(): Unit = {
          processorLifecycleListener.beforeStart()
        }
      }
    }

    try {
      coordinator.start
      val container = SamzaContainer(
        containerId,
        jobModel,
        Map[String, MetricsReporter](),
        taskFactory,
        JobContextImpl.fromConfigWithDefaults(config),
        Option(appDesc.getApplicationContainerContextFactory.orElse(null)),
        Option(appDesc.getApplicationTaskContextFactory.orElse(null)),
        buildExternalContext(config)
      )

      container.setContainerListener(containerListener)

      val threadJob = new ThreadJob(container)
      threadJob
    } finally {
      // Always release coordinator, coordinator-stream, and JMX resources,
      // even when container construction fails.
      coordinator.stop
      coordinatorStreamManager.stop()

      if (jmxServer != null) {
        jmxServer.stop
      }
    }
  }

  // Hook point for custom forks; the stock implementation provides no external context.
  private def buildExternalContext(config: Config): Option[ExternalContext] = {
    /*
     * By default, use an empty ExternalContext here. In a custom fork of Samza, this can be implemented to pass
     * a non-empty ExternalContext to SamzaContainer. Only config should be used to build the external context. In the
     * future, components like the application descriptor may not be available.
     */
    None
  }
}
| bharathkk/samza | samza-core/src/main/scala/org/apache/samza/job/local/ThreadJobFactory.scala | Scala | apache-2.0 | 5,941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{ByteArrayInputStream, DataInputStream, File, FileOutputStream, IOException,
OutputStreamWriter}
import java.net.{InetAddress, UnknownHostException, URI, URISyntaxException}
import java.nio.ByteBuffer
import java.security.PrivilegedExceptionAction
import java.util.{Properties, UUID}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConversions._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map}
import scala.reflect.runtime.universe
import scala.util.{Try, Success, Failure}
import scala.util.control.NonFatal
import com.google.common.base.Charsets.UTF_8
import com.google.common.base.Objects
import com.google.common.io.Files
import org.apache.hadoop.io.DataOutputBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.{TokenIdentifier, Token}
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.util.Records
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{Logging, SecurityManager, SparkConf, SparkContext, SparkException}
import org.apache.spark.util.Utils
private[spark] class Client(
val args: ClientArguments,
val hadoopConf: Configuration,
val sparkConf: SparkConf)
extends Logging {
import Client._
def this(clientArgs: ClientArguments, spConf: SparkConf) =
this(clientArgs, SparkHadoopUtil.get.newConfiguration(spConf), spConf)
def this(clientArgs: ClientArguments) = this(clientArgs, new SparkConf())
private val yarnClient = YarnClient.createYarnClient
private val yarnConf = new YarnConfiguration(hadoopConf)
private var credentials: Credentials = null
private val amMemoryOverhead = args.amMemoryOverhead // MB
private val executorMemoryOverhead = args.executorMemoryOverhead // MB
private val distCacheMgr = new ClientDistributedCacheManager()
private val isClusterMode = args.isClusterMode
private var loginFromKeytab = false
private var principal: String = null
private var keytab: String = null
private val fireAndForget = isClusterMode &&
!sparkConf.getBoolean("spark.yarn.submit.waitAppCompletion", true)
/** Shut down the YARN client and clear YARN-mode state from this JVM. */
def stop(): Unit = {
  yarnClient.stop()
  // Unset YARN mode system env variable, to allow switching between cluster types.
  System.clearProperty("SPARK_YARN_MODE")
}
/**
 * Submit an application running our ApplicationMaster to the ResourceManager.
 *
 * The stable Yarn API provides a convenience method (YarnClient#createApplication) for
 * creating applications and setting up the application submission context. This was not
 * available in the alpha API.
 *
 * @return the ApplicationId the ResourceManager assigned to this submission
 */
def submitApplication(): ApplicationId = {
  var appId: ApplicationId = null
  try {
    // Setup the credentials before doing anything else,
    // so we have don't have issues at any point.
    setupCredentials()
    yarnClient.init(yarnConf)
    yarnClient.start()

    logInfo("Requesting a new application from cluster with %d NodeManagers"
      .format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))

    // Get a new application from our RM
    val newApp = yarnClient.createApplication()
    val newAppResponse = newApp.getNewApplicationResponse()
    appId = newAppResponse.getApplicationId()

    // Verify whether the cluster has enough resources for our AM
    verifyClusterResources(newAppResponse)

    // Set up the appropriate contexts to launch our AM
    val containerContext = createContainerLaunchContext(newAppResponse)
    val appContext = createApplicationSubmissionContext(newApp, containerContext)

    // Finally, submit and monitor the application
    logInfo(s"Submitting application ${appId.getId} to ResourceManager")
    yarnClient.submitApplication(appContext)
    appId
  } catch {
    case e: Throwable =>
      // Best effort: remove anything already uploaded to this app's staging dir.
      if (appId != null) {
        cleanupStagingDir(appId)
      }
      throw e
  }
}
/**
 * Cleanup application staging directory.
 *
 * Deletes the staging dir of the given application from HDFS, unless the user
 * asked for staged files to be kept via spark.yarn.preserve.staging.files.
 * Failures are logged and swallowed: cleanup is best-effort.
 */
private def cleanupStagingDir(appId: ApplicationId): Unit = {
  val appStagingDir = getAppStagingDir(appId)
  try {
    val preserveFiles = sparkConf.getBoolean("spark.yarn.preserve.staging.files", false)
    val stagingDirPath = new Path(appStagingDir)
    val fs = FileSystem.get(hadoopConf)
    if (!preserveFiles && fs.exists(stagingDirPath)) {
      logInfo("Deleting staging directory " + stagingDirPath)
      fs.delete(stagingDirPath, true)
    }
  } catch {
    case ioe: IOException =>
      logWarning("Failed to cleanup staging dir " + appStagingDir, ioe)
  }
}
/**
 * Set up the context for submitting our ApplicationMaster.
 * This uses the YarnClientApplication not available in the Yarn alpha API.
 *
 * @param newApp the application handle obtained from the ResourceManager
 * @param containerContext launch context (env, resources, command) for the AM container
 */
def createApplicationSubmissionContext(
    newApp: YarnClientApplication,
    containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
  val appContext = newApp.getApplicationSubmissionContext
  appContext.setApplicationName(args.appName)
  appContext.setQueue(args.amQueue)
  appContext.setAMContainerSpec(containerContext)
  appContext.setApplicationType("SPARK")
  sparkConf.getOption("spark.yarn.maxAppAttempts").map(_.toInt) match {
    case Some(v) => appContext.setMaxAppAttempts(v)
    case None => logDebug("spark.yarn.maxAppAttempts is not set. " +
        "Cluster's default value will be used.")
  }
  // Resource ask for the AM container: AM heap plus off-heap overhead, and AM cores.
  val capability = Records.newRecord(classOf[Resource])
  capability.setMemory(args.amMemory + amMemoryOverhead)
  capability.setVirtualCores(args.amCores)
  appContext.setResource(capability)
  appContext
}
/** Set up security tokens for launching our ApplicationMaster container.
 *  Serializes all currently held delegation tokens into the container launch context. */
private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
  val dob = new DataOutputBuffer
  credentials.writeTokenStorageToStream(dob)
  amContainer.setTokens(ByteBuffer.wrap(dob.getData))
}
/** Get the application report from the ResourceManager for an application we have submitted. */
def getApplicationReport(appId: ApplicationId): ApplicationReport =
  yarnClient.getApplicationReport(appId)
/**
 * Security token used by this client to communicate with the ApplicationMaster.
 *
 * When security is not enabled the report carries a null token, in which case
 * an empty string is returned.
 */
private def getClientToken(report: ApplicationReport): String = {
  val token = report.getClientToAMToken
  if (token == null) "" else token.toString
}
/**
 * Fail fast if we have requested more resources per container than is available in the cluster.
 *
 * Checks both the executor and the AM container asks (heap + off-heap overhead, in MB)
 * against the cluster's maximum container allocation and throws
 * IllegalArgumentException when either does not fit.
 */
private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
  val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
  logInfo("Verifying our application has not requested more than the maximum " +
    s"memory capability of the cluster ($maxMem MB per container)")
  // Shared check: a container's heap plus overhead must fit in one allocation.
  def ensureFits(requested: Int, overhead: Int, description: String): Unit = {
    if (requested + overhead > maxMem) {
      throw new IllegalArgumentException(
        s"Required $description memory ($requested+$overhead MB) is above the max threshold " +
        s"($maxMem MB) of this cluster! " +
        "Please increase the value of 'yarn.scheduler.maximum-allocation-mb'.")
    }
  }
  ensureFits(args.executorMemory, executorMemoryOverhead, "executor")
  ensureFits(args.amMemory, amMemoryOverhead, "AM")
  logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
    args.amMemory + amMemoryOverhead,
    amMemoryOverhead))
  // We could add checks to make sure the entire cluster has enough resources but that involves
  // getting all the node reports and computing ourselves.
}
/**
 * Copy the given file to a remote file system (e.g. HDFS) if needed.
 *
 * The file is only copied if the source and destination file systems are different. This is used
 * for preparing resources for launching the ApplicationMaster container. Exposed for testing.
 *
 * @param destDir destination directory (on the shared/remote file system)
 * @param srcPath file to make available remotely
 * @param replication replication factor to set on the uploaded copy
 * @return fully-qualified, symlink-resolved path of the file to reference
 */
private[yarn] def copyFileToRemote(
    destDir: Path,
    srcPath: Path,
    replication: Short): Path = {
  val destFs = destDir.getFileSystem(hadoopConf)
  val srcFs = srcPath.getFileSystem(hadoopConf)
  var destPath = srcPath
  if (!compareFs(srcFs, destFs)) {
    destPath = new Path(destDir, srcPath.getName())
    logInfo(s"Uploading resource $srcPath -> $destPath")
    FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
    destFs.setReplication(destPath, replication)
    destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
  } else {
    logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
  }
  // Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
  // version shows the specific version in the distributed cache configuration
  val qualifiedDestPath = destFs.makeQualified(destPath)
  val fc = FileContext.getFileContext(qualifiedDestPath.toUri(), hadoopConf)
  fc.resolvePath(qualifiedDestPath)
}
/**
 * Upload any resources to the distributed cache if needed. If a resource is intended to be
 * consumed locally, set up the appropriate config for downstream code to handle it properly.
 * This is used for setting up a container launch context for our ApplicationMaster.
 * Exposed for testing.
 *
 * @param appStagingDir per-application staging directory (relative to the FS home dir)
 * @param pySparkArchives PySpark archives to distribute alongside the app
 * @return map from cache link name to the LocalResource registered for it
 */
def prepareLocalResources(
    appStagingDir: String,
    pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
  logInfo("Preparing resources for our AM container")
  // Upload Spark and the application JAR to the remote file system if necessary,
  // and add them as local resources to the application master.
  val fs = FileSystem.get(hadoopConf)
  val dst = new Path(fs.getHomeDirectory(), appStagingDir)
  val nns = YarnSparkHadoopUtil.get.getNameNodesToAccess(sparkConf) + dst
  YarnSparkHadoopUtil.get.obtainTokensForNamenodes(nns, hadoopConf, credentials)
  // Used to keep track of URIs added to the distributed cache. If the same URI is added
  // multiple times, YARN will fail to launch containers for the app with an internal
  // error.
  val distributedUris = new HashSet[String]
  obtainTokenForHiveMetastore(sparkConf, hadoopConf, credentials)
  obtainTokenForHBase(hadoopConf, credentials)

  val replication = sparkConf.getInt("spark.yarn.submit.file.replication",
    fs.getDefaultReplication(dst)).toShort
  val localResources = HashMap[String, LocalResource]()
  FileSystem.mkdirs(fs, dst, new FsPermission(STAGING_DIR_PERMISSION))
  val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
  val oldLog4jConf = Option(System.getenv("SPARK_LOG4J_CONF"))
  if (oldLog4jConf.isDefined) {
    logWarning(
      "SPARK_LOG4J_CONF detected in the system environment. This variable has been " +
        "deprecated. Please refer to the \"Launching Spark on YARN\" documentation " +
        "for alternatives.")
  }

  // Registers a URI for the cache; returns false (and warns) on duplicates.
  def addDistributedUri(uri: URI): Boolean = {
    val uriStr = uri.toString()
    if (distributedUris.contains(uriStr)) {
      logWarning(s"Resource $uri added multiple times to distributed cache.")
      false
    } else {
      distributedUris += uriStr
      true
    }
  }

  /**
   * Distribute a file to the cluster.
   *
   * If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
   * to HDFS (if not already there) and added to the application's distributed cache.
   *
   * @param path URI of the file to distribute.
   * @param resType Type of resource being distributed.
   * @param destName Name of the file in the distributed cache.
   * @param targetDir Subdirectory where to place the file.
   * @param appMasterOnly Whether to distribute only to the AM.
   * @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
   *         localized path for non-local paths, or the input `path` for local paths.
   *         The localized path will be null if the URI has already been added to the cache.
   */
  def distribute(
      path: String,
      resType: LocalResourceType = LocalResourceType.FILE,
      destName: Option[String] = None,
      targetDir: Option[String] = None,
      appMasterOnly: Boolean = false): (Boolean, String) = {
    val trimmedPath = path.trim()
    val localURI = Utils.resolveURI(trimmedPath)
    if (localURI.getScheme != LOCAL_SCHEME) {
      if (addDistributedUri(localURI)) {
        val localPath = getQualifiedLocalPath(localURI, hadoopConf)
        val linkname = targetDir.map(_ + "/").getOrElse("") +
          destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
        val destPath = copyFileToRemote(dst, localPath, replication)
        val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
        distCacheMgr.addResource(
          destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
          appMasterOnly = appMasterOnly)
        (false, linkname)
      } else {
        (false, null)
      }
    } else {
      (true, trimmedPath)
    }
  }

  // If we passed in a keytab, make sure we copy the keytab to the staging directory on
  // HDFS, and setup the relevant environment vars, so the AM can login again.
  if (loginFromKeytab) {
    logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
      " via the YARN Secure Distributed Cache.")
    val (_, localizedPath) = distribute(keytab,
      destName = Some(sparkConf.get("spark.yarn.keytab")),
      appMasterOnly = true)
    require(localizedPath != null, "Keytab file already distributed.")
  }

  /**
   * Copy the given main resource to the distributed cache if the scheme is not "local".
   * Otherwise, set the corresponding key in our SparkConf to handle it downstream.
   * Each resource is represented by a 3-tuple of:
   *   (1) destination resource name,
   *   (2) local path to the resource,
   *   (3) Spark property key to set if the scheme is not local
   */
  List(
    (SPARK_JAR, sparkJar(sparkConf), CONF_SPARK_JAR),
    (APP_JAR, args.userJar, CONF_SPARK_USER_JAR),
    ("log4j.properties", oldLog4jConf.orNull, null)
  ).foreach { case (destName, path, confKey) =>
    if (path != null && !path.trim().isEmpty()) {
      val (isLocal, localizedPath) = distribute(path, destName = Some(destName))
      if (isLocal && confKey != null) {
        require(localizedPath != null, s"Path $path already distributed.")
        // If the resource is intended for local use only, handle this downstream
        // by setting the appropriate property
        sparkConf.set(confKey, localizedPath)
      }
    }
  }

  /**
   * Do the same for any additional resources passed in through ClientArguments.
   * Each resource category is represented by a 3-tuple of:
   *   (1) comma separated list of resources in this category,
   *   (2) resource type, and
   *   (3) whether to add these resources to the classpath
   */
  val cachedSecondaryJarLinks = ListBuffer.empty[String]
  List(
    (args.addJars, LocalResourceType.FILE, true),
    (args.files, LocalResourceType.FILE, false),
    (args.archives, LocalResourceType.ARCHIVE, false)
  ).foreach { case (flist, resType, addToClasspath) =>
    if (flist != null && !flist.isEmpty()) {
      flist.split(',').foreach { file =>
        val (_, localizedPath) = distribute(file, resType = resType)
        require(localizedPath != null)
        if (addToClasspath) {
          cachedSecondaryJarLinks += localizedPath
        }
      }
    }
  }
  if (cachedSecondaryJarLinks.nonEmpty) {
    sparkConf.set(CONF_SPARK_YARN_SECONDARY_JARS, cachedSecondaryJarLinks.mkString(","))
  }

  if (isClusterMode && args.primaryPyFile != null) {
    distribute(args.primaryPyFile, appMasterOnly = true)
  }

  pySparkArchives.foreach { f => distribute(f) }

  // The python files list needs to be treated especially. All files that are not an
  // archive need to be placed in a subdirectory that will be added to PYTHONPATH.
  args.pyFiles.foreach { f =>
    val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
    distribute(f, targetDir = targetDir)
  }

  // Distribute an archive with Hadoop and Spark configuration for the AM.
  val (_, confLocalizedPath) = distribute(createConfArchive().toURI().getPath(),
    resType = LocalResourceType.ARCHIVE,
    destName = Some(LOCALIZED_CONF_DIR),
    appMasterOnly = true)
  require(confLocalizedPath != null)

  localResources
}
/**
 * Create an archive with the config files for distribution.
 *
 * These are only used by the AM, since executors will use the configuration object broadcast by
 * the driver. The files are zipped and added to the job as an archive, so that YARN will explode
 * it when distributing to the AM. This directory is then added to the classpath of the AM
 * process, just to make sure that everybody is using the same default config.
 *
 * This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
 * shows up in the classpath before YARN_CONF_DIR.
 *
 * Currently this makes a shallow copy of the conf directory. If there are cases where a
 * Hadoop config directory contains subdirectories, this code will have to be fixed.
 *
 * The archive also contains some Spark configuration. Namely, it saves the contents of
 * SparkConf in a file to be loaded by the AM process.
 *
 * @return the temporary zip file holding the config entries
 */
private def createConfArchive(): File = {
  val hadoopConfFiles = new HashMap[String, File]()
  Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR").foreach { envKey =>
    sys.env.get(envKey).foreach { path =>
      val dir = new File(path)
      if (dir.isDirectory()) {
        dir.listFiles().foreach { file =>
          // First directory wins on name clashes, giving HADOOP_CONF_DIR precedence.
          if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
            hadoopConfFiles(file.getName()) = file
          }
        }
      }
    }
  }

  val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
    new File(Utils.getLocalDir(sparkConf)))
  val confStream = new ZipOutputStream(new FileOutputStream(confArchive))

  try {
    // Level 0: no compression; the archive is small and read once by the AM.
    confStream.setLevel(0)
    hadoopConfFiles.foreach { case (name, file) =>
      if (file.canRead()) {
        confStream.putNextEntry(new ZipEntry(name))
        Files.copy(file, confStream)
        confStream.closeEntry()
      }
    }

    // Save Spark configuration to a file in the archive.
    val props = new Properties()
    sparkConf.getAll.foreach { case (k, v) => props.setProperty(k, v) }
    confStream.putNextEntry(new ZipEntry(SPARK_CONF_FILE))
    val writer = new OutputStreamWriter(confStream, UTF_8)
    props.store(writer, "Spark configuration.")
    writer.flush()
    confStream.closeEntry()
  } finally {
    confStream.close()
  }
  confArchive
}
/**
 * Get the renewal interval for tokens.
 *
 * Computed as (expiration after one renewal) minus (token issue date) for the
 * HDFS delegation token obtained for the configured principal.
 */
private def getTokenRenewalInterval(stagingDirPath: Path): Long = {
  // We cannot use the tokens generated above since those have renewer yarn. Trying to renew
  // those will fail with an access control issue. So create new tokens with the logged in
  // user as renewer.
  val creds = new Credentials()
  val nns = YarnSparkHadoopUtil.get.getNameNodesToAccess(sparkConf) + stagingDirPath
  YarnSparkHadoopUtil.get.obtainTokensForNamenodes(
    nns, hadoopConf, creds, Some(sparkConf.get("spark.yarn.principal")))
  val t = creds.getAllTokens
    .filter(_.getKind == DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
    .head
  val newExpiration = t.renew(hadoopConf)
  val identifier = new DelegationTokenIdentifier()
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(t.getIdentifier)))
  val interval = newExpiration - identifier.getIssueDate
  logInfo(s"Renewal Interval set to $interval")
  interval
}
/**
 * Set up the environment for launching our ApplicationMaster container.
 *
 * @param stagingDir the application staging directory (relative path)
 * @param pySparkArchives PySpark archives that must land on PYTHONPATH
 * @return the environment variable map for the AM container
 */
private def setupLaunchEnv(
    stagingDir: String,
    pySparkArchives: Seq[String]): HashMap[String, String] = {
  logInfo("Setting up the launch environment for our AM container")
  val env = new HashMap[String, String]()
  val extraCp = sparkConf.getOption("spark.driver.extraClassPath")
  populateClasspath(args, yarnConf, sparkConf, env, true, extraCp)
  env("SPARK_YARN_MODE") = "true"
  env("SPARK_YARN_STAGING_DIR") = stagingDir
  env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName()
  if (loginFromKeytab) {
    // Point the AM at a unique credentials file in the staging dir and record
    // the token renewal interval so it can keep credentials fresh.
    val remoteFs = FileSystem.get(hadoopConf)
    val stagingDirPath = new Path(remoteFs.getHomeDirectory, stagingDir)
    val credentialsFile = "credentials-" + UUID.randomUUID().toString
    sparkConf.set(
      "spark.yarn.credentials.file", new Path(stagingDirPath, credentialsFile).toString)
    logInfo(s"Credentials file set to: $credentialsFile")
    val renewalInterval = getTokenRenewalInterval(stagingDirPath)
    sparkConf.set("spark.yarn.token.renewal.interval", renewalInterval.toString)
  }

  // Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.*
  val amEnvPrefix = "spark.yarn.appMasterEnv."
  sparkConf.getAll
    .filter { case (k, v) => k.startsWith(amEnvPrefix) }
    .map { case (k, v) => (k.substring(amEnvPrefix.length), v) }
    .foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) }

  // Keep this for backwards compatibility but users should move to the config
  sys.env.get("SPARK_YARN_USER_ENV").foreach { userEnvs =>
    // Allow users to specify some environment variables.
    YarnSparkHadoopUtil.setEnvFromInputString(env, userEnvs)
    // Pass SPARK_YARN_USER_ENV itself to the AM so it can use it to set up executor environments.
    env("SPARK_YARN_USER_ENV") = userEnvs
  }

  // If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH
  // of the container processes too. Add all non-.py files directly to PYTHONPATH.
  //
  // NOTE: the code currently does not handle .py files defined with a "local:" scheme.
  val pythonPath = new ListBuffer[String]()
  val (pyFiles, pyArchives) = args.pyFiles.partition(_.endsWith(".py"))
  if (pyFiles.nonEmpty) {
    pythonPath += buildPath(YarnSparkHadoopUtil.expandEnvironment(Environment.PWD),
      LOCALIZED_PYTHON_DIR)
  }
  (pySparkArchives ++ pyArchives).foreach { path =>
    val uri = Utils.resolveURI(path)
    if (uri.getScheme != LOCAL_SCHEME) {
      pythonPath += buildPath(YarnSparkHadoopUtil.expandEnvironment(Environment.PWD),
        new Path(uri).getName())
    } else {
      pythonPath += uri.getPath()
    }
  }

  // Finally, update the Spark config to propagate PYTHONPATH to the AM and executors.
  if (pythonPath.nonEmpty) {
    val pythonPathStr = (sys.env.get("PYTHONPATH") ++ pythonPath)
      .mkString(YarnSparkHadoopUtil.getClassPathSeparator)
    env("PYTHONPATH") = pythonPathStr
    sparkConf.setExecutorEnv("PYTHONPATH", pythonPathStr)
  }

  // In cluster mode, if the deprecated SPARK_JAVA_OPTS is set, we need to propagate it to
  // executors. But we can't just set spark.executor.extraJavaOptions, because the driver's
  // SparkContext will not let that set spark* system properties, which is expected behavior for
  // Yarn clients. So propagate it through the environment.
  //
  // Note that to warn the user about the deprecation in cluster mode, some code from
  // SparkConf#validateSettings() is duplicated here (to avoid triggering the condition
  // described above).
  if (isClusterMode) {
    sys.env.get("SPARK_JAVA_OPTS").foreach { value =>
      val warning =
        s"""
          |SPARK_JAVA_OPTS was detected (set to '$value').
          |This is deprecated in Spark 1.0+.
          |
          |Please instead use:
          | - ./spark-submit with conf/spark-defaults.conf to set defaults for an application
          | - ./spark-submit with --driver-java-options to set -X options for a driver
          | - spark.executor.extraJavaOptions to set -X options for executors
        """.stripMargin
      logWarning(warning)
      for (proc <- Seq("driver", "executor")) {
        val key = s"spark.$proc.extraJavaOptions"
        if (sparkConf.contains(key)) {
          throw new SparkException(s"Found both $key and SPARK_JAVA_OPTS. Use only the former.")
        }
      }
      env("SPARK_JAVA_OPTS") = value
    }
  }

  // Forward the distribution classpath, if the submitter's environment defines one.
  sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp =>
    env(ENV_DIST_CLASSPATH) = dcp
  }

  env
}
  /**
   * Set up a ContainerLaunchContext to launch our ApplicationMaster container.
   * This sets up the launch environment, java options, and the command for launching the AM.
   */
  private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
    : ContainerLaunchContext = {
    logInfo("Setting up container launch context for our AM")
    val appId = newAppResponse.getApplicationId
    val appStagingDir = getAppStagingDir(appId)
    // PySpark archives are only needed when the application is a Python app.
    val pySparkArchives =
      if (sparkConf.getBoolean("spark.yarn.isPython", false)) {
        findPySparkArchives()
      } else {
        Nil
      }
    val launchEnv = setupLaunchEnv(appStagingDir, pySparkArchives)
    val localResources = prepareLocalResources(appStagingDir, pySparkArchives)
    // Set the environment variables to be passed on to the executors.
    distCacheMgr.setDistFilesEnv(launchEnv)
    distCacheMgr.setDistArchivesEnv(launchEnv)
    val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
    amContainer.setLocalResources(localResources)
    amContainer.setEnvironment(launchEnv)
    val javaOpts = ListBuffer[String]()
    // Set the environment variable through a command prefix
    // to append to the existing value of the variable
    var prefixEnv: Option[String] = None
    // Add Xmx for AM memory
    javaOpts += "-Xmx" + args.amMemory + "m"
    // Put the JVM temp dir inside the container's working directory.
    val tmpDir = new Path(
      YarnSparkHadoopUtil.expandEnvironment(Environment.PWD),
      YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR
    )
    javaOpts += "-Djava.io.tmpdir=" + tmpDir
    // TODO: Remove once cpuset version is pushed out.
    // The context is, default gc for server class machines ends up using all cores to do gc -
    // hence if there are multiple containers in same node, Spark GC affects all other containers'
    // performance (which can be that of other Spark containers)
    // Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
    // multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
    // of cores on a node.
    val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
    if (useConcurrentAndIncrementalGC) {
      // In our expts, using (default) throughput collector has severe perf ramifications in
      // multi-tenant machines
      javaOpts += "-XX:+UseConcMarkSweepGC"
      javaOpts += "-XX:MaxTenuringThreshold=31"
      javaOpts += "-XX:SurvivorRatio=8"
      javaOpts += "-XX:+CMSIncrementalMode"
      javaOpts += "-XX:+CMSIncrementalPacing"
      javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
      javaOpts += "-XX:CMSIncrementalDutyCycle=10"
    }
    // Include driver-specific java options if we are launching a driver
    if (isClusterMode) {
      // spark.driver.extraJavaOptions wins over the deprecated SPARK_JAVA_OPTS env variable.
      val driverOpts = sparkConf.getOption("spark.driver.extraJavaOptions")
        .orElse(sys.env.get("SPARK_JAVA_OPTS"))
      driverOpts.foreach { opts =>
        javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
      }
      val libraryPaths = Seq(sys.props.get("spark.driver.extraLibraryPath"),
        sys.props.get("spark.driver.libraryPath")).flatten
      if (libraryPaths.nonEmpty) {
        prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(libraryPaths)))
      }
      if (sparkConf.getOption("spark.yarn.am.extraJavaOptions").isDefined) {
        logWarning("spark.yarn.am.extraJavaOptions will not take effect in cluster mode")
      }
    } else {
      // Validate and include yarn am specific java options in yarn-client mode.
      val amOptsKey = "spark.yarn.am.extraJavaOptions"
      val amOpts = sparkConf.getOption(amOptsKey)
      amOpts.foreach { opts =>
        // AM options must not smuggle in Spark configuration or memory settings;
        // those have dedicated configuration entries.
        if (opts.contains("-Dspark")) {
          val msg = s"$amOptsKey is not allowed to set Spark options (was '$opts'). "
          throw new SparkException(msg)
        }
        if (opts.contains("-Xmx") || opts.contains("-Xms")) {
          val msg = s"$amOptsKey is not allowed to alter memory settings (was '$opts')."
          throw new SparkException(msg)
        }
        javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
      }
      sparkConf.getOption("spark.yarn.am.extraLibraryPath").foreach { paths =>
        prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(Seq(paths))))
      }
    }
    // For log4j configuration to reference
    javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
    // Build the AM command-line arguments. Each of the following is empty (Nil) when the
    // corresponding input is absent or not applicable to the deploy mode.
    val userClass =
      if (isClusterMode) {
        Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
      } else {
        Nil
      }
    val userJar =
      if (args.userJar != null) {
        Seq("--jar", args.userJar)
      } else {
        Nil
      }
    val primaryPyFile =
      if (isClusterMode && args.primaryPyFile != null) {
        Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
      } else {
        Nil
      }
    val primaryRFile =
      if (args.primaryRFile != null) {
        Seq("--primary-r-file", args.primaryRFile)
      } else {
        Nil
      }
    // Cluster mode runs the full ApplicationMaster; client mode only launches executors.
    val amClass =
      if (isClusterMode) {
        Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
      } else {
        Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
      }
    // R scripts expect their own path as the first user argument.
    if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
      args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
    }
    val userArgs = args.userArgs.flatMap { arg =>
      Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
    }
    val amArgs =
      Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++
        userArgs ++ Seq(
          "--executor-memory", args.executorMemory.toString + "m",
          "--executor-cores", args.executorCores.toString,
          "--properties-file", buildPath(YarnSparkHadoopUtil.expandEnvironment(Environment.PWD),
            LOCALIZED_CONF_DIR, SPARK_CONF_FILE))
    // Command for the ApplicationMaster
    val commands = prefixEnv ++ Seq(
        YarnSparkHadoopUtil.expandEnvironment(Environment.JAVA_HOME) + "/bin/java", "-server"
      ) ++
      javaOpts ++ amArgs ++
      Seq(
        "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
        "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
    // TODO: it would be nicer to just make sure there are no null commands here
    val printableCommands = commands.map(s => if (s == null) "null" else s).toList
    amContainer.setCommands(printableCommands)
    logDebug("===============================================================================")
    logDebug("YARN AM launch context:")
    logDebug(s"    user class: ${Option(args.userClass).getOrElse("N/A")}")
    logDebug("    env:")
    launchEnv.foreach { case (k, v) => logDebug(s"        $k -> $v") }
    logDebug("    resources:")
    localResources.foreach { case (k, v) => logDebug(s"        $k -> $v")}
    logDebug("    command:")
    logDebug(s"    ${printableCommands.mkString(" ")}")
    logDebug("===============================================================================")
    // send the acl settings into YARN to control who has access via YARN interfaces
    val securityManager = new SecurityManager(sparkConf)
    amContainer.setApplicationACLs(YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager))
    setupSecurityToken(amContainer)
    UserGroupInformation.getCurrentUser().addCredentials(credentials)
    amContainer
  }
def setupCredentials(): Unit = {
loginFromKeytab = args.principal != null || sparkConf.contains("spark.yarn.principal")
if (loginFromKeytab) {
principal =
if (args.principal != null) args.principal else sparkConf.get("spark.yarn.principal")
keytab = {
if (args.keytab != null) {
args.keytab
} else {
sparkConf.getOption("spark.yarn.keytab").orNull
}
}
require(keytab != null, "Keytab must be specified when principal is specified.")
logInfo("Attempting to login to the Kerberos" +
s" using principal: $principal and keytab: $keytab")
val f = new File(keytab)
// Generate a file name that can be used for the keytab file, that does not conflict
// with any user file.
//生成可用于keytab文件的文件名,该文件名不与任何用户文件冲突
val keytabFileName = f.getName + "-" + UUID.randomUUID().toString
sparkConf.set("spark.yarn.keytab", keytabFileName)
sparkConf.set("spark.yarn.principal", principal)
}
credentials = UserGroupInformation.getCurrentUser.getCredentials
}
  /**
   * Report the state of an application until it has exited, either successfully or
   * due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
   * KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
   * or KILLED).
   *
   * @param appId ID of the application to monitor.
   * @param returnOnRunning Whether to also return the application state when it is RUNNING.
   * @param logApplicationReport Whether to log details of the application report every iteration.
   * @return A pair of the yarn application state and the final application state.
   */
  def monitorApplication(
      appId: ApplicationId,
      returnOnRunning: Boolean = false,
      logApplicationReport: Boolean = true): (YarnApplicationState, FinalApplicationStatus) = {
    // Polling period between report fetches, in milliseconds.
    val interval = sparkConf.getLong("spark.yarn.report.interval", 1000)
    var lastState: YarnApplicationState = null
    while (true) {
      Thread.sleep(interval)
      val report: ApplicationReport =
        try {
          getApplicationReport(appId)
        } catch {
          case e: ApplicationNotFoundException =>
            // The ResourceManager no longer knows this application: report it as killed.
            logError(s"Application $appId not found.")
            return (YarnApplicationState.KILLED, FinalApplicationStatus.KILLED)
          case NonFatal(e) =>
            // Any other non-fatal RPC failure is surfaced as a failed application.
            logError(s"Failed to contact YARN for application $appId.", e)
            return (YarnApplicationState.FAILED, FinalApplicationStatus.FAILED)
        }
      val state = report.getYarnApplicationState
      if (logApplicationReport) {
        logInfo(s"Application report for $appId (state: $state)")
        // If DEBUG is enabled, log report details every iteration
        // Otherwise, log them every time the application changes state
        if (log.isDebugEnabled) {
          logDebug(formatReportDetails(report))
        } else if (lastState != state) {
          logInfo(formatReportDetails(report))
        }
      }
      // Terminal states: clean up the staging directory before returning.
      if (state == YarnApplicationState.FINISHED ||
        state == YarnApplicationState.FAILED ||
        state == YarnApplicationState.KILLED) {
        cleanupStagingDir(appId)
        return (state, report.getFinalApplicationStatus)
      }
      if (returnOnRunning && state == YarnApplicationState.RUNNING) {
        return (state, report.getFinalApplicationStatus)
      }
      lastState = state
    }
    // Never reached, but keeps compiler happy
    throw new SparkException("While loop is depleted! This should never happen...")
  }
private def formatReportDetails(report: ApplicationReport): String = {
val details = Seq[(String, String)](
("client token", getClientToken(report)),
("diagnostics", report.getDiagnostics),
("ApplicationMaster host", report.getHost),
("ApplicationMaster RPC port", report.getRpcPort.toString),
("queue", report.getQueue),
("start time", report.getStartTime.toString),
("final status", report.getFinalApplicationStatus.toString),
("tracking URL", report.getTrackingUrl),
("user", report.getUser)
)
// Use more loggable format if value is null or empty
//如果value为null或为空,则使用更多可记录格式
details.map { case (k, v) =>
val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
s"\\n\\t $k: $newValue"
}.mkString("")
}
/**
* Submit an application to the ResourceManager.
* 将应用程序提交到ResourceManager
* If set spark.yarn.submit.waitAppCompletion to true, it will stay alive
* reporting the application's status until the application has exited for any reason.
* Otherwise, the client process will exit after submission.
* If the application finishes with a failed, killed, or undefined status,
* throw an appropriate SparkException.
* 如果将spark.yarn.submit.waitAppCompletion设置为true,
* 它将保持活动状态,报告应用程序的状态,直到应用程序因任何原因退出。
* 否则,客户端进程将在提交后退出,如果应用程序以失败,
* 终止或未定义状态完成,则抛出适当的SparkException。
*/
def run(): Unit = {
val appId = submitApplication()
if (fireAndForget) {
val report = getApplicationReport(appId)
val state = report.getYarnApplicationState
logInfo(s"Application report for $appId (state: $state)")
logInfo(formatReportDetails(report))
if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
throw new SparkException(s"Application $appId finished with status: $state")
}
} else {
val (yarnApplicationState, finalApplicationStatus) = monitorApplication(appId)
if (yarnApplicationState == YarnApplicationState.FAILED ||
finalApplicationStatus == FinalApplicationStatus.FAILED) {
throw new SparkException(s"Application $appId finished with failed status")
}
if (yarnApplicationState == YarnApplicationState.KILLED ||
finalApplicationStatus == FinalApplicationStatus.KILLED) {
throw new SparkException(s"Application $appId is killed")
}
if (finalApplicationStatus == FinalApplicationStatus.UNDEFINED) {
throw new SparkException(s"The final status of application $appId is undefined")
}
}
}
private def findPySparkArchives(): Seq[String] = {
//System.getenv()和System.getProperties()的区别
//System.getenv() 返回系统环境变量值 设置系统环境变量:当前登录用户主目录下的".bashrc"文件中可以设置系统环境变量
//System.getProperties() 返回Java进程变量值 通过命令行参数的"-D"选项
sys.env.get("PYSPARK_ARCHIVES_PATH")
.map(_.split(",").toSeq)
.getOrElse {
val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
require(pyArchivesFile.exists(),
"pyspark.zip not found; cannot run pyspark application in YARN mode.")
val py4jFile = new File(pyLibPath, "py4j-0.8.2.1-src.zip")
require(py4jFile.exists(),
"py4j-0.8.2.1-src.zip not found; cannot run pyspark application in YARN mode.")
Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
}
}
}
object Client extends Logging {
def main(argStrings: Array[String]) {
if (!sys.props.contains("SPARK_SUBMIT")) {
logWarning("WARNING: This client is deprecated and will be removed in a " +
"future version of Spark. Use ./bin/spark-submit with \\"--master yarn\\"")
}
// Set an env variable indicating we are running in YARN mode.
//设置一个env变量,表示我们正在YARN模式下运行
// Note that any env variable with the SPARK_ prefix gets propagated to all (remote) processes
//请注意,具有SPARK_前缀的任何env变量都会传播到所有(远程)进程
System.setProperty("SPARK_YARN_MODE", "true")
val sparkConf = new SparkConf
val args = new ClientArguments(argStrings, sparkConf)
// to maintain backwards-compatibility
//保持向后兼容性
if (!Utils.isDynamicAllocationEnabled(sparkConf)) {
sparkConf.setIfMissing("spark.executor.instances", args.numExecutors.toString)
}
new Client(args, sparkConf).run()
}
  // Alias for the Spark assembly jar and the user jar
  val SPARK_JAR: String = "__spark__.jar"
  val APP_JAR: String = "__app__.jar"
  // URI scheme that identifies local resources
  val LOCAL_SCHEME = "local"
  // Staging directory for any temporary jars or files
  val SPARK_STAGING: String = ".sparkStaging"
  // Location of any user-defined Spark jars
  val CONF_SPARK_JAR = "spark.yarn.jar"
  val ENV_SPARK_JAR = "SPARK_JAR"
  // Internal config to propagate the location of the user's jar to the driver/executors
  val CONF_SPARK_USER_JAR = "spark.yarn.user.jar"
  // Internal config to propagate the locations of any extra jars to add to the classpath
  // of the executors
  val CONF_SPARK_YARN_SECONDARY_JARS = "spark.yarn.secondary.jars"
  // Staging directory is private! -> rwx--------
  val STAGING_DIR_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)
  // App files are world-wide readable and owner writable -> rw-r--r--
  val APP_FILE_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)
  // Distribution-defined classpath to add to processes
  val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"
  // Subdirectory where the user's Spark and Hadoop config files will be placed.
  val LOCALIZED_CONF_DIR = "__spark_conf__"
  // Name of the file in the conf archive containing Spark configuration.
  val SPARK_CONF_FILE = "__spark_conf__.properties"
  // Subdirectory where the user's python files (not archives) will be placed.
  val LOCALIZED_PYTHON_DIR = "__pyfiles__"
/**
* Find the user-defined Spark jar if configured, or return the jar containing this
* class if not.
* 如果已配置,请查找用户定义的Spark jar,否则返回包含此类的jar
*
* This method first looks in the SparkConf object for the CONF_SPARK_JAR key, and in the
* user environment if that is not found (for backwards compatibility).
* 此方法首先在SparkConf对象中查找CONF_SPARK_JAR键,如果未找到,则在用户环境中查找(为了向后兼容)。
*/
private def sparkJar(conf: SparkConf): String = {
if (conf.contains(CONF_SPARK_JAR)) {
conf.get(CONF_SPARK_JAR)
} else if (System.getenv(ENV_SPARK_JAR) != null) {
logWarning(
s"$ENV_SPARK_JAR detected in the system environment. This variable has been deprecated " +
s"in favor of the $CONF_SPARK_JAR configuration variable.")
System.getenv(ENV_SPARK_JAR)
} else {
SparkContext.jarOfClass(this.getClass).head
}
}
  /**
   * Return the path to the given application's staging directory.
   *
   * The result is a relative path of the form ".sparkStaging/<appId>".
   */
  private def getAppStagingDir(appId: ApplicationId): String = {
    buildPath(SPARK_STAGING, appId.toString())
  }
/**
* Populate the classpath entry in the given environment map with any application
* classpath specified through the Hadoop and Yarn configurations.
* 使用通过Hadoop和Yarn配置指定的任何应用程序类路径填充给定环境映射中的类路径条目
*/
private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
: Unit = {
val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
for (c <- classPathElementsToAdd.flatten) {
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
}
}
private def getYarnAppClasspath(conf: Configuration): Option[Seq[String]] =
Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
case Some(s) => Some(s.toSeq)
case None => getDefaultYarnApplicationClasspath
}
private def getMRAppClasspath(conf: Configuration): Option[Seq[String]] =
Option(conf.getStrings("mapreduce.application.classpath")) match {
case Some(s) => Some(s.toSeq)
case None => getDefaultMRApplicationClasspath
}
private[yarn] def getDefaultYarnApplicationClasspath: Option[Seq[String]] = {
val triedDefault = Try[Seq[String]] {
val field = classOf[YarnConfiguration].getField("DEFAULT_YARN_APPLICATION_CLASSPATH")
val value = field.get(null).asInstanceOf[Array[String]]
value.toSeq
} recoverWith {
case e: NoSuchFieldException => Success(Seq.empty[String])
}
triedDefault match {
case f: Failure[_] =>
logError("Unable to obtain the default YARN Application classpath.", f.exception)
case s: Success[Seq[String]] =>
logDebug(s"Using the default YARN application classpath: ${s.get.mkString(",")}")
}
triedDefault.toOption
}
/**
* In Hadoop 0.23, the MR application classpath comes with the YARN application
* classpath. In Hadoop 2.0, it's an array of Strings, and in 2.2+ it's a String.
* So we need to use reflection to retrieve it.
* 在Hadoop 0.23中,MR应用程序类路径随YARN应用程序类路径一起提供,
* 在Hadoop 2.0中,它是一个字符串数组,在2.2+中它是一个字符串,所以我们需要使用反射来检索它
*/
private[yarn] def getDefaultMRApplicationClasspath: Option[Seq[String]] = {
val triedDefault = Try[Seq[String]] {
val field = classOf[MRJobConfig].getField("DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH")
val value = if (field.getType == classOf[String]) {
StringUtils.getStrings(field.get(null).asInstanceOf[String]).toArray
} else {
field.get(null).asInstanceOf[Array[String]]
}
value.toSeq
} recoverWith {
case e: NoSuchFieldException => Success(Seq.empty[String])
}
triedDefault match {
case f: Failure[_] =>
logError("Unable to obtain the default MR Application classpath.", f.exception)
case s: Success[Seq[String]] =>
logDebug(s"Using the default MR application classpath: ${s.get.mkString(",")}")
}
triedDefault.toOption
}
  /**
   * Populate the classpath entry in the given environment map.
   *
   * User jars are generally not added to the JVM's system classpath; those are handled by the AM
   * and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
   * are included in the system classpath, though. The extra class path and other uploaded files are
   * always made available through the system class path.
   *
   * Entry order matters: extra classpath, container working dir, (AM only) localized conf dir,
   * optionally the user jars, then the Spark jar, the Hadoop classpath, and finally any
   * distribution-defined classpath.
   *
   * @param args Client arguments (when starting the AM) or null (when starting executors).
   */
  private[yarn] def populateClasspath(
      args: ClientArguments,
      conf: Configuration,
      sparkConf: SparkConf,
      env: HashMap[String, String],
      isAM: Boolean,
      extraClassPath: Option[String] = None): Unit = {
    extraClassPath.foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
    // The container's working directory always comes early on the classpath.
    addClasspathEntry(YarnSparkHadoopUtil.expandEnvironment(Environment.PWD), env)
    if (isAM) {
      // Only the AM needs the localized configuration directory.
      addClasspathEntry(
        YarnSparkHadoopUtil.expandEnvironment(Environment.PWD) + Path.SEPARATOR +
          LOCALIZED_CONF_DIR, env)
    }
    if (sparkConf.getBoolean("spark.yarn.user.classpath.first", false)) {
      // in order to properly add the app jar when user classpath is first
      // we have to do the mainJar separate in order to send the right thing
      // into addFileToClasspath
      val mainJar =
        if (args != null) {
          getMainJarUri(Option(args.userJar))
        } else {
          getMainJarUri(sparkConf.getOption(CONF_SPARK_USER_JAR))
        }
      mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR, env))
      val secondaryJars =
        if (args != null) {
          getSecondaryJarUris(Option(args.addJars))
        } else {
          getSecondaryJarUris(sparkConf.getOption(CONF_SPARK_YARN_SECONDARY_JARS))
        }
      secondaryJars.foreach { x =>
        addFileToClasspath(sparkConf, conf, x, null, env)
      }
    }
    addFileToClasspath(sparkConf, conf, new URI(sparkJar(sparkConf)), SPARK_JAR, env)
    populateHadoopClasspath(conf, env)
    // Append any distribution-defined classpath last.
    sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
  }
/**
* Returns a list of URIs representing the user classpath.
* 返回表示用户类路径的URI列表
* @param conf Spark configuration.
*/
def getUserClasspath(conf: SparkConf): Array[URI] = {
val mainUri = getMainJarUri(conf.getOption(CONF_SPARK_USER_JAR))
val secondaryUris = getSecondaryJarUris(conf.getOption(CONF_SPARK_YARN_SECONDARY_JARS))
(mainUri ++ secondaryUris).toArray
}
private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
mainJar.flatMap { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme == LOCAL_SCHEME) Some(uri) else None
}.orElse(Some(new URI(APP_JAR)))
}
private def getSecondaryJarUris(secondaryJars: Option[String]): Seq[URI] = {
secondaryJars.map(_.split(",")).toSeq.flatten.map(new URI(_))
}
  /**
   * Adds the given path to the classpath, handling "local:" URIs correctly.
   *
   * If an alternate name for the file is given, and it's not a "local:" file, the alternate
   * name will be added to the classpath (relative to the job's work directory).
   *
   * If not a "local:" file and no alternate name, the linkName will be added to the classpath.
   *
   * @param conf Spark configuration.
   * @param hadoopConf Hadoop configuration.
   * @param uri URI to add to classpath (optional).
   * @param fileName Alternate name for the file (optional).
   * @param env Map holding the environment variables.
   */
  private def addFileToClasspath(
      conf: SparkConf,
      hadoopConf: Configuration,
      uri: URI,
      fileName: String,
      env: HashMap[String, String]): Unit = {
    if (uri != null && uri.getScheme == LOCAL_SCHEME) {
      // "local:" URIs reference files already present on the node: use the path as-is.
      addClasspathEntry(getClusterPath(conf, uri.getPath), env)
    } else if (fileName != null) {
      // Distributed file with an explicit alternate name, resolved against the container's CWD.
      addClasspathEntry(buildPath(
        YarnSparkHadoopUtil.expandEnvironment(Environment.PWD), fileName), env)
    } else if (uri != null) {
      // Distributed file without an alternate name: prefer the URI fragment (link name),
      // otherwise the file's own name.
      val localPath = getQualifiedLocalPath(uri, hadoopConf)
      val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
      addClasspathEntry(buildPath(
        YarnSparkHadoopUtil.expandEnvironment(Environment.PWD), linkName), env)
    }
  }
  /**
   * Add the given path to the classpath entry of the given environment map.
   *
   * If the classpath is already set, this appends the new path to the existing classpath.
   */
  private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
    YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)
/**
* Returns the path to be sent to the NM for a path that is valid on the gateway.
* 返回要在网关上有效的路径发送到NM的路径
* This method uses two configuration values:
* 此方法使用两个配置值:
* - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
* only be valid in the gateway node.
* spark.yarn.config.gatewayPath:一个字符串,用于标识可能仅在网关节点中有效的输入路径的一部分。
*
* - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
* contain, for example, env variable references, which will be expanded by the NMs when
* starting containers.
* - spark.yarn.config.replacementPath:用于替换网关路径的字符串,这可能包含,
* 例如:env变量引用,当NM时将由NM扩展起始容器。
* If either config is not available, the input path is returned.
* 如果任一配置不可用,则返回输入路径
*/
def getClusterPath(conf: SparkConf, path: String): String = {
val localPath = conf.get("spark.yarn.config.gatewayPath", null)
val clusterPath = conf.get("spark.yarn.config.replacementPath", null)
if (localPath != null && clusterPath != null) {
path.replace(localPath, clusterPath)
} else {
path
}
}
  /**
   * Obtains token for the Hive metastore and adds them to the credentials.
   *
   * No-op unless Hadoop security (Kerberos) is enabled. When a token is obtained it is
   * registered under the alias "hive.server2.delegation.token".
   */
  private def obtainTokenForHiveMetastore(
      sparkConf: SparkConf,
      conf: Configuration,
      credentials: Credentials) {
    if (UserGroupInformation.isSecurityEnabled) {
      YarnSparkHadoopUtil.get.obtainTokenForHiveMetastore(conf).foreach {
        credentials.addToken(new Text("hive.server2.delegation.token"), _)
      }
    }
  }
  /**
   * Obtain security token for HBase.
   *
   * HBase is an optional dependency, so its classes are loaded reflectively; when they are
   * missing from the classpath this degrades to a logged no-op. A token is only requested
   * when HBase itself is configured for kerberos authentication.
   */
  def obtainTokenForHBase(conf: Configuration, credentials: Credentials): Unit = {
    if (UserGroupInformation.isSecurityEnabled) {
      val mirror = universe.runtimeMirror(getClass.getClassLoader)
      try {
        // Look up HBaseConfiguration.create(Configuration) and
        // TokenUtil.obtainToken(Configuration) without a compile-time HBase dependency.
        val confCreate = mirror.classLoader.
          loadClass("org.apache.hadoop.hbase.HBaseConfiguration").
          getMethod("create", classOf[Configuration])
        val obtainToken = mirror.classLoader.
          loadClass("org.apache.hadoop.hbase.security.token.TokenUtil").
          getMethod("obtainToken", classOf[Configuration])
        logDebug("Attempting to fetch HBase security token.")
        val hbaseConf = confCreate.invoke(null, conf).asInstanceOf[Configuration]
        if ("kerberos" == hbaseConf.get("hbase.security.authentication")) {
          val token = obtainToken.invoke(null, hbaseConf).asInstanceOf[Token[TokenIdentifier]]
          credentials.addToken(token.getService, token)
          logInfo("Added HBase security token to credentials.")
        }
      } catch {
        // Reflection failures mean an absent or incompatible HBase; log and continue.
        case e: java.lang.NoSuchMethodException =>
          logInfo("HBase Method not found: " + e)
        case e: java.lang.ClassNotFoundException =>
          logDebug("HBase Class not found: " + e)
        case e: java.lang.NoClassDefFoundError =>
          logDebug("HBase Class not found: " + e)
        case e: Exception =>
          logError("Exception when obtaining HBase security token: " + e)
      }
    }
  }
/**
* Return whether the two file systems are the same.
* 返回两个文件系统是否相同
*/
private def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
val srcUri = srcFs.getUri()
val dstUri = destFs.getUri()
if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
return false
}
var srcHost = srcUri.getHost()
var dstHost = dstUri.getHost()
// In HA or when using viewfs, the host part of the URI may not actually be a host, but the
// name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
// match.
//在HA或使用viewfs时,URI的主机部分实际上可能不是主机,而是HDFS命名空间的名称,
//这些名称将无法解决,因此如果匹配,请避免尝试
if (srcHost != null && dstHost != null && srcHost != dstHost) {
try {
srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
} catch {
case e: UnknownHostException =>
return false
}
}
Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
}
/**
* Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
* 给定本地URI,解析它并返回与URI对应的限定本地路径。
* This is used for preparing local resources to be included in the container launch context.
* 这用于准备要包含在容器启动上下文中的本地资源
*/
private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
val qualifiedURI =
if (localURI.getScheme == null) {
// If not specified, assume this is in the local filesystem to keep the behavior
// consistent with that of Hadoop
//如果未指定,则假设这是在本地文件系统中,以保持行为与Hadoop的行为一致
new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
} else {
localURI
}
new Path(qualifiedURI)
}
/**
* Whether to consider jars provided by the user to have precedence over the Spark jars when
* loading user classes.
* 是否考虑用户提供的jar在加载用户类时优先于Spark jar
*/
def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
if (isDriver) {
conf.getBoolean("spark.driver.userClassPathFirst", false)
} else {
conf.getBoolean("spark.executor.userClassPathFirst", false)
}
}
  /**
   * Joins all the path components using Path.SEPARATOR.
   */
  def buildPath(components: String*): String = {
    components.mkString(Path.SEPARATOR)
  }
}
| tophua/spark1.52 | yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala | Scala | apache-2.0 | 69,330 |
package com.sksamuel.elastic4s.get
import java.util
import com.sksamuel.elastic4s.IndexAndType
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse
import org.elasticsearch.cluster.metadata.MappingMetaData
import scala.collection.JavaConverters._
/**
 * Scala-friendly wrapper around an Elasticsearch [[GetMappingsResponse]].
 */
case class GetMappingsResult(original: GetMappingsResponse) {

  @deprecated("use .mappings to use scala maps, or use original.getMappings to use the java client", "2.0")
  def getMappings = original.getMappings

  /** Type -> mapping metadata for the given index; empty when the index is absent. */
  def mappingsFor(index: String): Map[String, MappingMetaData] = mappings.getOrElse(index, Map.empty)

  /** Mapping metadata for an index/type pair; throws NoSuchElementException when missing. */
  def mappingFor(indexAndType: IndexAndType): MappingMetaData = mappings(indexAndType.index)(indexAndType.`type`)

  /** The "properties" section of the mapping source, as an immutable scala map. */
  def propertiesFor(indexAndType: IndexAndType): Map[String, Any] =
    mappingFor(indexAndType).sourceAsMap().get("properties").asInstanceOf[util.Map[String, _]].asScala.toMap

  /**
   * Mapping attributes of a single field. Fix: the previous implementation ignored the
   * `field` parameter (it looked up the literal key "field") and cast the Option returned
   * by scala Map.get to a java Map, which always failed with a ClassCastException.
   */
  def fieldFor(indexAndType: IndexAndType, field: String): Map[String, Any] =
    propertiesFor(indexAndType)(field).asInstanceOf[util.Map[String, _]].asScala.toMap

  // returns mappings of index name to a map of types to mapping data
  def mappings: Map[String, Map[String, MappingMetaData]] = {
    original.mappings.iterator.asScala.map { x =>
      x.key -> x.value.iterator.asScala.map { y => y.key -> y.value }.toMap
    }.toMap
  }
}
| tyth/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/get/GetMappingsResult.scala | Scala | apache-2.0 | 1,333 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that match specific criteria, giving a basic overview of the dataset's contents without deeper analysis.